From a2e93f6c4f7f7297550b839aa6484553d79ec278 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 16 Apr 2014 23:10:15 +0000 Subject: [PATCH] [rt] Update to 3.14.0-rt1 and reenable svn path=/dists/trunk/linux/; revision=21244 --- debian/changelog | 1 + debian/config/defines | 2 +- ...arc64-use-generic-rwsem-spinlocks-rt.patch | 29 + ...el-SRCU-provide-a-static-initializer.patch | 131 ++ ...highmem-add-a-already-used-pte-check.patch | 24 + .../0003-arm-highmem-flush-tlb-on-unmap.patch | 29 + ...k-unlock-symetry-versus-pi_lock-and-.patch | 45 + ...local_lock-not-__local_lock-for-soft.patch | 33 + ...intk-drop-the-logbuf_lock-more-often.patch | 78 + ...le-IST-stacks-for-debug-int-3-stack-.patch | 114 ++ ...acpi_gbl_hardware-lock-back-to-a-raw.patch | 173 ++ ...on-in-recursive-migrate_disable-call.patch | 48 + .../all/rt/arch-use-pagefault-disabled.patch | 277 +++ ...ove-irq-handler-when-clock-is-unused.patch | 58 + ...-tclib-default-to-tclib-timer-for-rt.patch | 33 + .../all/rt/arm-convert-boot-lock-to-raw.patch | 361 ++++ .../all/rt/arm-disable-highmem-on-rt.patch | 21 + .../all/rt/arm-enable-highmem-for-rt.patch | 140 ++ .../all/rt/arm-preempt-lazy-support.patch | 104 + .../all/rt/arm-unwind-use_raw_lock.patch | 88 + .../rt/ata-disable-interrupts-if-non-rt.patch | 65 + .../all/rt/block-mq-use-cpu_light.patch | 78 + ...k-shorten-interrupt-disabled-regions.patch | 97 + .../features/all/rt/block-use-cpu-chill.patch | 46 + .../all/rt/bug-rt-dependend-variants.patch | 35 + ...source-tclib-allow-higher-clockrates.patch | 161 ++ .../completion-use-simple-wait-queues.patch | 184 ++ .../all/rt/cond-resched-lock-rt-tweak.patch | 21 + .../all/rt/cond-resched-softirq-rt.patch | 48 + ...igration_disable-on-lock-acquisition.patch | 34 + ...ument-why-PREEMPT_RT-uses-a-spinlock.patch | 57 + ...tplug-lock-a-sleeping-spinlock-on-rt.patch | 126 ++ .../all/rt/cpu-rt-rework-cpu-down.patch | 548 +++++ .../features/all/rt/cpu-rt-variants.patch | 27 + ...-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 108 + .../cpu_down_move_migrate_enable_back.patch | 54 + .../rt/cpumask-disable-offstack-on-rt.patch | 35 + ...-preempt-disabled-regions-more-algos.patch | 243 +++ .../features/all/rt/debugobjects-rt.patch | 24 + .../rt/disable-lazy-preempt-on-x86-64.patch | 88 + .../features/all/rt/dm-make-rt-aware.patch | 35 + .../drivers-net-8139-disable-irq-nosync.patch | 26 + .../rt/drivers-net-fix-livelock-issues.patch | 127 ++ .../drivers-net-gianfar-make-rt-aware.patch | 56 + ...rivers-net-vortex-fix-locking-issues.patch | 49 + ...andom-reduce-preempt-disabled-region.patch | 33 + ...all-flush_to_ldisc-when-the-irq-is-t.patch | 28 + ...rivers-serial-cleanup-locking-for-rt.patch | 55 + .../rt/drivers-tty-fix-omap-lock-crap.patch | 39 + ...rivers-tty-pl011-irq-disable-madness.patch | 45 + ...op-trace_i915_gem_ring_dispatch-onrt.patch | 60 + .../all/rt/early-printk-consolidate.patch | 45 + .../all/rt/epoll-use-get-cpu-light.patch | 27 + .../features/all/rt/filemap-fix-up.patch | 23 + .../all/rt/fix-rt-int3-x86_32-3.2-rt.patch | 113 + .../all/rt/fixup_opencoded_completions.patch | 55 + .../features/all/rt/fs-block-rt-support.patch | 21 + ...cache-use-cpu-chill-in-trylock-loops.patch | 86 + ...jbd-pull-plug-when-waiting-for-space.patch | 30 + .../all/rt/fs-jbd-replace-bh_state-lock.patch | 101 + ...ull-your-plug-when-waiting-for-space.patch | 33 + .../all/rt/fs-namespace-preemption-fix.patch | 31 + .../rt/fs-ntfs-disable-interrupt-non-rt.patch | 60 + .../fs-replace-bh_uptodate_lock-for-rt.patch | 162 ++ 
.../rt/ftrace-migrate-disable-tracing.patch | 82 + .../all/rt/futex-requeue-pi-fix.patch | 113 + .../all/rt/genirq-disable-irqpoll-on-rt.patch | 38 + ...voke-the-affinity-callback-via-a-wor.patch | 146 ++ .../all/rt/genirq-force-threading.patch | 46 + .../all/rt/genirq-nodebug-shirq.patch | 21 + .../rt/hotplug-light-get-online-cpus.patch | 205 ++ ...ync_unplug-no-27-5cn-27-in-task-name.patch | 25 + .../all/rt/hotplug-use-migrate-disable.patch | 37 + ...-schedule_work-call-to-helper-thread.patch | 119 ++ ...timer-callback-changes-for-preempt-r.patch | 453 +++++ ...raise-softirq-if-hrtimer-irq-stalled.patch | 38 + .../rt/hrtimers-prepare-full-preemption.patch | 196 ++ ...on-t-ignore-threshold-module-paramet.patch | 27 + ...pdate-hwlat_detector-to-add-outer-lo.patch | 128 ++ ...r-Use-thread-instead-of-stop-machine.patch | 185 ++ ...r-Use-trace_clock_local-if-available.patch | 94 + .../patches/features/all/rt/hwlatdetect.patch | 1348 ++++++++++++ ...-omap-drop-the-lock-hard-irq-context.patch | 35 + .../features/all/rt/i915_compile_fix.patch | 23 + .../rt/ide-use-nort-local-irq-variants.patch | 170 ++ .../idr-use-local-lock-for-protection.patch | 97 + .../infiniband-mellanox-ib-use-nort-irq.patch | 41 + .../rt/inpt-gameport-use-local-irq-nort.patch | 45 + .../features/all/rt/ipc-make-rt-aware.patch | 89 + ...critical-section-to-avoid-a-deadlock.patch | 65 + .../rt/ipc-sem-rework-semaphore-wakeups.patch | 70 + ...irq-processing-in-irq-thread-context.patch | 145 ++ ...low-certain-work-in-hard-irq-context.patch | 159 ++ .../features/all/rt/jump-label-rt.patch | 22 + .../rt/kconfig-disable-a-few-options-rt.patch | 45 + .../all/rt/kconfig-preempt-rt-full.patch | 57 + ...pu-down-problem-if-kthread-s-cpu-is-.patch | 88 + ...restore-original-cpu-mask-oncpu-down.patch | 60 + ...timer-be-non-freezeable-in-cpu_chill.patch | 59 + .../all/rt/kgb-serial-hackaround.patch | 103 + .../features/all/rt/latency-hist.patch | 1810 +++++++++++++++++ ...ds-trigger-disable-CPU-trigger-on-RT.patch | 37 + .../patches/features/all/rt/lglocks-rt.patch | 176 ++ ..._bl.h-make-list-head-locking-RT-safe.patch | 115 ++ .../rt/local-irq-rt-depending-variants.patch | 53 + .../patches/features/all/rt/local-var.patch | 24 + .../all/rt/local-vars-migrate-disable.patch | 47 + .../features/all/rt/localversion.patch | 16 + .../lockdep-no-softirq-accounting-on-rt.patch | 57 + ...ardirq-context-test-for-raw-spinlock.patch | 57 + .../features/all/rt/md-disable-bcache.patch | 36 + .../md-raid5-percpu-handling-rt-aware.patch | 62 + .../all/rt/migrate-disable-rt-variant.patch | 28 + ...pushd-down-in-atomic_dec_and_spin_lo.patch | 30 + ...pushd-down-in-rt_spin_trylock_irqsav.patch | 30 + ...pushd-down-in-rt_write_trylock_irqsa.patch | 27 + .../all/rt/mips-disable-highmem-on-rt.patch | 21 + .../rt/mips-enable-interrupts-in-signal.patch | 20 + .../rt/mm-bounce-local-irq-save-nort.patch | 28 + .../all/rt/mm-cgroup-page-bit-spinlock.patch | 92 + .../rt/mm-convert-swap-to-percpu-locked.patch | 132 ++ .../features/all/rt/mm-disable-sloub-rt.patch | 30 + .../features/all/rt/mm-enable-slub.patch | 395 ++++ .../all/rt/mm-make-vmstat-rt-aware.patch | 85 + ...n-t-call-schedule_work_on-in-preempt.patch | 70 + .../features/all/rt/mm-page-alloc-fix.patch | 23 + .../mm-page-alloc-use-list-last-entry.patch | 21 + ...e-alloc-use-local-lock-on-target-cpu.patch | 28 + ...e_alloc-reduce-lock-sections-further.patch | 189 ++ ...page_alloc-rt-friendly-per-cpu-pages.patch | 202 ++ .../mm-prepare-pf-disable-discoupling.patch | 119 ++ 
.../rt/mm-protect-activate-switch-mm.patch | 70 + .../rt/mm-remove-preempt-count-from-pf.patch | 30 + .../all/rt/mm-rt-kmap-atomic-scheduling.patch | 275 +++ ...-scatterlist-dont-disable-irqs-on-RT.patch | 39 + .../all/rt/mm-vmalloc-use-get-cpu-light.patch | 63 + .../all/rt/mmci-remove-bogus-irq-save.patch | 40 + .../move_sched_delayed_work_to_helper.patch | 95 + .../features/all/rt/mutex-no-spin-on-rt.patch | 18 + ...al-irq-disable-alloc-atomic-headache.patch | 48 + ...ble-xt-write-recseq-begin-rt-fallout.patch | 74 + .../all/rt/net-flip-lock-dep-thingy.patch | 112 + ...et-gianfar-do-not-disable-interrupts.patch | 246 +++ ...ot-try-to-cleanup-TX-packets-if-they.patch | 68 + ...ast_reply-add-missing-local-serializ.patch | 102 + .../net-make-devnet_rename_seq-a-mutex.patch | 105 + ...net-netif-rx-ni-use-local-bh-disable.patch | 32 + .../rt/net-netif_rx_ni-migrate-disable.patch | 26 + ...activate_many-use-msleep-1-instead-o.patch | 60 + .../net-tx-action-avoid-livelock-on-rt.patch | 93 + .../features/all/rt/net-use-cpu-chill.patch | 63 + ...e-cpu-light-in-ip-send-unicast-reply.patch | 31 + .../all/rt/net-wireless-warn-nort.patch | 21 + .../features/all/rt/oleg-signal-rt-fix.patch | 143 ++ .../all/rt/panic-disable-random-on-rt.patch | 22 + ...ce-rcu-bh-qs-where-safe-from-softirq.patch | 166 ++ .../pci-access-use-__wake_up_all_locked.patch | 26 + .../all/rt/percpu-rwsem-compilefix.patch | 28 + .../all/rt/percpu_ida-use-locklocks.patch | 103 + .../perf-make-swevent-hrtimer-irqsafe.patch | 69 + .../perf-move-irq-work-to-softirq-in-rt.patch | 72 + ...eter_zijlstra-frob-migrate_disable-2.patch | 175 ++ .../peter_zijlstra-frob-migrate_disable.patch | 68 + ...eter_zijlstra-frob-pagefault_disable.patch | 332 +++ .../all/rt/peter_zijlstra-frob-rcu.patch | 167 ++ .../all/rt/peterz-raw_pagefault_disable.patch | 173 ++ .../all/rt/peterz-srcu-crypto-chain.patch | 183 ++ .../all/rt/pid-h-include-atomic-h.patch | 20 + .../patches/features/all/rt/ping-sysrq.patch | 122 ++ ...id-wakeups-when-no-timers-are-active.patch | 58 + .../all/rt/posix-timers-no-broadcast.patch | 34 + ...six-timers-shorten-cpu-timers-thread.patch | 27 + ...timers-thread-posix-cpu-timers-on-rt.patch | 321 +++ .../all/rt/power-disable-highmem-on-rt.patch | 21 + .../rt/power-use-generic-rwsem-on-rt.patch | 24 + .../all/rt/powerpc-preempt-lazy-support.patch | 172 ++ .../all/rt/preempt-lazy-support.patch | 606 ++++++ .../all/rt/preempt-nort-rt-variants.patch | 48 + ...27-boot-param-to-help-with-debugging.patch | 36 + .../patches/features/all/rt/printk-kill.patch | 169 ++ .../features/all/rt/printk-rt-aware.patch | 102 + ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 162 ++ .../features/all/rt/radix-tree-rt-aware.patch | 69 + .../all/rt/random-make-it-work-on-rt.patch | 119 ++ ...nate-softirq-processing-from-rcutree.patch | 409 ++++ .../rt/rcu-disable-rcu-fast-no-hz-on-rt.patch | 25 + .../rt/rcu-make-RCU_BOOST-default-on-RT.patch | 28 + ...merge-rcu-bh-into-rcu-preempt-for-rt.patch | 261 +++ .../all/rt/rcu-more-swait-conversions.patch | 117 ++ .../features/all/rt/rcu-tiny-merge-bh.patch | 28 + ...s-disable-irq-while-calling-rcu_pree.patch | 50 + ...ate_disable-race-with-cpu-hotplug-3f.patch | 35 + ...l-arm-coredump-fails-for-cpu-3e-3d-4.patch | 69 + ...ate_disable-pushdown-to-rt_read_lock.patch | 174 ++ .../all/rt/relay-fix-timer-madness.patch | 53 + .../resource-counters-use-localirq-nort.patch | 87 + ...necessary-do-while-0-in-read-write-_.patch | 68 + ..._chill-use-hrtimer-instead-of-msleep.patch | 90 + 
.../features/all/rt/rt-add-rt-locks.patch | 904 ++++++++ .../rt/rt-add-rt-spinlock-to-headers.patch | 121 ++ .../all/rt/rt-add-rt-to-mutex-headers.patch | 141 ++ .../all/rt/rt-introduce-cpu-chill.patch | 29 + .../features/all/rt/rt-local-irq-lock.patch | 267 +++ ...mutex-add-sleeping-spinlocks-support.patch | 599 ++++++ .../all/rt/rt-preempt-base-config.patch | 50 + .../all/rt/rt-rw-lockdep-annotations.patch | 127 ++ ...o-not-compare-cpu-masks-in-scheduler.patch | 39 + ...grate_disable-ignore-bounded-threads.patch | 69 + ...ve-task_numa_free-to-__put_task_stru.patch | 48 + ...actual-migration-disalbe-to-schedule.patch | 306 +++ .../features/all/rt/rt-serial-warn-fix.patch | 38 + ...acing-show-padding-as-unsigned-short.patch | 46 + ...rtmutex-add-a-first-shot-of-ww_mutex.patch | 391 ++++ .../all/rt/rtmutex-avoid-include-hell.patch | 21 + .../all/rt/rtmutex-futex-prepare-rt.patch | 215 ++ .../all/rt/rtmutex-lock-killable.patch | 81 + ...a-trylock-for-waiter-lock-in-trylock.patch | 125 ++ ...rn-value-in-__mutex_lock_check_stamp.patch | 28 + .../all/rt/rwsem-add-rt-variant.patch | 180 ++ ...-better-debug-output-for-might_sleep.patch | 77 + ...ched_reset_on_fork-when-nothing-else.patch | 30 + ...d-Check-for-idle-task-in-might_sleep.patch | 29 + ...Consider-pi-boosting-in-setscheduler.patch | 168 ++ .../rt/sched-Fix-broken-setscheduler.patch | 79 + .../sched-Init-idle-on_rq-in-init_idle.patch | 40 + ...eue-RT-tasks-to-head-when-prio-drops.patch | 73 + ...clear-pf-thread-bound-on-fallback-rq.patch | 25 + .../features/all/rt/sched-cond-resched.patch | 32 + .../all/rt/sched-delay-put-task.patch | 79 + .../sched-disable-rt-group-sched-on-rt.patch | 29 + .../all/rt/sched-disable-ttwu-queue.patch | 28 + ...late-hweight-in-update_migrate_disab.patch | 51 + .../all/rt/sched-limit-nr-migrate.patch | 24 + ...might-sleep-do-not-account-rcu-depth.patch | 46 + .../all/rt/sched-migrate-disable.patch | 193 ++ .../all/rt/sched-mmdrop-delayed.patch | 135 ++ .../sched-rt-fix-migrate_enable-thinko.patch | 64 + .../all/rt/sched-rt-mutex-wakeup.patch | 88 + ...igrate_disable-about-atomic-contexts.patch | 89 + ...twu-ensure-success-return-is-correct.patch | 35 + ...Only-wake-up-idle-workers-if-not-blo.patch | 40 + .../features/all/rt/scsi-fcoe-rt-aware.patch | 112 + ...function-called-from-invalid-context.patch | 48 + ...ate-spin_lock-unlock-waiting-with-sp.patch | 71 + .../rt/seqlock-prevent-rt-starvation.patch | 188 ++ .../all/rt/signal-fix-up-rcu-wreckage.patch | 36 + .../signal-revert-ptrace-preempt-magic.patch | 28 + ...t-tasks-to-cache-one-sigqueue-struct.patch | 214 ++ ...me-and-export-the-equivalent-of-wait.patch | 64 + .../features/all/rt/skbufhead-raw-lock.patch | 128 ++ .../all/rt/slub-enable-irqs-for-no-wait.patch | 47 + .../all/rt/slub_delay_ctor_on_rt.patch | 36 + ...ohz-pending-debug-code-to-new-scheme.patch | 175 ++ ...oftirq-disable-softirq-stacks-for-rt.patch | 170 ++ ...lock-after-per-cpu-section-is-set-up.patch | 134 ++ .../features/all/rt/softirq-local-lock.patch | 359 ++++ .../features/all/rt/softirq-make-fifo.patch | 51 + ...able-enable-conditioned-on-softirq_n.patch | 67 + ...rq-make-serving-softirqs-a-task-flag.patch | 75 + .../all/rt/softirq-preempt-fix-3-re.patch | 146 ++ .../rt/softirq-sanitize-softirq-pending.patch | 113 + .../rt/softirq-split-handling-function.patch | 66 + .../features/all/rt/softirq-split-locks.patch | 455 +++++ .../all/rt/softirq-split-out-code.patch | 102 + .../all/rt/softirq-thread-do-softirq.patch | 33 + ...sparc-provide-EARLY_PRINTK-for-SPARC.patch | 36 + 
.../all/rt/spinlock-types-separate-raw.patch | 205 ++ .../all/rt/stomp-machine-raw-lock.patch | 195 ++ ...nvert-stop_machine_run-to-PREEMPT_RT.patch | 35 + .../suspend-prevernt-might-sleep-splats.patch | 107 + .../all/rt/sysctl-include-atomic-h.patch | 20 + .../all/rt/sysfs-realtime-entry.patch | 48 + ...-from-going-into-infinite-spin-in-rt.patch | 401 ++++ .../tasklist-lock-fix-section-conflict.patch | 58 + .../rt/timekeeping-split-jiffies-lock.patch | 149 ++ ...er-Raise-softirq-if-there-s-irq_work.patch | 47 + ...-waking-softirqs-from-the-jiffy-tick.patch | 76 + .../all/rt/timer-fd-avoid-live-lock.patch | 31 + ...e-idle-trylock-in-get-next-timer-irq.patch | 79 + ...raise-the-softirq-if-there-s-irq_wor.patch | 67 + ...id-the-base-null-otptimization-on-rt.patch | 69 + ...do-not-raise-softirq-unconditionally.patch | 162 ++ .../all/rt/timers-preempt-rt-support.patch | 57 + ...-prepare-for-full-preemption-improve.patch | 57 + .../timers-prepare-for-full-preemption.patch | 129 ++ ...-for-preempt-off-in-preempt_schedule.patch | 47 + .../all/rt/treercu-use-simple-waitqueue.patch | 80 + ...ove-preemption-disabling-in-netif_rx.patch | 66 + ...fix-mouse-problem-copying-large-data.patch | 37 + .../all/rt/usb-use-_nort-in-giveback.patch | 59 + .../use-local-spin_locks-in-local_lock.patch | 83 + .../all/rt/user-use-local-irq-nort.patch | 30 + .../rt/vtime-split-lock-and-seqcount.patch | 203 ++ .../all/rt/wait-simple-implementation.patch | 344 ++++ .../wait-simple-rework-for-completions.patch | 218 ++ .../all/rt/wait.h-include-atomic.h.patch | 35 + ...rk-around-irqsafe-timer-optimization.patch | 133 ++ .../rt/workqueue-distangle-from-rq-lock.patch | 260 +++ .../all/rt/workqueue-use-locallock.patch | 151 ++ .../features/all/rt/workqueue-use-rcu.patch | 309 +++ ...te_disable-pushdown-to-rt_write_lock.patch | 145 ++ ...ypto-reduce-preempt-disabled-regions.patch | 113 + .../all/rt/x86-disable-debug-stack.patch | 103 + .../all/rt/x86-io-apic-migra-no-unmask.patch | 27 + .../rt/x86-kvm-require-const-tsc-for-rt.patch | 26 + ...ce-wakeups-to-threads-for-PREEMPT_RT.patch | 164 ++ .../all/rt/x86-mce-timer-hrtimer.patch | 190 ++ .../features/all/rt/x86-preempt-lazy.patch | 177 ++ .../rt/x86-stackprot-no-random-on-rt.patch | 48 + .../rt/x86-use-gen-rwsem-spinlocks-rt.patch | 29 + debian/patches/series-rt | 652 ++++++ 311 files changed, 34937 insertions(+), 1 deletion(-) create mode 100644 debian/patches/features/all/rt/0001-sparc64-use-generic-rwsem-spinlocks-rt.patch create mode 100644 debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch create mode 100644 debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch create mode 100644 debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch create mode 100644 debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch create mode 100644 debian/patches/features/all/rt/API-cleanup-use-local_lock-not-__local_lock-for-soft.patch create mode 100644 debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch create mode 100644 debian/patches/features/all/rt/Revert-x86-Disable-IST-stacks-for-debug-int-3-stack-.patch create mode 100644 debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch create mode 100644 debian/patches/features/all/rt/allow-preemption-in-recursive-migrate_disable-call.patch create mode 100644 debian/patches/features/all/rt/arch-use-pagefault-disabled.patch create mode 100644 
debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch create mode 100644 debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch create mode 100644 debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch create mode 100644 debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch create mode 100644 debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch create mode 100644 debian/patches/features/all/rt/arm-preempt-lazy-support.patch create mode 100644 debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch create mode 100644 debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch create mode 100644 debian/patches/features/all/rt/block-mq-use-cpu_light.patch create mode 100644 debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch create mode 100644 debian/patches/features/all/rt/block-use-cpu-chill.patch create mode 100644 debian/patches/features/all/rt/bug-rt-dependend-variants.patch create mode 100644 debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch create mode 100644 debian/patches/features/all/rt/completion-use-simple-wait-queues.patch create mode 100644 debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch create mode 100644 debian/patches/features/all/rt/cond-resched-softirq-rt.patch create mode 100644 debian/patches/features/all/rt/condition-migration_disable-on-lock-acquisition.patch create mode 100644 debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch create mode 100644 debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch create mode 100644 debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch create mode 100644 debian/patches/features/all/rt/cpu-rt-variants.patch create mode 100644 debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch create mode 100644 debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch create mode 100644 debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch create mode 100644 debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch create mode 100644 debian/patches/features/all/rt/debugobjects-rt.patch create mode 100644 debian/patches/features/all/rt/disable-lazy-preempt-on-x86-64.patch create mode 100644 debian/patches/features/all/rt/dm-make-rt-aware.patch create mode 100644 debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch create mode 100644 debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch create mode 100644 debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch create mode 100644 debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch create mode 100644 debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch create mode 100644 debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch create mode 100644 debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch create mode 100644 debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch create mode 100644 debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch create mode 100644 debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch create mode 100644 debian/patches/features/all/rt/early-printk-consolidate.patch create mode 100644 
debian/patches/features/all/rt/epoll-use-get-cpu-light.patch create mode 100644 debian/patches/features/all/rt/filemap-fix-up.patch create mode 100644 debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch create mode 100644 debian/patches/features/all/rt/fixup_opencoded_completions.patch create mode 100644 debian/patches/features/all/rt/fs-block-rt-support.patch create mode 100644 debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch create mode 100644 debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch create mode 100644 debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch create mode 100644 debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch create mode 100644 debian/patches/features/all/rt/fs-namespace-preemption-fix.patch create mode 100644 debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch create mode 100644 debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch create mode 100644 debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch create mode 100644 debian/patches/features/all/rt/futex-requeue-pi-fix.patch create mode 100644 debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch create mode 100644 debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch create mode 100644 debian/patches/features/all/rt/genirq-force-threading.patch create mode 100644 debian/patches/features/all/rt/genirq-nodebug-shirq.patch create mode 100644 debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch create mode 100644 debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch create mode 100644 debian/patches/features/all/rt/hotplug-use-migrate-disable.patch create mode 100644 debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch create mode 100644 debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch create mode 100644 debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch create mode 100644 debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch create mode 100644 debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch create mode 100644 debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch create mode 100644 debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch create mode 100644 debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch create mode 100644 debian/patches/features/all/rt/hwlatdetect.patch create mode 100644 debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch create mode 100644 debian/patches/features/all/rt/i915_compile_fix.patch create mode 100644 debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch create mode 100644 debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch create mode 100644 debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch create mode 100644 debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch create mode 100644 debian/patches/features/all/rt/ipc-make-rt-aware.patch create mode 100644 debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch create mode 100644 debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch create mode 100644 
debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch create mode 100644 debian/patches/features/all/rt/irq_work-allow-certain-work-in-hard-irq-context.patch create mode 100644 debian/patches/features/all/rt/jump-label-rt.patch create mode 100644 debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch create mode 100644 debian/patches/features/all/rt/kconfig-preempt-rt-full.patch create mode 100644 debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch create mode 100644 debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch create mode 100644 debian/patches/features/all/rt/kernel-hrtimer-be-non-freezeable-in-cpu_chill.patch create mode 100644 debian/patches/features/all/rt/kgb-serial-hackaround.patch create mode 100644 debian/patches/features/all/rt/latency-hist.patch create mode 100644 debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch create mode 100644 debian/patches/features/all/rt/lglocks-rt.patch create mode 100644 debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch create mode 100644 debian/patches/features/all/rt/local-irq-rt-depending-variants.patch create mode 100644 debian/patches/features/all/rt/local-var.patch create mode 100644 debian/patches/features/all/rt/local-vars-migrate-disable.patch create mode 100644 debian/patches/features/all/rt/localversion.patch create mode 100644 debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch create mode 100644 debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch create mode 100644 debian/patches/features/all/rt/md-disable-bcache.patch create mode 100644 debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch create mode 100644 debian/patches/features/all/rt/migrate-disable-rt-variant.patch create mode 100644 debian/patches/features/all/rt/migrate_disable-pushd-down-in-atomic_dec_and_spin_lo.patch create mode 100644 debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_spin_trylock_irqsav.patch create mode 100644 debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_write_trylock_irqsa.patch create mode 100644 debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch create mode 100644 debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch create mode 100644 debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch create mode 100644 debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch create mode 100644 debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch create mode 100644 debian/patches/features/all/rt/mm-disable-sloub-rt.patch create mode 100644 debian/patches/features/all/rt/mm-enable-slub.patch create mode 100644 debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch create mode 100644 debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch create mode 100644 debian/patches/features/all/rt/mm-page-alloc-fix.patch create mode 100644 debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch create mode 100644 debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch create mode 100644 debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch create mode 100644 debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch create mode 100644 
debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch create mode 100644 debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch create mode 100644 debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch create mode 100644 debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch create mode 100644 debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch create mode 100644 debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch create mode 100644 debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch create mode 100644 debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch create mode 100644 debian/patches/features/all/rt/mutex-no-spin-on-rt.patch create mode 100644 debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch create mode 100644 debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch create mode 100644 debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch create mode 100644 debian/patches/features/all/rt/net-gianfar-do-not-disable-interrupts.patch create mode 100644 debian/patches/features/all/rt/net-gianfar-do-not-try-to-cleanup-TX-packets-if-they.patch create mode 100644 debian/patches/features/all/rt/net-ip_send_unicast_reply-add-missing-local-serializ.patch create mode 100644 debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch create mode 100644 debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch create mode 100644 debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch create mode 100644 debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch create mode 100644 debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch create mode 100644 debian/patches/features/all/rt/net-use-cpu-chill.patch create mode 100644 debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch create mode 100644 debian/patches/features/all/rt/net-wireless-warn-nort.patch create mode 100644 debian/patches/features/all/rt/oleg-signal-rt-fix.patch create mode 100644 debian/patches/features/all/rt/panic-disable-random-on-rt.patch create mode 100644 debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch create mode 100644 debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch create mode 100644 debian/patches/features/all/rt/percpu-rwsem-compilefix.patch create mode 100644 debian/patches/features/all/rt/percpu_ida-use-locklocks.patch create mode 100644 debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch create mode 100644 debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch create mode 100644 debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch create mode 100644 debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch create mode 100644 debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch create mode 100644 debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch create mode 100644 debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch create mode 100644 debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch create mode 100644 debian/patches/features/all/rt/pid-h-include-atomic-h.patch create mode 100644 debian/patches/features/all/rt/ping-sysrq.patch create mode 100644 
debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch create mode 100644 debian/patches/features/all/rt/posix-timers-no-broadcast.patch create mode 100644 debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch create mode 100644 debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch create mode 100644 debian/patches/features/all/rt/power-disable-highmem-on-rt.patch create mode 100644 debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch create mode 100644 debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch create mode 100644 debian/patches/features/all/rt/preempt-lazy-support.patch create mode 100644 debian/patches/features/all/rt/preempt-nort-rt-variants.patch create mode 100644 debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch create mode 100644 debian/patches/features/all/rt/printk-kill.patch create mode 100644 debian/patches/features/all/rt/printk-rt-aware.patch create mode 100644 debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch create mode 100644 debian/patches/features/all/rt/radix-tree-rt-aware.patch create mode 100644 debian/patches/features/all/rt/random-make-it-work-on-rt.patch create mode 100644 debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch create mode 100644 debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch create mode 100644 debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch create mode 100644 debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch create mode 100644 debian/patches/features/all/rt/rcu-more-swait-conversions.patch create mode 100644 debian/patches/features/all/rt/rcu-tiny-merge-bh.patch create mode 100644 debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch create mode 100644 debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch create mode 100644 debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch create mode 100644 debian/patches/features/all/rt/read_lock-migrate_disable-pushdown-to-rt_read_lock.patch create mode 100644 debian/patches/features/all/rt/relay-fix-timer-madness.patch create mode 100644 debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch create mode 100644 debian/patches/features/all/rt/rt-Cleanup-of-unnecessary-do-while-0-in-read-write-_.patch create mode 100644 debian/patches/features/all/rt/rt-Make-cpu_chill-use-hrtimer-instead-of-msleep.patch create mode 100644 debian/patches/features/all/rt/rt-add-rt-locks.patch create mode 100644 debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch create mode 100644 debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch create mode 100644 debian/patches/features/all/rt/rt-introduce-cpu-chill.patch create mode 100644 debian/patches/features/all/rt/rt-local-irq-lock.patch create mode 100644 debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch create mode 100644 debian/patches/features/all/rt/rt-preempt-base-config.patch create mode 100644 debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch create mode 100644 debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch create mode 100644 debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch create mode 100644 
debian/patches/features/all/rt/rt-sched-numa-Move-task_numa_free-to-__put_task_stru.patch create mode 100644 debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch create mode 100644 debian/patches/features/all/rt/rt-serial-warn-fix.patch create mode 100644 debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch create mode 100644 debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch create mode 100644 debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch create mode 100644 debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch create mode 100644 debian/patches/features/all/rt/rtmutex-lock-killable.patch create mode 100644 debian/patches/features/all/rt/rtmutex-use-a-trylock-for-waiter-lock-in-trylock.patch create mode 100644 debian/patches/features/all/rt/rtmutex-ww-bad-return-value-in-__mutex_lock_check_stamp.patch create mode 100644 debian/patches/features/all/rt/rwsem-add-rt-variant.patch create mode 100644 debian/patches/features/all/rt/sched-Add-better-debug-output-for-might_sleep.patch create mode 100644 debian/patches/features/all/rt/sched-Adjust-p-sched_reset_on_fork-when-nothing-else.patch create mode 100644 debian/patches/features/all/rt/sched-Check-for-idle-task-in-might_sleep.patch create mode 100644 debian/patches/features/all/rt/sched-Consider-pi-boosting-in-setscheduler.patch create mode 100644 debian/patches/features/all/rt/sched-Fix-broken-setscheduler.patch create mode 100644 debian/patches/features/all/rt/sched-Init-idle-on_rq-in-init_idle.patch create mode 100644 debian/patches/features/all/rt/sched-Queue-RT-tasks-to-head-when-prio-drops.patch create mode 100644 debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch create mode 100644 debian/patches/features/all/rt/sched-cond-resched.patch create mode 100644 debian/patches/features/all/rt/sched-delay-put-task.patch create mode 100644 debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch create mode 100644 debian/patches/features/all/rt/sched-disable-ttwu-queue.patch create mode 100644 debian/patches/features/all/rt/sched-dont-calculate-hweight-in-update_migrate_disab.patch create mode 100644 debian/patches/features/all/rt/sched-limit-nr-migrate.patch create mode 100644 debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch create mode 100644 debian/patches/features/all/rt/sched-migrate-disable.patch create mode 100644 debian/patches/features/all/rt/sched-mmdrop-delayed.patch create mode 100644 debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch create mode 100644 debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch create mode 100644 debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch create mode 100644 debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch create mode 100644 debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch create mode 100644 debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch create mode 100644 debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch create mode 100644 debian/patches/features/all/rt/seqlock-consolidate-spin_lock-unlock-waiting-with-sp.patch create mode 100644 debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch create mode 100644 debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch create mode 100644 
debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch create mode 100644 debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch create mode 100644 debian/patches/features/all/rt/simple-wait-rename-and-export-the-equivalent-of-wait.patch create mode 100644 debian/patches/features/all/rt/skbufhead-raw-lock.patch create mode 100644 debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch create mode 100644 debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch create mode 100644 debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch create mode 100644 debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch create mode 100644 debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch create mode 100644 debian/patches/features/all/rt/softirq-local-lock.patch create mode 100644 debian/patches/features/all/rt/softirq-make-fifo.patch create mode 100644 debian/patches/features/all/rt/softirq-make-migrate-disable-enable-conditioned-on-softirq_n.patch create mode 100644 debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch create mode 100644 debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch create mode 100644 debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch create mode 100644 debian/patches/features/all/rt/softirq-split-handling-function.patch create mode 100644 debian/patches/features/all/rt/softirq-split-locks.patch create mode 100644 debian/patches/features/all/rt/softirq-split-out-code.patch create mode 100644 debian/patches/features/all/rt/softirq-thread-do-softirq.patch create mode 100644 debian/patches/features/all/rt/sparc-provide-EARLY_PRINTK-for-SPARC.patch create mode 100644 debian/patches/features/all/rt/spinlock-types-separate-raw.patch create mode 100644 debian/patches/features/all/rt/stomp-machine-raw-lock.patch create mode 100644 debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch create mode 100644 debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch create mode 100644 debian/patches/features/all/rt/sysctl-include-atomic-h.patch create mode 100644 debian/patches/features/all/rt/sysfs-realtime-entry.patch create mode 100644 debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch create mode 100644 debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch create mode 100644 debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch create mode 100644 debian/patches/features/all/rt/timer-Raise-softirq-if-there-s-irq_work.patch create mode 100644 debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch create mode 100644 debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch create mode 100644 debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch create mode 100644 debian/patches/features/all/rt/timer-rt-Always-raise-the-softirq-if-there-s-irq_wor.patch create mode 100644 debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch create mode 100644 debian/patches/features/all/rt/timers-do-not-raise-softirq-unconditionally.patch create mode 100644 debian/patches/features/all/rt/timers-preempt-rt-support.patch create mode 100644 debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch create mode 100644 
debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch create mode 100644 debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch create mode 100644 debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch create mode 100644 debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch create mode 100644 debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch create mode 100644 debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch create mode 100644 debian/patches/features/all/rt/use-local-spin_locks-in-local_lock.patch create mode 100644 debian/patches/features/all/rt/user-use-local-irq-nort.patch create mode 100644 debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch create mode 100644 debian/patches/features/all/rt/wait-simple-implementation.patch create mode 100644 debian/patches/features/all/rt/wait-simple-rework-for-completions.patch create mode 100644 debian/patches/features/all/rt/wait.h-include-atomic.h.patch create mode 100644 debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch create mode 100644 debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch create mode 100644 debian/patches/features/all/rt/workqueue-use-locallock.patch create mode 100644 debian/patches/features/all/rt/workqueue-use-rcu.patch create mode 100644 debian/patches/features/all/rt/write_lock-migrate_disable-pushdown-to-rt_write_lock.patch create mode 100644 debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch create mode 100644 debian/patches/features/all/rt/x86-disable-debug-stack.patch create mode 100644 debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch create mode 100644 debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch create mode 100644 debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch create mode 100644 debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch create mode 100644 debian/patches/features/all/rt/x86-preempt-lazy.patch create mode 100644 debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch create mode 100644 debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch create mode 100644 debian/patches/series-rt diff --git a/debian/changelog b/debian/changelog index e13d72bd0..eb8990073 100644 --- a/debian/changelog +++ b/debian/changelog @@ -6,6 +6,7 @@ linux (3.14.1-1~exp1) UNRELEASED; urgency=medium [ Ben Hutchings ] * [armel] Disable ixp4xx flavour (fixes FTBFS) * [armhf] Enable SECURITY_APPARMOR, SECURITY_TOMOYO + * [rt] Update to 3.14.0-rt1 and reenable -- Ben Hutchings Mon, 14 Apr 2014 01:58:22 +0100 diff --git a/debian/config/defines b/debian/config/defines index 59a6a1cc3..76948a2d2 100644 --- a/debian/config/defines +++ b/debian/config/defines @@ -29,7 +29,7 @@ featuresets: rt [featureset-rt_base] -enabled: false +enabled: true [description] part-long-up: This kernel is not suitable for SMP (multi-processor, diff --git a/debian/patches/features/all/rt/0001-sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/0001-sparc64-use-generic-rwsem-spinlocks-rt.patch new file mode 100644 index 000000000..504cb95a2 --- /dev/null +++ b/debian/patches/features/all/rt/0001-sparc64-use-generic-rwsem-spinlocks-rt.patch @@ -0,0 +1,29 @@ +From d6a6675d436897cd1b09e299436df3499abd753e Mon Sep 17 00:00:00 2001 +From: Allen Pais +Date: Fri, 13 Dec 2013 09:44:41 +0530 +Subject: 
[PATCH 1/3] sparc64: use generic rwsem spinlocks rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Allen Pais +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/sparc/Kconfig | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/arch/sparc/Kconfig ++++ b/arch/sparc/Kconfig +@@ -179,12 +179,10 @@ config NR_CPUS + source kernel/Kconfig.hz + + config RWSEM_GENERIC_SPINLOCK +- bool +- default y if SPARC32 ++ def_bool PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM +- bool +- default y if SPARC64 ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_HWEIGHT + bool diff --git a/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch new file mode 100644 index 000000000..b4c547310 --- /dev/null +++ b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch @@ -0,0 +1,131 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 19 Mar 2013 14:44:30 +0100 +Subject: [PATCH] kernel/SRCU: provide a static initializer +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +There are macros for static initializer for the three out of four +possible notifier types, that are: + ATOMIC_NOTIFIER_HEAD() + BLOCKING_NOTIFIER_HEAD() + RAW_NOTIFIER_HEAD() + +This patch provides a static initilizer for the forth type to make it +complete. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/notifier.h | 34 +++++++++++++++++++++++++--------- + include/linux/srcu.h | 9 +++++---- + 2 files changed, 30 insertions(+), 13 deletions(-) + +--- a/include/linux/notifier.h ++++ b/include/linux/notifier.h +@@ -6,7 +6,7 @@ + * + * Alan Cox + */ +- ++ + #ifndef _LINUX_NOTIFIER_H + #define _LINUX_NOTIFIER_H + #include +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. 
+ */ + + typedef int (*notifier_fn_t)(struct notifier_block *nb, +@@ -88,7 +86,7 @@ struct srcu_notifier_head { + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ ++ ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++ ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) ++ ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) ++ + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int + + /* + * Declared notifiers so far. I can imagine quite a few more chains +- * over time (eg laptop power reset chains, reboot chain (to clean ++ * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, +- * low memory chain, screenblank chain (for plug in modular screenblankers) ++ * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ +- ++ + /* CPU notfiers are defined in include/linux/cpu.h. 
*/ + + /* netdevice notifiers are defined in include/linux/netdevice.h */ +--- a/include/linux/srcu.h ++++ b/include/linux/srcu.h +@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -104,11 +104,12 @@ void process_srcu(struct work_struct *wo + */ + #define DEFINE_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); + + #define DEFINE_STATIC_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- static struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ static struct srcu_struct name = __SRCU_STRUCT_INIT(\ ++ name, name##_srcu_array); + + /** + * call_srcu() - Queue a callback for invocation after an SRCU grace period diff --git a/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch new file mode 100644 index 000000000..69810d1a4 --- /dev/null +++ b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch @@ -0,0 +1,24 @@ +From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 17:09:55 +0100 +Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This is a copy from kmap_atomic_prot(). + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/mm/iomap_32.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ + #ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; + #endif diff --git a/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch new file mode 100644 index 000000000..47238244b --- /dev/null +++ b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch @@ -0,0 +1,29 @@ +From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 21:37:27 +0100 +Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The tlb should be flushed on unmap and thus make the mapping entry +invalid. This is only done in the non-debug case which does not look +right. 
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/mm/highmem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/mm/highmem.c ++++ b/arch/arm/mm/highmem.c +@@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); +- set_top_pte(vaddr, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_top_pte(vaddr, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ diff --git a/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch new file mode 100644 index 000000000..614e2fba9 --- /dev/null +++ b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -0,0 +1,45 @@ +From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 1 Mar 2013 11:17:42 +0100 +Subject: [PATCH 5/6] futex: Ensure lock/unlock symetry versus pi_lock and + hash bucket lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +In exit_pi_state_list() we have the following locking construct: + + spin_lock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); + + ... + spin_unlock(&hb->lock); + +In !RT this works, but on RT the migrate_enable() function which is +called from spin_unlock() sees atomic context due to the held pi_lock +and just decrements the migrate_disable_atomic counter of the +task. Now the next call to migrate_disable() sees the counter being +negative and issues a warning. That check should be in +migrate_enable() already. + +Fix this by dropping pi_lock before unlocking hb->lock and reaquire +pi_lock after that again. This is safe as the loop code reevaluates +head again under the pi_lock. + +Reported-by: Yong Zhang +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/futex.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -708,7 +708,9 @@ void exit_pi_state_list(struct task_stru + * task still owns the PI-state: + */ + if (head->next != next) { ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + continue; + } + diff --git a/debian/patches/features/all/rt/API-cleanup-use-local_lock-not-__local_lock-for-soft.patch b/debian/patches/features/all/rt/API-cleanup-use-local_lock-not-__local_lock-for-soft.patch new file mode 100644 index 000000000..cad38df20 --- /dev/null +++ b/debian/patches/features/all/rt/API-cleanup-use-local_lock-not-__local_lock-for-soft.patch @@ -0,0 +1,33 @@ +From 116a588e1e4b108bfd01b5ae8de602c12aec3323 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 17 Jan 2014 20:44:03 +0100 +Subject: [PATCH 7/7] API cleanup - use local_lock not __local_lock for soft +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +trivial API cleanup - kernel/softirq.c was mimiking local_lock. 
+ +No change of functional behavior + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/softirq.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -460,12 +460,12 @@ void __init softirq_early_init(void) + + static void lock_softirq(int which) + { +- __local_lock(&__get_cpu_var(local_softirq_locks[which])); ++ local_lock(local_softirq_locks[which]); + } + + static void unlock_softirq(int which) + { +- __local_unlock(&__get_cpu_var(local_softirq_locks[which])); ++ local_unlock(local_softirq_locks[which]); + } + + static void do_single_softirq(int which, int need_rcu_bh_qs) diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch new file mode 100644 index 000000000..a8a204543 --- /dev/null +++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch @@ -0,0 +1,78 @@ +From b72b514282ffad0d665ea94932b968f388304079 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 19:01:05 +0100 +Subject: [PATCH] HACK: printk: drop the logbuf_lock more often +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The lock is hold with irgs off. The latency drops 500us+ on my arm bugs +with a "full" buffer after executing "dmesg" on the shell. + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/printk/printk.c | 27 ++++++++++++++++++++++++++- + 1 file changed, 26 insertions(+), 1 deletion(-) + +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1025,6 +1025,7 @@ static int syslog_print_all(char __user + { + char *text; + int len = 0; ++ int attempts = 0; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); + if (!text) +@@ -1036,7 +1037,14 @@ static int syslog_print_all(char __user + u64 seq; + u32 idx; + enum log_flags prev; +- ++ int num_msg; ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; + if (clear_seq < log_first_seq) { + /* messages are gone, move to first available one */ + clear_seq = log_first_seq; +@@ -1057,6 +1065,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1070,6 +1086,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1110,6 +1134,7 @@ static int syslog_print_all(char __user + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + raw_spin_unlock_irq(&logbuf_lock); + + kfree(text); diff --git a/debian/patches/features/all/rt/Revert-x86-Disable-IST-stacks-for-debug-int-3-stack-.patch b/debian/patches/features/all/rt/Revert-x86-Disable-IST-stacks-for-debug-int-3-stack-.patch new file mode 100644 index 000000000..5e363868e --- /dev/null +++ b/debian/patches/features/all/rt/Revert-x86-Disable-IST-stacks-for-debug-int-3-stack-.patch @@ -0,0 +1,114 @@ +From 272e319977ab4feb79a28621215a9aca42d60003 Mon Sep 
17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 3 Jan 2014 14:55:48 +0100 +Subject: [PATCH] Revert "x86: Disable IST stacks for debug/int 3/stack fault + for PREEMPT_RT" +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +where do I start. Let me explain what is going on here. The code +sequence +| pushf +| pop %edx +| or $0x1,%dh +| push %edx +| mov $0xe0,%eax +| popf +| sysenter + +triggers the bug. On 64bit kernel we see the double fault (with 32bit and +64bit userland) and on 32bit kernel there is no problem. The reporter said +that double fault does not happen on 64bit kernel with 64bit userland and +this is because in that case the VDSO uses the "syscall" interface instead +of "sysenter". + +The bug. "popf" loads the flags with the TF bit set which enables +"single stepping" and this leads to a debug exception. Usually on 64bit +we have a special IST stack for the debug exception. Due to patch [0] we +do not use the IST stack but the kernel stack instead. On 64bit the +sysenter instruction starts in kernel with the stack address NULL. The +code sequence above enters the debug exception (TF flag) after the +sysenter instruction was executed which sets the stack pointer to NULL +and we have a fault (it seems that the debug exception saves some bytes +on the stack). +To fix the double fault I'm going to drop patch [0]. It is completely +pointless. In do_debug() and do_stack_segment() we disable preemption +which means the task can't leave the CPU. So it does not matter if we run +on IST or on kernel stack. +There is a patch [1] which drops preempt_disable() call for a 32bit +kernel but not for 64bit so there should be no regression. +And [1] seems valid even for this code sequence. We enter the debug +exception with a 256bytes long per cpu stack and migrate to the kernel +stack before calling do_debug(). + +[0] x86-disable-debug-stack.patch +[1] fix-rt-int3-x86_32-3.2-rt.patch + +Cc: stable-rt@vger.kernel.org +Reported-by: Brian Silverman +Cc: Andi Kleen +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/include/asm/page_64_types.h | 21 ++++++--------------- + arch/x86/kernel/cpu/common.c | 2 -- + arch/x86/kernel/dumpstack_64.c | 4 ---- + 3 files changed, 6 insertions(+), 21 deletions(-) + +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -14,21 +14,12 @@ + #define IRQ_STACK_ORDER 2 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +-#ifdef CONFIG_PREEMPT_RT_FULL +-# define STACKFAULT_STACK 0 +-# define DOUBLEFAULT_STACK 1 +-# define NMI_STACK 2 +-# define DEBUG_STACK 0 +-# define MCE_STACK 3 +-# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ +-#else +-# define STACKFAULT_STACK 1 +-# define DOUBLEFAULT_STACK 2 +-# define NMI_STACK 3 +-# define DEBUG_STACK 4 +-# define MCE_STACK 5 +-# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +-#endif ++#define STACKFAULT_STACK 1 ++#define DOUBLEFAULT_STACK 2 ++#define NMI_STACK 3 ++#define DEBUG_STACK 4 ++#define MCE_STACK 5 ++#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ + + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1116,9 +1116,7 @@ DEFINE_PER_CPU(struct task_struct *, fpu + */ + static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... 
N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, +-#if DEBUG_STACK > 0 + [DEBUG_STACK - 1] = DEBUG_STKSZ +-#endif + }; + + static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c +@@ -21,14 +21,10 @@ + (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) + + static char x86_stack_ids[][8] = { +-#if DEBUG_STACK > 0 + [ DEBUG_STACK-1 ] = "#DB", +-#endif + [ NMI_STACK-1 ] = "NMI", + [ DOUBLEFAULT_STACK-1 ] = "#DF", +-#if STACKFAULT_STACK > 0 + [ STACKFAULT_STACK-1 ] = "#SS", +-#endif + [ MCE_STACK-1 ] = "#MC", + #if DEBUG_STKSZ > EXCEPTION_STKSZ + [ N_EXCEPTION_STACKS ... diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch new file mode 100644 index 000000000..f3335fc32 --- /dev/null +++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch @@ -0,0 +1,173 @@ +From: Steven Rostedt +Date: Wed, 13 Feb 2013 09:26:05 -0500 +Subject: [PATCH] acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We hit the following bug with 3.6-rt: + +[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002 +[ 5.898991] no locks held by swapper/3/0. +[ 5.898993] Modules linked in: +[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 +[ 5.898997] Call Trace: +[ 5.899011] [] __schedule_bug+0x67/0x90 +[ 5.899028] [] __schedule+0x793/0x7a0 +[ 5.899032] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 +[ 5.899034] [] schedule+0x29/0x70 +[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002 +[ 5.899037] no locks held by swapper/7/0. +[ 5.899039] [] rt_spin_lock_slowlock+0xe5/0x2f0 +[ 5.899040] Modules linked in: +[ 5.899041] +[ 5.899045] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 +[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 +[ 5.899047] Call Trace: +[ 5.899049] [] rt_spin_lock+0x16/0x40 +[ 5.899052] [] __schedule_bug+0x67/0x90 +[ 5.899054] [] ? notifier_call_chain+0x80/0x80 +[ 5.899056] [] __schedule+0x793/0x7a0 +[ 5.899059] [] acpi_os_acquire_lock+0x1f/0x23 +[ 5.899062] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 +[ 5.899068] [] acpi_write_bit_register+0x33/0xb0 +[ 5.899071] [] schedule+0x29/0x70 +[ 5.899072] [] ? acpi_read_bit_register+0x33/0x51 +[ 5.899074] [] rt_spin_lock_slowlock+0xe5/0x2f0 +[ 5.899077] [] acpi_idle_enter_bm+0x8a/0x28e +[ 5.899079] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 +[ 5.899081] [] ? this_cpu_load+0x1a/0x30 +[ 5.899083] [] rt_spin_lock+0x16/0x40 +[ 5.899087] [] cpuidle_enter+0x19/0x20 +[ 5.899088] [] ? notifier_call_chain+0x80/0x80 +[ 5.899090] [] cpuidle_enter_state+0x17/0x50 +[ 5.899092] [] acpi_os_acquire_lock+0x1f/0x23 +[ 5.899094] [] cpuidle899101] [] ? + +As the acpi code disables interrupts in acpi_idle_enter_bm, and calls +code that grabs the acpi lock, it causes issues as the lock is currently +in RT a sleeping lock. + +The lock was converted from a raw to a sleeping lock due to some +previous issues, and tests that showed it didn't seem to matter. +Unfortunately, it did matter for one of our boxes. + +This patch converts the lock back to a raw lock. I've run this code on a +few of my own machines, one being my laptop that uses the acpi quite +extensively. I've been able to suspend and resume without issues. 
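+
+For illustration (sketch only, not part of this patch): on PREEMPT_RT a
+spinlock_t is backed by a sleeping rtmutex, so taking one from the
+irq-disabled idle path schedules while atomic. A raw_spinlock_t keeps
+the busy-waiting semantics and is safe there:
+
+        static DEFINE_RAW_SPINLOCK(hw_lock);    /* stand-in for acpi_gbl_hardware_lock */
+        unsigned long flags;
+
+        local_irq_disable();                    /* as in the acpi_idle_enter_bm() path */
+        raw_spin_lock_irqsave(&hw_lock, flags); /* spins, never sleeps, fine on RT */
+        /* ... touch the ACPI hardware registers ... */
+        raw_spin_unlock_irqrestore(&hw_lock, flags);
+        local_irq_enable();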
+ +[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ] + +Signed-off-by: Steven Rostedt +Cc: John Kacur +Cc: Clark Williams +Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/acpi/acpica/acglobal.h | 2 +- + drivers/acpi/acpica/hwregs.c | 4 ++-- + drivers/acpi/acpica/hwxface.c | 4 ++-- + drivers/acpi/acpica/utmutex.c | 4 ++-- + include/acpi/platform/aclinux.h | 14 ++++++++++++++ + 5 files changed, 21 insertions(+), 7 deletions(-) + +--- a/drivers/acpi/acpica/acglobal.h ++++ b/drivers/acpi/acpica/acglobal.h +@@ -253,7 +253,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend + * interrupt level + */ + ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */ +-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ ++ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock; + + /* Mutex for _OSI support */ +--- a/drivers/acpi/acpica/hwregs.c ++++ b/drivers/acpi/acpica/hwregs.c +@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo + ACPI_BITMASK_ALL_FIXED_STATUS, + ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* Clear the fixed events in PM1 A/B */ + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) + goto exit; +--- a/drivers/acpi/acpica/hwxface.c ++++ b/drivers/acpi/acpica/hwxface.c +@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32 + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* + * At this point, we know that the parent register is one of the +@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32 + + unlock_and_exit: + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + return_ACPI_STATUS(status); + } + +--- a/drivers/acpi/acpica/utmutex.c ++++ b/drivers/acpi/acpica/utmutex.c +@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(voi + return_ACPI_STATUS (status); + } + +- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); ++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void) + /* Delete the spinlocks */ + + acpi_os_delete_lock(acpi_gbl_gpe_lock); +- acpi_os_delete_lock(acpi_gbl_hardware_lock); ++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); + acpi_os_delete_lock(acpi_gbl_reference_count_lock); + + /* Delete the reader/writer lock */ +--- a/include/acpi/platform/aclinux.h ++++ b/include/acpi/platform/aclinux.h +@@ -73,6 +73,7 @@ + + #define acpi_cache_t struct kmem_cache + #define acpi_spinlock spinlock_t * ++#define acpi_raw_spinlock raw_spinlock_t * + #define acpi_cpu_flags unsigned long + + #else /* !__KERNEL__ */ +@@ -210,6 +211,19 @@ static inline acpi_thread_id acpi_os_get + }) + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock + ++#define acpi_os_create_raw_lock(__handle) 
\ ++({ \ ++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ ++ \ ++ if (lock) { \ ++ *(__handle) = lock; \ ++ raw_spin_lock_init(*(__handle)); \ ++ } \ ++ lock ? AE_OK : AE_NO_MEMORY; \ ++}) ++ ++#define acpi_os_delete_raw_lock(__handle) kfree(__handle) ++ + void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_size length); + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory + diff --git a/debian/patches/features/all/rt/allow-preemption-in-recursive-migrate_disable-call.patch b/debian/patches/features/all/rt/allow-preemption-in-recursive-migrate_disable-call.patch new file mode 100644 index 000000000..30e90c5d1 --- /dev/null +++ b/debian/patches/features/all/rt/allow-preemption-in-recursive-migrate_disable-call.patch @@ -0,0 +1,48 @@ +From 155cf657f6ddcade424253eb58d03a170dc9f64f Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Wed, 20 Nov 2013 07:22:09 +0800 +Subject: [PATCH 1/2] allow preemption in recursive migrate_disable call +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Minor cleanup in migrate_disable/migrate_enable. The recursive case +does not need to disable preemption as it is "pinned" to the current +cpu any way so it is safe to preempt it. + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/sched/core.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2695,13 +2695,12 @@ void migrate_disable(void) + WARN_ON_ONCE(p->migrate_disable_atomic); + #endif + +- preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; +- preempt_enable(); + return; + } + ++ preempt_disable(); + pin_current_cpu(); + p->migrate_disable = 1; + preempt_enable(); +@@ -2727,13 +2726,12 @@ void migrate_enable(void) + #endif + WARN_ON_ONCE(p->migrate_disable <= 0); + +- preempt_disable(); + if (migrate_disable_count(p) > 1) { + p->migrate_disable--; +- preempt_enable(); + return; + } + ++ preempt_disable(); + if (unlikely(migrate_disabled_updated(p))) { + /* + * See comment in update_migrate_disable() about locking. diff --git a/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch new file mode 100644 index 000000000..292d24bf5 --- /dev/null +++ b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch @@ -0,0 +1,277 @@ +Subject: mm: Fixup all fault handlers to check current->pagefault_disable +From: Thomas Gleixner +Date: Thu, 17 Mar 2011 11:32:28 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Necessary for decoupling pagefault disable from preempt count. 
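+
+For illustration, the per-task flag tested below is expected to be set
+and cleared by a pagefault_disable()/pagefault_enable() pair along these
+lines (a sketch under that assumption; the real definition comes with a
+separate patch of this series and may additionally pin the task to its
+CPU):
+
+        static inline void pagefault_disable(void)
+        {
+                current->pagefault_disabled++;
+                /* order the flag update against the faulting access */
+                barrier();
+        }
+
+        static inline void pagefault_enable(void)
+        {
+                barrier();
+                current->pagefault_disabled--;
+        }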
+ +Signed-off-by: Thomas Gleixner +--- + arch/alpha/mm/fault.c | 2 +- + arch/arm/mm/fault.c | 2 +- + arch/avr32/mm/fault.c | 3 ++- + arch/cris/mm/fault.c | 2 +- + arch/frv/mm/fault.c | 2 +- + arch/ia64/mm/fault.c | 2 +- + arch/m32r/mm/fault.c | 2 +- + arch/m68k/mm/fault.c | 2 +- + arch/microblaze/mm/fault.c | 2 +- + arch/mips/mm/fault.c | 2 +- + arch/mn10300/mm/fault.c | 2 +- + arch/parisc/mm/fault.c | 2 +- + arch/powerpc/mm/fault.c | 2 +- + arch/s390/mm/fault.c | 3 ++- + arch/score/mm/fault.c | 2 +- + arch/sh/mm/fault.c | 2 +- + arch/sparc/mm/fault_32.c | 2 +- + arch/sparc/mm/fault_64.c | 2 +- + arch/tile/mm/fault.c | 2 +- + arch/um/kernel/trap.c | 2 +- + arch/x86/mm/fault.c | 2 +- + arch/xtensa/mm/fault.c | 2 +- + 22 files changed, 24 insertions(+), 22 deletions(-) + +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns + + /* If we're in an interrupt context, or have no user context, + we must not take the fault. */ +- if (!mm || in_atomic()) ++ if (!mm || in_atomic() || current->pagefault_disabled) + goto no_context; + + #ifdef CONFIG_ALPHA_LARGE_VMALLOC +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsign + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + if (user_mode(regs)) +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) ++ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || ++ current->pagefault_disabled) + goto no_context; + + local_irq_enable(); +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c +@@ -113,7 +113,7 @@ do_page_fault(unsigned long address, str + * user context, we must not take the fault. + */ + +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + if (user_mode(regs)) +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c +@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + if (user_mode(__frame)) +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long addres + /* + * If we're in an interrupt or have no user context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c +@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled + goto bad_area_nosemaphore; + + if (error_code & ACE_USERMODE) +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + if (user_mode(regs)) +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c +@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(st + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -183,7 +183,7 @@ void do_page_fault(struct pt_regs *regs, + int fault; + unsigned int flags; + +- if (in_atomic()) ++ if (in_atomic() || current->pagefault_disabled) + goto no_context; + + tsk = current; +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + +- if (in_atomic() || mm == NULL) { ++ if (in_atomic() || mm == NULL || current->pagefault_disabled) { + if (!user_mode(regs)) { + rc = SIGSEGV; + goto bail; +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -291,7 +291,8 @@ static inline int do_exception(struct pt + * user context. + */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || ++ tsk->pagefault_disabled)) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c +@@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -324,7 +324,7 @@ asmlinkage void __kprobes do_sparc64_fau + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_enabled) + goto intr_or_no_mm; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_r + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm) { ++ if (in_atomic() || !mm || current->pagefault_disabled) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c +@@ -38,7 +38,7 @@ int handle_page_fault(unsigned long addr + * If the fault was during atomic operation, don't take the fault, just + * fail. + */ +- if (in_atomic()) ++ if (in_atomic() || current->pagefault_disabled) + goto out_nosemaphore; + + if (is_user) +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -1103,7 +1103,7 @@ static void __kprobes noinline + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c +@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) + /* If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) { ++ if (in_atomic() || !mm || current->pagefault_disabled) { + bad_page_fault(regs, address, SIGSEGV); + return; + } diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch new file mode 100644 index 000000000..9ecfc6154 --- /dev/null +++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch @@ -0,0 +1,58 @@ +From: Benedikt Spranger +Date: Sat, 6 Mar 2010 17:47:10 +0100 +Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Setup and remove the interrupt handler in clock event mode selection. +This avoids calling the (shared) interrupt handler when the device is +not used. + +Signed-off-by: Benedikt Spranger +Signed-off-by: Thomas Gleixner +[bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since +commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6. +Patch based on what Sami Pietikäinen suggested]. 
+Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/mach-at91/at91rm9200_time.c | 1 + + arch/arm/mach-at91/at91sam926x_time.c | 5 ++++- + 2 files changed, 5 insertions(+), 1 deletion(-) + +--- a/arch/arm/mach-at91/at91rm9200_time.c ++++ b/arch/arm/mach-at91/at91rm9200_time.c +@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mod + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: ++ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); + case CLOCK_EVT_MODE_RESUME: + irqmask = 0; + break; +--- a/arch/arm/mach-at91/at91sam926x_time.c ++++ b/arch/arm/mach-at91/at91sam926x_time.c +@@ -78,7 +78,7 @@ static struct clocksource pit_clk = { + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }; + +- ++static struct irqaction at91sam926x_pit_irq; + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ +@@ -87,6 +87,8 @@ pit_clkevt_mode(enum clock_event_mode mo + { + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: ++ /* Set up irq handler */ ++ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); + /* update clocksource counter */ + pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); + pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN +@@ -99,6 +101,7 @@ pit_clkevt_mode(enum clock_event_mode mo + case CLOCK_EVT_MODE_UNUSED: + /* disable irq, leaving the clocksource active */ + pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); ++ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); + break; + case CLOCK_EVT_MODE_RESUME: + break; diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch new file mode 100644 index 000000000..fc804096a --- /dev/null +++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch @@ -0,0 +1,33 @@ +From: Thomas Gleixner +Date: Sat, 1 May 2010 18:29:35 +0200 +Subject: ARM: at91: tclib: Default to tclib timer for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +RT is not too happy about the shared timer interrupt in AT91 +devices. Default to tclib timer for RT. + +Signed-off-by: Thomas Gleixner + +--- + drivers/misc/Kconfig | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -63,6 +63,7 @@ config ATMEL_PWM + config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on (AVR32 || ARCH_AT91) ++ default y if PREEMPT_RT_FULL + help + Select this if you want a library to allocate the Timer/Counter + blocks found on many Atmel processors. This facilitates using +@@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK + config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + bool "TC Block use 32 KiHz clock" + depends on ATMEL_TCB_CLKSRC +- default y ++ default y if !PREEMPT_RT_FULL + help + Select this to use 32 KiHz base clock rate as TC block clock + source for clock events. 
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch new file mode 100644 index 000000000..187273af9 --- /dev/null +++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch @@ -0,0 +1,361 @@ +From: Frank Rowand +Date: Mon, 19 Sep 2011 14:51:14 -0700 +Subject: [PATCH] preempt-rt: Convert arm boot_lock to raw +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The arm boot_lock is used by the secondary processor startup code. The locking +task is the idle thread, which has idle->sched_class == &idle_sched_class. +idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the +lock, the attempt to wake it when the lock becomes available will fail: + +try_to_wake_up() + ... + activate_task() + enqueue_task() + p->sched_class->enqueue_task(rq, p, flags) + +Fix by converting boot_lock to a raw spin lock. + +Signed-off-by: Frank Rowand +Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com +Signed-off-by: Thomas Gleixner +--- + arch/arm/mach-exynos/platsmp.c | 12 ++++++------ + arch/arm/mach-msm/platsmp.c | 10 +++++----- + arch/arm/mach-omap2/omap-smp.c | 10 +++++----- + arch/arm/mach-prima2/platsmp.c | 10 +++++----- + arch/arm/mach-spear/platsmp.c | 10 +++++----- + arch/arm/mach-sti/platsmp.c | 10 +++++----- + arch/arm/mach-ux500/platsmp.c | 10 +++++----- + arch/arm/plat-versatile/platsmp.c | 10 +++++----- + 8 files changed, 41 insertions(+), 41 deletions(-) + +--- a/arch/arm/mach-exynos/platsmp.c ++++ b/arch/arm/mach-exynos/platsmp.c +@@ -71,7 +71,7 @@ static void __iomem *scu_base_addr(void) + return (void __iomem *)(S5P_VA_SCU); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void exynos_secondary_init(unsigned int cpu) + { +@@ -84,8 +84,8 @@ static void exynos_secondary_init(unsign + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -97,7 +97,7 @@ static int exynos_boot_secondary(unsigne + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -126,7 +126,7 @@ static int exynos_boot_secondary(unsigne + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -165,7 +165,7 @@ static int exynos_boot_secondary(unsigne + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/mach-msm/platsmp.c ++++ b/arch/arm/mach-msm/platsmp.c +@@ -30,7 +30,7 @@ + + extern void msm_secondary_startup(void); + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static inline int get_core_count(void) + { +@@ -50,8 +50,8 @@ static void msm_secondary_init(unsigned + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static void prepare_cold_cpu(unsigned int cpu) +@@ -88,7 +88,7 @@ static int msm_boot_secondary(unsigned i + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -121,7 +121,7 @@ static int msm_boot_secondary(unsigned i + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/mach-omap2/omap-smp.c ++++ b/arch/arm/mach-omap2/omap-smp.c +@@ -42,7 +42,7 @@ + /* SCU base address */ + static void __iomem *scu_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __iomem *omap4_get_scu_base(void) + { +@@ -73,8 +73,8 @@ static void omap4_secondary_init(unsigne + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -88,7 +88,7 @@ static int omap4_boot_secondary(unsigned + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. +@@ -165,7 +165,7 @@ static int omap4_boot_secondary(unsigned + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +--- a/arch/arm/mach-prima2/platsmp.c ++++ b/arch/arm/mach-prima2/platsmp.c +@@ -23,7 +23,7 @@ + static void __iomem *scu_base; + static void __iomem *rsc_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static struct map_desc scu_io_desc __initdata = { + .length = SZ_4K, +@@ -56,8 +56,8 @@ static void sirfsoc_secondary_init(unsig + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static struct of_device_id rsc_ids[] = { +@@ -95,7 +95,7 @@ static int sirfsoc_boot_secondary(unsign + /* make sure write buffer is drained */ + mb(); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -127,7 +127,7 @@ static int sirfsoc_boot_secondary(unsign + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/mach-spear/platsmp.c ++++ b/arch/arm/mach-spear/platsmp.c +@@ -20,7 +20,7 @@ + #include + #include "generic.h" + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __iomem *scu_base = IOMEM(VA_SCU_BASE); + +@@ -36,8 +36,8 @@ static void spear13xx_secondary_init(uns + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -48,7 +48,7 @@ static int spear13xx_boot_secondary(unsi + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -75,7 +75,7 @@ static int spear13xx_boot_secondary(unsi + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/mach-sti/platsmp.c ++++ b/arch/arm/mach-sti/platsmp.c +@@ -34,7 +34,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void sti_secondary_init(unsigned int cpu) + { +@@ -49,8 +49,8 @@ void sti_secondary_init(unsigned int cpu + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -61,7 +61,7 @@ int sti_boot_secondary(unsigned int cpu, + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -92,7 +92,7 @@ int sti_boot_secondary(unsigned int cpu, + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/mach-ux500/platsmp.c ++++ b/arch/arm/mach-ux500/platsmp.c +@@ -51,7 +51,7 @@ static void __iomem *scu_base_addr(void) + return NULL; + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void ux500_secondary_init(unsigned int cpu) + { +@@ -64,8 +64,8 @@ static void ux500_secondary_init(unsigne + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -76,7 +76,7 @@ static int ux500_boot_secondary(unsigned + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -97,7 +97,7 @@ static int ux500_boot_secondary(unsigned + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/plat-versatile/platsmp.c ++++ b/arch/arm/plat-versatile/platsmp.c +@@ -30,7 +30,7 @@ static void write_pen_release(int val) + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void versatile_secondary_init(unsigned int cpu) + { +@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } diff --git a/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch new file mode 100644 index 000000000..46d4d7942 --- /dev/null +++ b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch @@ -0,0 +1,21 @@ +Subject: arm-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:09:28 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1789,7 +1789,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU ++ depends on MMU && !PREEMPT_RT_FULL + help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch new file mode 100644 index 000000000..46d36bda4 --- /dev/null +++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch @@ -0,0 +1,140 @@ +Subject: arm-enable-highmem-for-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Feb 2013 11:03:11 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 2 - + arch/arm/include/asm/switch_to.h | 8 +++++++ + arch/arm/mm/highmem.c | 41 +++++++++++++++++++++++++++++++++++++-- + include/linux/highmem.h | 1 + 4 files changed, 49 insertions(+), 3 deletions(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1789,7 +1789,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU && !PREEMPT_RT_FULL ++ depends on MMU + help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address +--- a/arch/arm/include/asm/switch_to.h ++++ b/arch/arm/include/asm/switch_to.h +@@ -3,6 +3,13 @@ + + #include + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + /* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb +@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(s + + #define switch_to(prev,next,last) \ + do { \ ++ switch_kmaps(prev, next); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ + } while (0) + +--- 
a/arch/arm/mm/highmem.c ++++ b/arch/arm/mm/highmem.c +@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap); + + void *kmap_atomic(struct page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; +@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page) + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. + */ +- set_top_pte(vaddr, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr) + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + #else +@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic); + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + +@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn) + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(get_top_pte(vaddr))); + #endif +- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const v + + return pte_page(get_top_pte(vaddr)); + } ++ ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), ++ next_p->kmap_pte[i]); ++ } ++} ++#endif +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include + diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch new file mode 100644 index 000000000..67742f30f --- /dev/null +++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch @@ -0,0 +1,104 @@ +Subject: arm-preempt-lazy-support.patch +From: Thomas Gleixner +Date: Wed, 31 Oct 2012 12:04:11 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 1 + + arch/arm/include/asm/thread_info.h | 3 +++ + arch/arm/kernel/asm-offsets.c | 1 + + arch/arm/kernel/entry-armv.S | 13 +++++++++++-- + arch/arm/kernel/signal.c | 3 ++- + 5 files changed, 18 insertions(+), 3 deletions(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -58,6 +58,7 @@ config ARM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_UID16 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -60,6 +60,7 @@ struct arm_restart_block { + struct thread_info { + unsigned long flags; /* low level flags */ + int preempt_count; /* 0 => 
preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ +@@ -153,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru + #define TIF_SIGPENDING 0 + #define TIF_NEED_RESCHED 1 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ ++#define TIF_NEED_RESCHED_LAZY 3 + #define TIF_SYSCALL_TRACE 8 + #define TIF_SYSCALL_AUDIT 9 + #define TIF_SYSCALL_TRACEPOINT 10 +@@ -165,6 +167,7 @@ extern int vfp_restore_user_hwstate(stru + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +--- a/arch/arm/kernel/asm-offsets.c ++++ b/arch/arm/kernel/asm-offsets.c +@@ -54,6 +54,7 @@ int main(void) + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -205,11 +205,18 @@ ENDPROC(__dabt_svc) + #ifdef CONFIG_PREEMPT + get_thread_info tsk + ldr r8, [tsk, #TI_PREEMPT] @ get preempt count +- ldr r0, [tsk, #TI_FLAGS] @ get flags + teq r8, #0 @ if preempt count != 0 ++ bne 1f @ return from exeption ++ ldr r0, [tsk, #TI_FLAGS] @ get flags ++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set ++ blne svc_preempt @ preempt! 
++ ++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r8, #0 @ if preempt lazy count != 0 + movne r0, #0 @ force flags to 0 +- tst r0, #_TIF_NEED_RESCHED ++ tst r0, #_TIF_NEED_RESCHED_LAZY + blne svc_preempt ++1: + #endif + + svc_exit r5, irq = 1 @ return from exception +@@ -224,6 +231,8 @@ ENDPROC(__irq_svc) + 1: bl preempt_schedule_irq @ irq en/disable is done inside + ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS + tst r0, #_TIF_NEED_RESCHED ++ bne 1b ++ tst r0, #_TIF_NEED_RESCHED_LAZY + moveq pc, r8 @ go again + b 1b + #endif +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -573,7 +573,8 @@ asmlinkage int + do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) + { + do { +- if (likely(thread_flags & _TIF_NEED_RESCHED)) { ++ if (likely(thread_flags & (_TIF_NEED_RESCHED | ++ _TIF_NEED_RESCHED_LAZY))) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch new file mode 100644 index 000000000..bac9d5ce6 --- /dev/null +++ b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch @@ -0,0 +1,88 @@ +From 4b82f1531c59f7c2f6e4deca57d74ad732a05cdd Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 20 Sep 2013 14:31:54 +0200 +Subject: [PATCH RT] arm/unwind: use a raw_spin_lock +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Mostly unwind is done with irqs enabled however SLUB may call it with +irqs disabled while creating a new SLUB cache. + +I had system freeze while loading a module which called +kmem_cache_create() on init. 
That means SLUB's __slab_alloc() disabled +interrupts and then + +->new_slab_objects() + ->new_slab() + ->setup_object() + ->setup_object_debug() + ->init_tracking() + ->set_track() + ->save_stack_trace() + ->save_stack_trace_tsk() + ->walk_stackframe() + ->unwind_frame() + ->unwind_find_idx() + =>spin_lock_irqsave(&unwind_lock); + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/kernel/unwind.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +--- a/arch/arm/kernel/unwind.c ++++ b/arch/arm/kernel/unwind.c +@@ -87,7 +87,7 @@ extern const struct unwind_idx __start_u + static const struct unwind_idx *__origin_unwind_idx; + extern const struct unwind_idx __stop_unwind_idx[]; + +-static DEFINE_SPINLOCK(unwind_lock); ++static DEFINE_RAW_SPINLOCK(unwind_lock); + static LIST_HEAD(unwind_tables); + + /* Convert a prel31 symbol to an absolute address */ +@@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_f + /* module unwind tables */ + struct unwind_table *table; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_for_each_entry(table, &unwind_tables, list) { + if (addr >= table->begin_addr && + addr < table->end_addr) { +@@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_f + break; + } + } +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + } + + pr_debug("%s: idx = %p\n", __func__, idx); +@@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(un + tab->begin_addr = text_addr; + tab->end_addr = text_addr + text_size; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_add_tail(&tab->list, &unwind_tables); +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + + return tab; + } +@@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_tabl + if (!tab) + return; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_del(&tab->list); +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + + kfree(tab); + } diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch new file mode 100644 index 000000000..c41df77aa --- /dev/null +++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch @@ -0,0 +1,65 @@ +From: Steven Rostedt +Date: Fri, 3 Jul 2009 08:44:29 -0500 +Subject: ata: Do not disable interrupts in ide code for preempt-rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use the local_irq_*_nort variants. 
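+
+The _nort helpers are defined by the rt core patches; conceptually they
+behave like this (sketch, not the exact definitions):
+
+        #ifdef CONFIG_PREEMPT_RT_FULL
+        /* RT: keep interrupts enabled, the section stays preemptible */
+        # define local_irq_save_nort(flags)     local_save_flags(flags)
+        # define local_irq_restore_nort(flags)  (void)(flags)
+        #else
+        /* !RT: behave exactly like the plain variants */
+        # define local_irq_save_nort(flags)     local_irq_save(flags)
+        # define local_irq_restore_nort(flags)  local_irq_restore(flags)
+        #endif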
+ +Signed-off-by: Steven Rostedt +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/ata/libata-sff.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu + do_write); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_ + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_ + count, rw); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch new file mode 100644 index 000000000..f46b4ed1d --- /dev/null +++ b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch @@ -0,0 +1,78 @@ +From 7632d1dd96f75bdba997003fa61ab14e57afb0fe Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 9 Apr 2014 10:37:23 +0200 +Subject: [PATCH 5/5] block: mq: use cpu_light() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +there is a might sleep splat because get_cpu() disables preemption and +later we grab a lock. As a workaround for this we use get_cpu_light() +and an additional lock to prevent taking the same ctx. + +There is a lock member in the ctx already but there some functions which do ++ +on the member and this works with irq off but on RT we would need the extra lock. 
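+
+get_cpu_light()/put_cpu_light() come from the rt core patches; the idea
+is to pin the task to its CPU via migrate_disable() instead of disabling
+preemption, roughly (sketch):
+
+        #ifdef CONFIG_PREEMPT_RT_FULL
+        # define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
+        # define put_cpu_light()  migrate_enable()
+        #else
+        # define get_cpu_light()  get_cpu()
+        # define put_cpu_light()  put_cpu()
+        #endif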
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + block/blk-mq.c | 14 +++++++++++--- + block/blk-mq.h | 1 + + 2 files changed, 12 insertions(+), 3 deletions(-) + +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -30,7 +30,11 @@ static void __blk_mq_run_hw_queue(struct + static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) + { +- return per_cpu_ptr(q->queue_ctx, cpu); ++ struct blk_mq_ctx *ctx; ++ ++ ctx = per_cpu_ptr(q->queue_ctx, cpu); ++ spin_lock(&ctx->cpu_lock); ++ return ctx; + } + + /* +@@ -41,12 +45,13 @@ static struct blk_mq_ctx *__blk_mq_get_c + */ + static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) + { +- return __blk_mq_get_ctx(q, get_cpu()); ++ return __blk_mq_get_ctx(q, get_cpu_light()); + } + + static void blk_mq_put_ctx(struct blk_mq_ctx *ctx) + { +- put_cpu(); ++ spin_unlock(&ctx->cpu_lock); ++ put_cpu_light(); + } + + /* +@@ -897,7 +902,9 @@ static void blk_mq_make_request(struct r + if (list_empty(&plug->mq_list)) + trace_block_plug(q); + else if (request_count >= BLK_MAX_REQUEST_COUNT) { ++ spin_unlock(&ctx->cpu_lock); + blk_flush_plug_list(plug, false); ++ spin_lock(&ctx->cpu_lock); + trace_block_plug(q); + } + list_add_tail(&rq->queuelist, &plug->mq_list); +@@ -1212,6 +1219,7 @@ static void blk_mq_init_cpu_queues(struc + memset(__ctx, 0, sizeof(*__ctx)); + __ctx->cpu = i; + spin_lock_init(&__ctx->lock); ++ spin_lock_init(&__ctx->cpu_lock); + INIT_LIST_HEAD(&__ctx->rq_list); + __ctx->queue = q; + +--- a/block/blk-mq.h ++++ b/block/blk-mq.h +@@ -7,6 +7,7 @@ struct blk_mq_ctx { + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + ++ spinlock_t cpu_lock; + unsigned int cpu; + unsigned int index_hw; + unsigned int ipi_redirect; diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch new file mode 100644 index 000000000..deee051fb --- /dev/null +++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch @@ -0,0 +1,97 @@ +Subject: block: Shorten interrupt disabled regions +From: Thomas Gleixner +Date: Wed, 22 Jun 2011 19:47:02 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Moving the blk_sched_flush_plug() call out of the interrupt/preempt +disabled region in the scheduler allows us to replace +local_irq_save/restore(flags) by local_irq_disable/enable() in +blk_flush_plug(). + +Now instead of doing this we disable interrupts explicitely when we +lock the request_queue and reenable them when we drop the lock. That +allows interrupts to be handled when the plug list contains requests +for more than one queue. + +Aside of that this change makes the scope of the irq disabled region +more obvious. The current code confused the hell out of me when +looking at: + + local_irq_save(flags); + spin_lock(q->queue_lock); + ... + queue_unplugged(q...); + scsi_request_fn(); + spin_unlock(q->queue_lock); + spin_lock(shost->host_lock); + spin_unlock_irq(shost->host_lock); + +-------------------^^^ ???? + + spin_lock_irq(q->queue_lock); + spin_unlock(q->lock); + local_irq_restore(flags); + +Also add a comment to __blk_run_queue() documenting that +q->request_fn() can drop q->queue_lock and reenable interrupts, but +must return with q->queue_lock held and interrupts disabled. 
+ +Signed-off-by: Thomas Gleixner +Cc: Peter Zijlstra +Cc: Tejun Heo +Cc: Jens Axboe +Cc: Linus Torvalds +Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de +--- + block/blk-core.c | 12 ++---------- + 1 file changed, 2 insertions(+), 10 deletions(-) + +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -2981,7 +2981,7 @@ static void queue_unplugged(struct reque + blk_run_queue_async(q); + else + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -3029,7 +3029,6 @@ EXPORT_SYMBOL(blk_check_plugged); + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -3051,11 +3050,6 @@ void blk_flush_plug_list(struct blk_plug + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. +- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -3068,7 +3062,7 @@ void blk_flush_plug_list(struct blk_plug + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + + /* +@@ -3095,8 +3089,6 @@ void blk_flush_plug_list(struct blk_plug + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch new file mode 100644 index 000000000..11b868ac9 --- /dev/null +++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch @@ -0,0 +1,46 @@ +Subject: block: Use cpu_chill() for retry loops +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 18:28:26 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Steven also observed a live lock when there was a +concurrent priority boosting going on. + +Use cpu_chill() instead of cpu_relax() to let the system +make progress. 
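+
+cpu_chill() is provided by the rt core patches; on RT it sleeps for a
+short time so a preempted lock holder can run instead of being spun on,
+roughly (sketch):
+
+        #ifdef CONFIG_PREEMPT_RT_FULL
+        # define cpu_chill()    msleep(1)
+        #else
+        # define cpu_chill()    cpu_relax()
+        #endif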
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + block/blk-ioc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/block/blk-ioc.c ++++ b/block/blk-ioc.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "blk.h" + +@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch new file mode 100644 index 000000000..54ad90d0f --- /dev/null +++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch @@ -0,0 +1,35 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:58 -0500 +Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/asm-generic/bug.h | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +--- a/include/asm-generic/bug.h ++++ b/include/asm-generic/bug.h +@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #endif diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch new file mode 100644 index 000000000..bb8343d61 --- /dev/null +++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch @@ -0,0 +1,161 @@ +From: Benedikt Spranger +Date: Mon, 8 Mar 2010 18:57:04 +0100 +Subject: clocksource: TCLIB: Allow higher clock rates for clock events +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +As default the TCLIB uses the 32KiHz base clock rate for clock events. +Add a compile time selection to allow higher clock resulution. + +(fixed up by Sami Pietikäinen ) + +Signed-off-by: Benedikt Spranger +Signed-off-by: Thomas Gleixner +--- + drivers/clocksource/tcb_clksrc.c | 37 ++++++++++++++++++++++--------------- + drivers/misc/Kconfig | 12 ++++++++++-- + 2 files changed, 32 insertions(+), 17 deletions(-) + +--- a/drivers/clocksource/tcb_clksrc.c ++++ b/drivers/clocksource/tcb_clksrc.c +@@ -23,8 +23,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. 
+ * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -74,6 +73,7 @@ static struct clocksource clksrc = { + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ u32 freq; + void __iomem *regs; + }; + +@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". +- */ + static u32 timer_clock; + + static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) +@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_PERIODIC: + clk_prepare_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ/2)/HZ, ++ tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_ONESHOT: + clk_prepare_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt = + .name = "tc_clkevt", + .features = CLOCK_EVT_FEAT_PERIODIC + | CLOCK_EVT_FEAT_ONESHOT, ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + /* Should be lower than at91rm9200's system timer */ + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, + .set_mode = tc_mode, + }, +@@ -184,8 +182,9 @@ static struct irqaction tc_irqaction = { + .handler = ch2_irq, + }; + +-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + int ret; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; +@@ -200,7 +199,11 @@ static int __init setup_clkevents(struct + clkevt.clk = t2_clk; + tc_irqaction.dev_id = &clkevt; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk) / divisor; + + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -208,7 +211,7 @@ static int __init setup_clkevents(struct + if (ret) + return ret; + +- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); ++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); + + return ret; + } +@@ -345,7 +348,11 @@ static int __init tcb_clksrc_init(void) + goto err_disable_t1; + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + ret = setup_clkevents(tc, clk32k_divisor_idx); ++#else ++ ret = setup_clkevents(tc, best_divisor_idx); ++#endif + if (ret) + goto err_unregister_clksrc; + +--- a/drivers/misc/Kconfig ++++ 
b/drivers/misc/Kconfig +@@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -93,6 +92,15 @@ config ATMEL_TCB_CLKSRC_BLOCK + TC can be used for other purposes, such as PWM generation and + interval timing. + ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. ++ ++ + config DUMMY_IRQ + tristate "Dummy IRQ handler" + default n diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch new file mode 100644 index 000000000..4721dabdd --- /dev/null +++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch @@ -0,0 +1,184 @@ +Subject: completion: Use simple wait queues +From: Thomas Gleixner +Date: Fri, 11 Jan 2013 11:23:51 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Completions have no long lasting callbacks and therefor do not need +the complex waitqueue variant. Use simple waitqueues which reduces the +contention on the waitqueue lock. + +Signed-off-by: Thomas Gleixner +--- + include/linux/completion.h | 9 ++++----- + include/linux/uprobes.h | 1 + + kernel/sched/completion.c | 34 +++++++++++++++++----------------- + kernel/sched/core.c | 10 ++++++++-- + 4 files changed, 30 insertions(+), 24 deletions(-) + +--- a/include/linux/completion.h ++++ b/include/linux/completion.h +@@ -7,8 +7,7 @@ + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. 
+ */ +- +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +23,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +72,7 @@ struct completion { + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_head(&x->wait); + } + + /** +--- a/include/linux/uprobes.h ++++ b/include/linux/uprobes.h +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + struct vm_area_struct; + struct mm_struct; +--- a/kernel/sched/completion.c ++++ b/kernel/sched/completion.c +@@ -30,10 +30,10 @@ void complete(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done++; +- __wake_up_locked(&x->wait, TASK_NORMAL, 1); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 1); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -50,10 +50,10 @@ void complete_all(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; +- __wake_up_locked(&x->wait, TASK_NORMAL, 0); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 0); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x, + long (*action)(long), long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, current); ++ DEFINE_SWAITER(wait); + +- __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ swait_prepare_locked(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = action(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ swait_finish_locked(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -89,9 +89,9 @@ static inline long __sched + { + might_sleep(); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, action, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + return timeout; + } + +@@ -267,12 +267,12 @@ bool try_wait_for_completion(struct comp + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(try_wait_for_completion); +@@ -290,10 +290,10 @@ bool completion_done(struct completion * + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + 
EXPORT_SYMBOL(completion_done); +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2689,7 +2689,10 @@ void migrate_disable(void) + } + + #ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } + #endif + + if (p->migrate_disable) { +@@ -2720,7 +2723,10 @@ void migrate_enable(void) + } + + #ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } + #endif + WARN_ON_ONCE(p->migrate_disable <= 0); + diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch new file mode 100644 index 000000000..6e4603abf --- /dev/null +++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch @@ -0,0 +1,21 @@ +Subject: cond-resched-lock-rt-tweak.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 22:51:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2722,7 +2722,7 @@ extern int _cond_resched(void); + + extern int __cond_resched_lock(spinlock_t *lock); + +-#ifdef CONFIG_PREEMPT_COUNT ++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET + #else + #define PREEMPT_LOCK_OFFSET 0 diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch new file mode 100644 index 000000000..c1a13e508 --- /dev/null +++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch @@ -0,0 +1,48 @@ +Subject: cond-resched-softirq-fix.patch +From: Thomas Gleixner +Date: Thu, 14 Jul 2011 09:56:44 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 4 ++++ + kernel/sched/core.c | 2 ++ + 2 files changed, 6 insertions(+) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2733,12 +2733,16 @@ extern int __cond_resched_lock(spinlock_ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + static inline void cond_resched_rcu(void) + { +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4227,6 +4227,7 @@ int __cond_resched_lock(spinlock_t *lock + } + EXPORT_SYMBOL(__cond_resched_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL + int __sched __cond_resched_softirq(void) + { + BUG_ON(!in_softirq()); +@@ -4240,6 +4241,7 @@ int __sched __cond_resched_softirq(void) + return 0; + } + EXPORT_SYMBOL(__cond_resched_softirq); ++#endif + + /** + * yield - yield the current processor to other threads. 
diff --git a/debian/patches/features/all/rt/condition-migration_disable-on-lock-acquisition.patch b/debian/patches/features/all/rt/condition-migration_disable-on-lock-acquisition.patch new file mode 100644 index 000000000..771174d23 --- /dev/null +++ b/debian/patches/features/all/rt/condition-migration_disable-on-lock-acquisition.patch @@ -0,0 +1,34 @@ +From 56f43bce737d3f28ad470c95fa84f824cb0d55ad Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Thu, 21 Nov 2013 22:52:30 -0500 +Subject: [PATCH 2/2] condition migration_disable on lock acquisition +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +No need to unconditionally migrate_disable (what is it protecting ?) and +re-enable on failure to acquire the lock. +This patch moves the migrate_disable to be conditioned on sucessful lock +acquisition only. + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rt.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +--- a/kernel/locking/rt.c ++++ b/kernel/locking/rt.c +@@ -182,11 +182,10 @@ int __lockfunc rt_write_trylock(rwlock_t + { + int ret = rt_mutex_trylock(&rwlock->lock); + +- migrate_disable(); +- if (ret) ++ if (ret) { + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); +- else +- migrate_enable(); ++ migrate_disable(); ++ } + + return ret; + } diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch new file mode 100644 index 000000000..5d70765d4 --- /dev/null +++ b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch @@ -0,0 +1,57 @@ +From 107fb2b43f5c80686ee6454713f4963728ca2737 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Thu, 5 Dec 2013 09:16:52 -0500 +Subject: [PATCH] cpu hotplug: Document why PREEMPT_RT uses a spinlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The patch: + + cpu: Make hotplug.lock a "sleeping" spinlock on RT + + Tasks can block on hotplug.lock in pin_current_cpu(), but their + state might be != RUNNING. So the mutex wakeup will set the state + unconditionally to RUNNING. That might cause spurious unexpected + wakeups. We could provide a state preserving mutex_lock() function, + but this is semantically backwards. So instead we convert the + hotplug.lock() to a spinlock for RT, which has the state preserving + semantics already. + +Fixed a bug where the hotplug lock on PREEMPT_RT can be called after a +task set its state to TASK_UNINTERRUPTIBLE and before it called +schedule. If the hotplug_lock used a mutex, and there was contention, +the current task's state would be turned to TASK_RUNNABLE and the +schedule call will not sleep. This caused unexpected results. + +Although the patch had a description of the change, the code had no +comments about it. This causes confusion to those that review the code, +and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy +to see why a change was made. Even if it was in git, the code should +still have a comment for something as subtle as this. + +Document the rational for using a spinlock on PREEMPT_RT in the hotplug +lock code. 
+ +Reported-by: Nicholas Mc Guire +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -84,6 +84,14 @@ struct hotplug_pcp { + int grab_lock; + struct completion synced; + #ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * Note, on PREEMPT_RT, the hotplug lock must save the state of ++ * the task, otherwise the mutex will cause the task to fail ++ * to sleep when required. (Because it's called from migrate_disable()) ++ * ++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's ++ * state. ++ */ + spinlock_t lock; + #else + struct mutex mutex; diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch new file mode 100644 index 000000000..3f15351bf --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch @@ -0,0 +1,126 @@ +Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT +From: Steven Rostedt +Date: Fri, 02 Mar 2012 10:36:57 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Tasks can block on hotplug.lock in pin_current_cpu(), but their state +might be != RUNNING. So the mutex wakeup will set the state +unconditionally to RUNNING. That might cause spurious unexpected +wakeups. We could provide a state preserving mutex_lock() function, +but this is semantically backwards. So instead we convert the +hotplug.lock() to a spinlock for RT, which has the state preserving +semantics already. + +Signed-off-by: Steven Rostedt +Cc: Carsten Emde +Cc: John Kacur +Cc: Peter Zijlstra +Cc: Clark Williams +Cc: stable-rt@vger.kernel.org +Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 35 ++++++++++++++++++++++++++--------- + 1 file changed, 26 insertions(+), 9 deletions(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled; + + static struct { + struct task_struct *active_writer; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* Makes the lock keep the task's state */ ++ spinlock_t lock; ++#else + struct mutex lock; /* Synchronizes accesses to refcount, */ ++#endif + /* + * Also blocks the new readers during + * an ongoing cpu hotplug operation. 
+@@ -59,10 +64,22 @@ static struct { + int refcount; + } cpu_hotplug = { + .active_writer = NULL, ++#ifdef CONFIG_PREEMPT_RT_FULL ++ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), ++#else + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), ++#endif + .refcount = 0, + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) ++# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) ++#else ++# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) ++# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) ++#endif ++ + struct hotplug_pcp { + struct task_struct *unplug; + int refcount; +@@ -92,8 +109,8 @@ void pin_current_cpu(void) + return; + } + preempt_enable(); +- mutex_lock(&cpu_hotplug.lock); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_lock(); ++ hotplug_unlock(); + preempt_disable(); + goto retry; + } +@@ -165,9 +182,9 @@ void get_online_cpus(void) + might_sleep(); + if (cpu_hotplug.active_writer == current) + return; +- mutex_lock(&cpu_hotplug.lock); ++ hotplug_lock(); + cpu_hotplug.refcount++; +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + + } + EXPORT_SYMBOL_GPL(get_online_cpus); +@@ -176,14 +193,14 @@ void put_online_cpus(void) + { + if (cpu_hotplug.active_writer == current) + return; +- mutex_lock(&cpu_hotplug.lock); + ++ hotplug_lock(); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) + wake_up_process(cpu_hotplug.active_writer); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + + } + EXPORT_SYMBOL_GPL(put_online_cpus); +@@ -215,11 +232,11 @@ void cpu_hotplug_begin(void) + cpu_hotplug.active_writer = current; + + for (;;) { +- mutex_lock(&cpu_hotplug.lock); ++ hotplug_lock(); + if (likely(!cpu_hotplug.refcount)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + schedule(); + } + } +@@ -227,7 +244,7 @@ void cpu_hotplug_begin(void) + void cpu_hotplug_done(void) + { + cpu_hotplug.active_writer = NULL; +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + } + + /* diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch new file mode 100644 index 000000000..cb8c8912e --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch @@ -0,0 +1,548 @@ +From: Steven Rostedt +Date: Mon, 16 Jul 2012 08:07:43 +0000 +Subject: cpu/rt: Rework cpu down for PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Bringing a CPU down is a pain with the PREEMPT_RT kernel because +tasks can be preempted in many more places than in non-RT. In +order to handle per_cpu variables, tasks may be pinned to a CPU +for a while, and even sleep. But these tasks need to be off the CPU +if that CPU is going down. + +Several synchronization methods have been tried, but when stressed +they failed. This is a new approach. + +A sync_tsk thread is still created and tasks may still block on a +lock when the CPU is going down, but how that works is a bit different. +When cpu_down() starts, it will create the sync_tsk and wait on it +to inform that current tasks that are pinned on the CPU are no longer +pinned. But new tasks that are about to be pinned will still be allowed +to do so at this time. + +Then the notifiers are called. Several notifiers will bring down tasks +that will enter these locations. 
Some of these tasks will take locks +of other tasks that are on the CPU. If we don't let those other tasks +continue, but make them block until CPU down is done, the tasks that +the notifiers are waiting on will never complete as they are waiting +for the locks held by the tasks that are blocked. + +Thus we still let the task pin the CPU until the notifiers are done. +After the notifiers run, we then make new tasks entering the pinned +CPU sections grab a mutex and wait. This mutex is now a per CPU mutex +in the hotplug_pcp descriptor. + +To help things along, a new function in the scheduler code is created +called migrate_me(). This function will try to migrate the current task +off the CPU this is going down if possible. When the sync_tsk is created, +all tasks will then try to migrate off the CPU going down. There are +several cases that this wont work, but it helps in most cases. + +After the notifiers are called and if a task can't migrate off but enters +the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex +until the CPU down is complete. Then the scheduler will force the migration +anyway. + +Also, I found that THREAD_BOUND need to also be accounted for in the +pinned CPU, and the migrate_disable no longer treats them special. +This helps fix issues with ksoftirqd and workqueue that unbind on CPU down. + +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 7 + + kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++--------- + kernel/sched/core.c | 82 ++++++++++++++++- + 3 files changed, 284 insertions(+), 45 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1995,6 +1995,10 @@ extern void do_set_cpus_allowed(struct t + + extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); ++int migrate_me(void); ++void tell_sched_cpu_down_begin(int cpu); ++void tell_sched_cpu_down_done(int cpu); ++ + #else + static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +@@ -2007,6 +2011,9 @@ static inline int set_cpus_allowed_ptr(s + return -EINVAL; + return 0; + } ++static inline int migrate_me(void) { return 0; } ++static inline void tell_sched_cpu_down_begin(int cpu) { } ++static inline void tell_sched_cpu_down_done(int cpu) { } + #endif + + #ifdef CONFIG_NO_HZ_COMMON +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled; + + static struct { + struct task_struct *active_writer; +-#ifdef CONFIG_PREEMPT_RT_FULL +- /* Makes the lock keep the task's state */ +- spinlock_t lock; +-#else + struct mutex lock; /* Synchronizes accesses to refcount, */ +-#endif + /* + * Also blocks the new readers during + * an ongoing cpu hotplug operation. 
+@@ -64,28 +59,46 @@ static struct { + int refcount; + } cpu_hotplug = { + .active_writer = NULL, +-#ifdef CONFIG_PREEMPT_RT_FULL +- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), +-#else + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), +-#endif + .refcount = 0, + }; + +-#ifdef CONFIG_PREEMPT_RT_FULL +-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) +-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) +-#else +-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) +-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) +-#endif +- ++/** ++ * hotplug_pcp - per cpu hotplug descriptor ++ * @unplug: set when pin_current_cpu() needs to sync tasks ++ * @sync_tsk: the task that waits for tasks to finish pinned sections ++ * @refcount: counter of tasks in pinned sections ++ * @grab_lock: set when the tasks entering pinned sections should wait ++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished ++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) ++ * @mutex_init: zero if the mutex hasn't been initialized yet. ++ * ++ * Although @unplug and @sync_tsk may point to the same task, the @unplug ++ * is used as a flag and still exists after @sync_tsk has exited and ++ * @sync_tsk set to NULL. ++ */ + struct hotplug_pcp { + struct task_struct *unplug; ++ struct task_struct *sync_tsk; + int refcount; ++ int grab_lock; + struct completion synced; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spinlock_t lock; ++#else ++ struct mutex mutex; ++#endif ++ int mutex_init; + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) ++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) ++#else ++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) ++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) ++#endif ++ + static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + + /** +@@ -99,18 +112,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp + void pin_current_cpu(void) + { + struct hotplug_pcp *hp; ++ int force = 0; + + retry: + hp = &__get_cpu_var(hotplug_pcp); + +- if (!hp->unplug || hp->refcount || preempt_count() > 1 || ++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; + return; + } +- preempt_enable(); +- hotplug_lock(); +- hotplug_unlock(); ++ if (hp->grab_lock) { ++ preempt_enable(); ++ hotplug_lock(hp); ++ hotplug_unlock(hp); ++ } else { ++ preempt_enable(); ++ /* ++ * Try to push this task off of this CPU. ++ */ ++ if (!migrate_me()) { ++ preempt_disable(); ++ hp = &__get_cpu_var(hotplug_pcp); ++ if (!hp->grab_lock) { ++ /* ++ * Just let it continue it's already pinned ++ * or about to sleep. ++ */ ++ force = 1; ++ goto retry; ++ } ++ preempt_enable(); ++ } ++ } + preempt_disable(); + goto retry; + } +@@ -131,26 +165,84 @@ void unpin_current_cpu(void) + wake_up_process(hp->unplug); + } + +-/* +- * FIXME: Is this really correct under all circumstances ? +- */ ++static void wait_for_pinned_cpus(struct hotplug_pcp *hp) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++} ++ + static int sync_unplug_thread(void *data) + { + struct hotplug_pcp *hp = data; + + preempt_disable(); + hp->unplug = current; ++ wait_for_pinned_cpus(hp); ++ ++ /* ++ * This thread will synchronize the cpu_down() with threads ++ * that have pinned the CPU. When the pinned CPU count reaches ++ * zero, we inform the cpu_down code to continue to the next step. 
++ */ + set_current_state(TASK_UNINTERRUPTIBLE); +- while (hp->refcount) { +- schedule_preempt_disabled(); ++ preempt_enable(); ++ complete(&hp->synced); ++ ++ /* ++ * If all succeeds, the next step will need tasks to wait till ++ * the CPU is offline before continuing. To do this, the grab_lock ++ * is set and tasks going into pin_current_cpu() will block on the ++ * mutex. But we still need to wait for those that are already in ++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() ++ * will kick this thread out. ++ */ ++ while (!hp->grab_lock && !kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ ++ /* Make sure grab_lock is seen before we see a stale completion */ ++ smp_mb(); ++ ++ /* ++ * Now just before cpu_down() enters stop machine, we need to make ++ * sure all tasks that are in pinned CPU sections are out, and new ++ * tasks will now grab the lock, keeping them from entering pinned ++ * CPU sections. ++ */ ++ if (!kthread_should_stop()) { ++ preempt_disable(); ++ wait_for_pinned_cpus(hp); ++ preempt_enable(); ++ complete(&hp->synced); ++ } ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); +- preempt_enable(); +- complete(&hp->synced); ++ ++ /* ++ * Force this thread off this CPU as it's going down and ++ * we don't want any more work on this CPU. ++ */ ++ current->flags &= ~PF_NO_SETAFFINITY; ++ do_set_cpus_allowed(current, cpu_present_mask); ++ migrate_me(); + return 0; + } + ++static void __cpu_unplug_sync(struct hotplug_pcp *hp) ++{ ++ wake_up_process(hp->sync_tsk); ++ wait_for_completion(&hp->synced); ++} ++ + /* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. +@@ -158,23 +250,83 @@ static int sync_unplug_thread(void *data + static int cpu_unplug_begin(unsigned int cpu) + { + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); +- struct task_struct *tsk; ++ int err; ++ ++ /* Protected by cpu_hotplug.lock */ ++ if (!hp->mutex_init) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spin_lock_init(&hp->lock); ++#else ++ mutex_init(&hp->mutex); ++#endif ++ hp->mutex_init = 1; ++ } ++ ++ /* Inform the scheduler to migrate tasks off this CPU */ ++ tell_sched_cpu_down_begin(cpu); + + init_completion(&hp->synced); +- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); +- if (IS_ERR(tsk)) +- return (PTR_ERR(tsk)); +- kthread_bind(tsk, cpu); +- wake_up_process(tsk); +- wait_for_completion(&hp->synced); ++ ++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); ++ if (IS_ERR(hp->sync_tsk)) { ++ err = PTR_ERR(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++ return err; ++ } ++ kthread_bind(hp->sync_tsk, cpu); ++ ++ /* ++ * Wait for tasks to get out of the pinned sections, ++ * it's still OK if new tasks enter. Some CPU notifiers will ++ * wait for tasks that are going to enter these sections and ++ * we must not have them block. ++ */ ++ __cpu_unplug_sync(hp); ++ + return 0; + } + ++static void cpu_unplug_sync(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ init_completion(&hp->synced); ++ /* The completion needs to be initialzied before setting grab_lock */ ++ smp_wmb(); ++ ++ /* Grab the mutex before setting grab_lock */ ++ hotplug_lock(hp); ++ hp->grab_lock = 1; ++ ++ /* ++ * The CPU notifiers have been completed. 
++ * Wait for tasks to get out of pinned CPU sections and have new ++ * tasks block until the CPU is completely down. ++ */ ++ __cpu_unplug_sync(hp); ++ ++ /* All done with the sync thread */ ++ kthread_stop(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++} ++ + static void cpu_unplug_done(unsigned int cpu) + { + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + hp->unplug = NULL; ++ /* Let all tasks know cpu unplug is finished before cleaning up */ ++ smp_wmb(); ++ ++ if (hp->sync_tsk) ++ kthread_stop(hp->sync_tsk); ++ ++ if (hp->grab_lock) { ++ hotplug_unlock(hp); ++ /* protected by cpu_hotplug.lock */ ++ hp->grab_lock = 0; ++ } ++ tell_sched_cpu_down_done(cpu); + } + + void get_online_cpus(void) +@@ -182,9 +334,9 @@ void get_online_cpus(void) + might_sleep(); + if (cpu_hotplug.active_writer == current) + return; +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + cpu_hotplug.refcount++; +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + + } + EXPORT_SYMBOL_GPL(get_online_cpus); +@@ -194,14 +346,13 @@ void put_online_cpus(void) + if (cpu_hotplug.active_writer == current) + return; + +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) + wake_up_process(cpu_hotplug.active_writer); +- hotplug_unlock(); +- ++ mutex_unlock(&cpu_hotplug.lock); + } + EXPORT_SYMBOL_GPL(put_online_cpus); + +@@ -232,11 +383,11 @@ void cpu_hotplug_begin(void) + cpu_hotplug.active_writer = current; + + for (;;) { +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + if (likely(!cpu_hotplug.refcount)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + schedule(); + } + } +@@ -244,7 +395,7 @@ void cpu_hotplug_begin(void) + void cpu_hotplug_done(void) + { + cpu_hotplug.active_writer = NULL; +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + } + + /* +@@ -458,6 +609,9 @@ static int __ref _cpu_down(unsigned int + + smpboot_park_threads(cpu); + ++ /* Notifiers are done. Don't let any more tasks pin this CPU. */ ++ cpu_unplug_sync(cpu); ++ + /* + * So now all preempt/rcu users must observe !cpu_active(). 
+ */ +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2647,7 +2647,7 @@ void migrate_disable(void) + { + struct task_struct *p = current; + +- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { ++ if (in_atomic()) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; + #endif +@@ -2677,7 +2677,7 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + +- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { ++ if (in_atomic()) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; + #endif +@@ -4717,6 +4717,84 @@ void do_set_cpus_allowed(struct task_str + cpumask_copy(&p->cpus_allowed, new_mask); + } + ++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); ++static DEFINE_MUTEX(sched_down_mutex); ++static cpumask_t sched_down_cpumask; ++ ++void tell_sched_cpu_down_begin(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_set_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++void tell_sched_cpu_down_done(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_clear_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++/** ++ * migrate_me - try to move the current task off this cpu ++ * ++ * Used by the pin_current_cpu() code to try to get tasks ++ * to move off the current CPU as it is going down. ++ * It will only move the task if the task isn't pinned to ++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) ++ * and the task has to be in a RUNNING state. Otherwise the ++ * movement of the task will wake it up (change its state ++ * to running) when the task did not expect it. ++ * ++ * Returns 1 if it succeeded in moving the current task ++ * 0 otherwise. ++ */ ++int migrate_me(void) ++{ ++ struct task_struct *p = current; ++ struct migration_arg arg; ++ struct cpumask *cpumask; ++ struct cpumask *mask; ++ unsigned long flags; ++ unsigned int dest_cpu; ++ struct rq *rq; ++ ++ /* ++ * We can not migrate tasks bounded to a CPU or tasks not ++ * running. The movement of the task will wake it up. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY || p->state) ++ return 0; ++ ++ mutex_lock(&sched_down_mutex); ++ rq = task_rq_lock(p, &flags); ++ ++ cpumask = &__get_cpu_var(sched_cpumasks); ++ mask = &p->cpus_allowed; ++ ++ cpumask_andnot(cpumask, mask, &sched_down_cpumask); ++ ++ if (!cpumask_weight(cpumask)) { ++ /* It's only on this CPU? 
*/ ++ task_rq_unlock(rq, p, &flags); ++ mutex_unlock(&sched_down_mutex); ++ return 0; ++ } ++ ++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); ++ ++ arg.task = p; ++ arg.dest_cpu = dest_cpu; ++ ++ task_rq_unlock(rq, p, &flags); ++ ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ tlb_migrate_finish(p->mm); ++ mutex_unlock(&sched_down_mutex); ++ ++ return 1; ++} ++ + /* + * This is how migration works: + * diff --git a/debian/patches/features/all/rt/cpu-rt-variants.patch b/debian/patches/features/all/rt/cpu-rt-variants.patch new file mode 100644 index 000000000..e98b003c0 --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-variants.patch @@ -0,0 +1,27 @@ +Subject: cpu-rt-variants.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 15:42:38 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/smp.h | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -182,6 +182,14 @@ static inline void kick_all_cpus_sync(vo + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define get_cpu_light() get_cpu() ++# define put_cpu_light() put_cpu() ++#else ++# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++# define put_cpu_light() migrate_enable() ++#endif ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch new file mode 100644 index 000000000..846583cea --- /dev/null +++ b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch @@ -0,0 +1,108 @@ +From a1b3b9eafb916f839a09dcde745518a5ad6703db Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Tue, 4 Mar 2014 12:28:32 -0500 +Subject: [PATCH] cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We hit another bug that was caused by switching cpu_chill() from +msleep() to hrtimer_nanosleep(). + +This time it is a livelock. The problem is that hrtimer_nanosleep() +calls schedule with the state == TASK_INTERRUPTIBLE. But these means +that if a signal is pending, the scheduler wont schedule, and will +simply change the current task state back to TASK_RUNNING. This +nullifies the whole point of cpu_chill() in the first place. That is, +if a task is spinning on a try_lock() and it preempted the owner of the +lock, if it has a signal pending, it will never give up the CPU to let +the owner of the lock run. + +I made a static function __hrtimer_nanosleep() that takes a fifth +parameter "state", which determines the task state of that the +nanosleep() will be in. The normal hrtimer_nanosleep() will act the +same, but cpu_chill() will call the __hrtimer_nanosleep() directly with +the TASK_UNINTERRUPTIBLE state. + +cpu_chill() only cares that the first sleep happens, and does not care +about the state of the restart schedule (in hrtimer_nanosleep_restart). 
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Ulrich Obergfell +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/hrtimer.c | 25 ++++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1770,12 +1770,13 @@ void hrtimer_init_sleeper(struct hrtimer + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); + +-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) ++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, ++ unsigned long state) + { + hrtimer_init_sleeper(t, current); + + do { +- set_current_state(TASK_INTERRUPTIBLE); ++ set_current_state(state); + hrtimer_start_expires(&t->timer, mode); + if (!hrtimer_active(&t->timer)) + t->task = NULL; +@@ -1819,7 +1820,8 @@ long __sched hrtimer_nanosleep_restart(s + HRTIMER_MODE_ABS); + hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); + +- if (do_nanosleep(&t, HRTIMER_MODE_ABS)) ++ /* cpu_chill() does not care about restart state. */ ++ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) + goto out; + + rmtp = restart->nanosleep.rmtp; +@@ -1836,8 +1838,10 @@ long __sched hrtimer_nanosleep_restart(s + return ret; + } + +-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, +- const enum hrtimer_mode mode, const clockid_t clockid) ++static long ++__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid, ++ unsigned long state) + { + struct restart_block *restart; + struct hrtimer_sleeper t; +@@ -1850,7 +1854,7 @@ long hrtimer_nanosleep(struct timespec * + + hrtimer_init_on_stack(&t.timer, clockid, mode); + hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); +- if (do_nanosleep(&t, mode)) ++ if (do_nanosleep(&t, mode, state)) + goto out; + + /* Absolute timers do not update the rmtp value and restart: */ +@@ -1877,6 +1881,12 @@ long hrtimer_nanosleep(struct timespec * + return ret; + } + ++long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid) ++{ ++ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); ++} ++ + SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, + struct timespec __user *, rmtp) + { +@@ -1903,7 +1913,8 @@ void cpu_chill(void) + unsigned int freeze_flag = current->flags & PF_NOFREEZE; + + current->flags |= PF_NOFREEZE; +- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); ++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, ++ TASK_UNINTERRUPTIBLE); + if (!freeze_flag) + current->flags &= ~PF_NOFREEZE; + } diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch new file mode 100644 index 000000000..aa7b67d25 --- /dev/null +++ b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch @@ -0,0 +1,54 @@ +From linux-rt-users-owner@vger.kernel.org Thu Nov 7 03:07:12 2013 +From: Tiejun Chen +Subject: [v1][PATCH] cpu_down: move migrate_enable() back +Date: Thu, 7 Nov 2013 10:06:07 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to +use migrate_enable()/migrate_disable() to replace that combination +of preempt_enable() and preempt_disable(), but actually in +!CONFIG_PREEMPT_RT_FULL 
case, migrate_enable()/migrate_disable() +are still equal to preempt_enable()/preempt_disable(). So that +followed cpu_hotplug_begin()/cpu_unplug_begin(cpu) would go schedule() +to trigger schedule_debug() like this: + +_cpu_down() + | + + migrate_disable() = preempt_disable() + | + + cpu_hotplug_begin() or cpu_unplug_begin() + | + + schedule() + | + + __schedule() + | + + preempt_disable(); + | + + __schedule_bug() is true! + +So we should move migrate_enable() as the original scheme. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Tiejun Chen +--- + kernel/cpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -601,6 +601,7 @@ static int __ref _cpu_down(unsigned int + err = -EBUSY; + goto restore_cpus; + } ++ migrate_enable(); + + cpu_hotplug_begin(); + err = cpu_unplug_begin(cpu); +@@ -673,7 +674,6 @@ static int __ref _cpu_down(unsigned int + out_release: + cpu_unplug_done(cpu); + out_cancel: +- migrate_enable(); + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch new file mode 100644 index 000000000..5d92c7e00 --- /dev/null +++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch @@ -0,0 +1,35 @@ +Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT +From: Thomas Gleixner +Date: Wed, 14 Dec 2011 01:03:49 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We can't deal with the cpumask allocations which happen in atomic +context (see arch/x86/kernel/apic/io_apic.c) on RT right now. + +Signed-off-by: Thomas Gleixner +--- + arch/x86/Kconfig | 2 +- + lib/Kconfig | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -811,7 +811,7 @@ config IOMMU_HELPER + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -357,6 +357,7 @@ config CHECK_SIGNATURE + + config CPUMASK_OFFSTACK + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS ++ depends on !PREEMPT_RT_FULL + help + Use dynamic allocation for cpumask_var_t, instead of putting + them on the stack. 
This is a bit more expensive, but avoids diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch new file mode 100644 index 000000000..f6d600777 --- /dev/null +++ b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch @@ -0,0 +1,243 @@ +From 0fcf777e2f217e61564bd30a2c39cb49d0e0b8c3 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 21 Feb 2014 17:24:04 +0100 +Subject: [PATCH] crypto: Reduce preempt disabled regions, more algos +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Don Estabrook reported +| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() +| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200() +| kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() + +and his backtrace showed some crypto functions which looked fine. + +The problem is the following sequence: + +glue_xts_crypt_128bit() +{ + blkcipher_walk_virt(); /* normal migrate_disable() */ + + glue_fpu_begin(); /* get atomic */ + + while (nbytes) { + __glue_xts_crypt_128bit(); + blkcipher_walk_done(); /* with nbytes = 0, migrate_enable() + * while we are atomic */ + }; + glue_fpu_end() /* no longer atomic */ +} + +and this is why the counter get out of sync and the warning is printed. +The other problem is that we are non-preemptible between +glue_fpu_begin() and glue_fpu_end() and the latency grows. To fix this, +I shorten the FPU off region and ensure blkcipher_walk_done() is called +with preemption enabled. This might hurt the performance because we now +enable/disable the FPU state more often but we gain lower latency and +the bug is gone. 
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Don Estabrook +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/crypto/cast5_avx_glue.c | 21 +++++++++------------ + arch/x86/crypto/glue_helper.c | 31 +++++++++++++++---------------- + 2 files changed, 24 insertions(+), 28 deletions(-) + +--- a/arch/x86/crypto/cast5_avx_glue.c ++++ b/arch/x86/crypto/cast5_avx_glue.c +@@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fp + static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, + bool enc) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = CAST5_BLOCK_SIZE; + unsigned int nbytes; +@@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_de + u8 *wsrc = walk->src.virt.addr; + u8 *wdst = walk->dst.virt.addr; + +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + + /* Process multi-block batch */ + if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { +@@ -104,10 +104,9 @@ static int ecb_crypt(struct blkcipher_de + } while (nbytes >= bsize); + + done: ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -231,7 +230,7 @@ static unsigned int __cbc_decrypt(struct + static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -240,12 +239,11 @@ static int cbc_decrypt(struct blkcipher_ + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + while ((nbytes = walk.nbytes)) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __cbc_decrypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -315,7 +313,7 @@ static unsigned int __ctr_crypt(struct b + static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -324,13 +322,12 @@ static int ctr_crypt(struct blkcipher_de + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __ctr_crypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- cast5_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + ctr_crypt_final(desc, &walk); + err = blkcipher_walk_done(desc, &walk, 0); +--- a/arch/x86/crypto/glue_helper.c ++++ b/arch/x86/crypto/glue_helper.c +@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const + void *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = 128 / 8; + unsigned int nbytes, i, func_bytes; +- bool fpu_enabled = false; ++ bool fpu_enabled; + int err; + + err = blkcipher_walk_virt(desc, walk); +@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const + u8 *wdst = walk->dst.virt.addr; + + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + + for (i = 0; i < gctx->num_funcs; i++) { + func_bytes = bsize * gctx->funcs[i].num_blocks; +@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const + } + + done: ++ 
glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; + } + +@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct + struct scatterlist *src, unsigned int nbytes) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct + + while ((nbytes = walk.nbytes)) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; + } + EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); +@@ -278,7 +278,7 @@ int glue_ctr_crypt_128bit(const struct c + struct scatterlist *src, unsigned int nbytes) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -287,13 +287,12 @@ int glue_ctr_crypt_128bit(const struct c + + while ((nbytes = walk.nbytes) >= bsize) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + glue_ctr_crypt_final_128bit( + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); +@@ -348,7 +347,7 @@ int glue_xts_crypt_128bit(const struct c + void *tweak_ctx, void *crypt_ctx) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -361,21 +360,21 @@ int glue_xts_crypt_128bit(const struct c + + /* set minimum length to bsize, for tweak_fn */ + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, ++ desc, false, + nbytes < bsize ? 
bsize : nbytes); +- + /* calculate first value of T */ + tweak_fn(tweak_ctx, walk.iv, walk.iv); ++ glue_fpu_end(fpu_enabled); + + while (nbytes) { ++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, ++ desc, false, nbytes); + nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); + ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + nbytes = walk.nbytes; + } +- +- glue_fpu_end(fpu_enabled); +- + return err; + } + EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch new file mode 100644 index 000000000..aad904aaa --- /dev/null +++ b/debian/patches/features/all/rt/debugobjects-rt.patch @@ -0,0 +1,24 @@ +Subject: debugobjects-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:41:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + lib/debugobjects.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -308,7 +308,10 @@ static void + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + diff --git a/debian/patches/features/all/rt/disable-lazy-preempt-on-x86-64.patch b/debian/patches/features/all/rt/disable-lazy-preempt-on-x86-64.patch new file mode 100644 index 000000000..29c6b5def --- /dev/null +++ b/debian/patches/features/all/rt/disable-lazy-preempt-on-x86-64.patch @@ -0,0 +1,88 @@ +From 27bcad87397de27b92b8651630771e032cf87116 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 11 Apr 2014 20:12:43 +0200 +Subject: [PATCH] disable lazy preempt on x86-64 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/Kconfig | 2 +- + arch/x86/kernel/entry_64.S | 28 ++++++++-------------------- + 2 files changed, 9 insertions(+), 21 deletions(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -21,7 +21,7 @@ config X86_64 + ### Arch settings + config X86 + def_bool y +- select HAVE_PREEMPT_LAZY ++ select HAVE_PREEMPT_LAZY if X86_32 + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -658,8 +658,8 @@ GLOBAL(system_call_after_swapgs) + /* Handle reschedules */ + /* edx: work, edi: workmask */ + sysret_careful: +- testl $_TIF_NEED_RESCHED_MASK,%edx +- jz sysret_signal ++ bt $TIF_NEED_RESCHED,%edx ++ jnc sysret_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -771,8 +771,8 @@ GLOBAL(int_with_check) + /* First do a reschedule test. */ + /* edx: work, edi: workmask */ + int_careful: +- testl $_TIF_NEED_RESCHED_MASK,%edx +- jz int_very_careful ++ bt $TIF_NEED_RESCHED,%edx ++ jnc int_very_careful + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1071,8 +1071,8 @@ ENTRY(native_iret) + /* edi: workmask, edx: work */ + retint_careful: + CFI_RESTORE_STATE +- testl $_TIF_NEED_RESCHED_MASK,%edx +- jz retint_signal ++ bt $TIF_NEED_RESCHED,%edx ++ jnc retint_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1104,19 +1104,7 @@ ENTRY(native_iret) + /* rcx: threadinfo. interrupts off. 
*/ + ENTRY(retint_kernel) + cmpl $0,PER_CPU_VAR(__preempt_count) +- jz check_int_off +- +- # atleast preempt count == 0 ? +- cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count) +- jnz retint_restore_args +- +- cmpl $0, TI_preempt_lazy_count(%rcx) +- jnz retint_restore_args +- +- bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) +- jnc retint_restore_args +- +-check_int_off: ++ jnz retint_restore_args + bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ + jnc retint_restore_args + call preempt_schedule_irq +@@ -1552,7 +1540,7 @@ ENTRY(paranoid_exit) + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ +- testl $_TIF_NEED_RESCHED_MASK,%ebx ++ testl $_TIF_NEED_RESCHED,%ebx + jnz paranoid_schedule + movl %ebx,%edx /* arg3: thread flags */ + TRACE_IRQS_ON diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch new file mode 100644 index 000000000..4473e0bc6 --- /dev/null +++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch @@ -0,0 +1,35 @@ +Subject: dm: Make rt aware +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 23:06:09 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has +interrupts legitimately enabled here as we cant deadlock against the +irq thread due to the "sleeping spinlocks" conversion. + +Reported-by: Luis Claudio R. Goncalves +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + drivers/md/dm.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1716,14 +1716,14 @@ static void dm_request_fn(struct request + if (map_request(ti, clone, md)) + goto requeued; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); + } + + goto out; + + requeued: +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); + + delay_and_out: diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch new file mode 100644 index 000000000..091ddec97 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch @@ -0,0 +1,26 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:24 -0500 +Subject: drivers/net: Use disable_irq_nosync() in 8139too +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use disable_irq_nosync() instead of disable_irq() as this might be +called in atomic context with netpoll. 
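+[note: disable_irq() not only masks the line, it also waits for any handler
+that is still running (and with forced irq threading that wait can sleep),
+so it must not be used from the atomic netpoll path; disable_irq_nosync()
+only masks the line and returns immediately.]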
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/net/ethernet/realtek/8139too.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/realtek/8139too.c ++++ b/drivers/net/ethernet/realtek/8139too.c +@@ -2215,7 +2215,7 @@ static void rtl8139_poll_controller(stru + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + +- disable_irq(irq); ++ disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); + } diff --git a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch new file mode 100644 index 000000000..1cdcaf74b --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch @@ -0,0 +1,127 @@ +From: Thomas Gleixner +Date: Sat, 20 Jun 2009 11:36:54 +0200 +Subject: drivers/net: fix livelock issues +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro +optimization. The reason is that the softirq thread is rescheduling +itself on that return value. Depending on priorities it starts to +monoplize the CPU and livelock on UP systems. + +Remove it. + +Signed-off-by: Thomas Gleixner + +--- + drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +----- + drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +-- + drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +-- + drivers/net/ethernet/neterion/s2io.c | 7 +------ + drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++---- + drivers/net/ethernet/tehuti/tehuti.c | 9 ++------- + drivers/net/rionet.c | 6 +----- + 7 files changed, 9 insertions(+), 31 deletions(-) + +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -2217,11 +2217,7 @@ static netdev_tx_t atl1c_xmit_frame(stru + } + + tpd_req = atl1c_cal_tpd_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { +- if (netif_msg_pktdata(adapter)) +- dev_info(&adapter->pdev->dev, "tx locked\n"); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ +--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +@@ -1883,8 +1883,7 @@ static netdev_tx_t atl1e_xmit_frame(stru + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) +- return NETDEV_TX_LOCKED; ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ +--- a/drivers/net/ethernet/chelsio/cxgb/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb/sge.c +@@ -1663,8 +1663,7 @@ static int t1_sge_tx(struct sk_buff *skb + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + +- if (!spin_trylock(&q->lock)) +- return NETDEV_TX_LOCKED; ++ spin_lock(&q->lock); + + reclaim_completed_tx(sge, q); + +--- a/drivers/net/ethernet/neterion/s2io.c ++++ b/drivers/net/ethernet/neterion/s2io.c +@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_b + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + +- if (do_spin_lock) +- spin_lock_irqsave(&fifo->tx_lock, flags); +- else { +- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) +- return NETDEV_TX_LOCKED; +- } ++ 
spin_lock_irqsave(&fifo->tx_lock, flags); + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -2141,10 +2141,8 @@ static int pch_gbe_xmit_frame(struct sk_ + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + unsigned long flags; + +- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { +- /* Collision - tell upper layer to requeue */ +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&tx_ring->tx_lock, flags); ++ + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); +--- a/drivers/net/ethernet/tehuti/tehuti.c ++++ b/drivers/net/ethernet/tehuti/tehuti.c +@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc + unsigned long flags; + + ENTER; +- local_irq_save(flags); +- if (!spin_trylock(&priv->tx_lock)) { +- local_irq_restore(flags); +- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", +- BDX_DRV_NAME, ndev->name); +- return NETDEV_TX_LOCKED; +- } ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ +--- a/drivers/net/rionet.c ++++ b/drivers/net/rionet.c +@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b + unsigned long flags; + int add_num = 1; + +- local_irq_save(flags); +- if (!spin_trylock(&rnet->tx_lock)) { +- local_irq_restore(flags); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&rnet->tx_lock, flags); + + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nets[rnet->mport->id].nact; diff --git a/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch new file mode 100644 index 000000000..fd2b9af1a --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch @@ -0,0 +1,56 @@ +From: Thomas Gleixner +Date: Thu, 1 Apr 2010 20:20:57 +0200 +Subject: drivers: net: gianfar: Make RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The adjust_link() disables interrupts before taking the queue +locks. On RT those locks are converted to "sleeping" locks and +therefor the local_irq_save/restore must be converted to +local_irq_save/restore_nort. 
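+[note: the *_nort() helpers are introduced elsewhere in this series; as a
+rough sketch, assuming the usual RT definitions, they expand to
+
+  #ifdef CONFIG_PREEMPT_RT_FULL
+  # define local_irq_save_nort(flags)     local_save_flags(flags)
+  # define local_irq_restore_nort(flags)  (void)(flags)
+  #else
+  # define local_irq_save_nort(flags)     local_irq_save(flags)
+  # define local_irq_restore_nort(flags)  local_irq_restore(flags)
+  #endif
+
+i.e. on RT interrupts stay enabled and the sleeping queue locks provide the
+serialization, while !RT keeps the original behaviour.]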
+ +Reported-by: Xianghua Xiao +Signed-off-by: Thomas Gleixner +Tested-by: Xianghua Xiao + +--- + drivers/net/ethernet/freescale/gianfar.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -1716,7 +1716,7 @@ void stop_gfar(struct net_device *dev) + + + /* Lock it down */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -1724,7 +1724,7 @@ void stop_gfar(struct net_device *dev) + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { +@@ -3105,7 +3105,7 @@ static void adjust_link(struct net_devic + struct phy_device *phydev = priv->phydev; + int new_state = 0; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + if (phydev->link) { +@@ -3179,7 +3179,7 @@ static void adjust_link(struct net_devic + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + /* Update the hash table based on the current list of multicast diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch new file mode 100644 index 000000000..01de7e842 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch @@ -0,0 +1,49 @@ +From: Steven Rostedt +Date: Fri, 3 Jul 2009 08:30:00 -0500 +Subject: drivers/net: vortex fix locking issues +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Argh, cut and paste wasn't enough... + +Use this patch instead. It needs an irq disable. But, believe it or not, +on SMP this is actually better. If the irq is shared (as it is in Mark's +case), we don't stop the irq of other devices from being handled on +another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). + +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + + drivers/net/ethernet/3com/3c59x.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Signed-off-by: Ingo Molnar + +--- a/drivers/net/ethernet/3com/3c59x.c ++++ b/drivers/net/ethernet/3com/3c59x.c +@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1917,12 +1917,12 @@ static void vortex_tx_timeout(struct net + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch new file mode 100644 index 000000000..c1001c743 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch @@ -0,0 +1,33 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:30 -0500 +Subject: drivers: random: Reduce preempt disabled region +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +No need to keep preemption disabled across the whole function. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/char/random.c | 3 --- + 1 file changed, 3 deletions(-) + +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -772,8 +772,6 @@ static void add_timer_randomness(struct + } sample; + long delta, delta2, delta3; + +- preempt_disable(); +- + sample.jiffies = jiffies; + sample.cycles = random_get_entropy(); + sample.num = num; +@@ -814,7 +812,6 @@ static void add_timer_randomness(struct + */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + } +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, diff --git a/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch new file mode 100644 index 000000000..8de8786c4 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch @@ -0,0 +1,28 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:01 -0500 +Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar + +--- + drivers/tty/tty_buffer.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/tty/tty_buffer.c ++++ b/drivers/tty/tty_buffer.c +@@ -504,10 +504,14 @@ void tty_flip_buffer_push(struct tty_por + + buf->tail->commit = buf->tail->used; + ++#ifndef CONFIG_PREEMPT_RT_FULL + if (port->low_latency) + flush_to_ldisc(&buf->work); + else + schedule_work(&buf->work); ++#else ++ schedule_work(&buf->work); ++#endif + } + EXPORT_SYMBOL(tty_flip_buffer_push); + diff --git a/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch new file mode 100644 index 000000000..a5c457014 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch @@ -0,0 +1,55 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:01 -0500 +Subject: [PATCH] serial: 8250: Clean up the locking for -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +In -RT the spin_lock_irqsave() does not spin but sleep if the lock is +taken. 
Before that, local_irq_save() is invoked which disables +interrupts even on -RT. Therefore local_irq_save() + spin_lock() does not +work. +In the ->sysrq and oops_in_progress case it is save to trylock the lock +i.e. this is what we do now anyway except for ->sysrq where we assume +that the lock is already taken. + +The spin_lock_irqsave() grabs the lock and disables the interrupts on +vanilla (the same behavior) and on -RT it won't disable interrupts. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +[bigeasy: add a patch description] +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/tty/serial/8250/8250_core.c | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -2882,14 +2882,10 @@ serial8250_console_write(struct console + + touch_nmi_watchdog(); + +- local_irq_save(flags); +- if (port->sysrq) { +- /* serial8250_handle_irq() already took the lock */ +- locked = 0; +- } else if (oops_in_progress) { +- locked = spin_trylock(&port->lock); +- } else +- spin_lock(&port->lock); ++ if (port->sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&port->lock, flags); ++ else ++ spin_lock_irqsave(&port->lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -2921,8 +2917,7 @@ serial8250_console_write(struct console + serial8250_modem_status(up); + + if (locked) +- spin_unlock(&port->lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&port->lock, flags); + } + + static int __init serial8250_console_setup(struct console *co, char *options) diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch new file mode 100644 index 000000000..3a7262803 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch @@ -0,0 +1,39 @@ +Subject: drivers-tty-fix-omap-lock-crap.patch +From: Thomas Gleixner +Date: Thu, 28 Jul 2011 13:32:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/omap-serial.c | 12 ++++-------- + 1 file changed, 4 insertions(+), 8 deletions(-) + +--- a/drivers/tty/serial/omap-serial.c ++++ b/drivers/tty/serial/omap-serial.c +@@ -1266,13 +1266,10 @@ serial_omap_console_write(struct console + + pm_runtime_get_sync(up->dev); + +- local_irq_save(flags); +- if (up->port.sysrq) +- locked = 0; +- else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -1301,8 +1298,7 @@ serial_omap_console_write(struct console + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch new file mode 100644 index 000000000..1a1cc597b --- /dev/null +++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch @@ -0,0 +1,45 @@ +Subject: drivers-tty-pl011-irq-disable-madness.patch +From: Thomas Gleixner +Date: Tue, 08 Jan 2013 
21:36:51 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/amba-pl011.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -1919,13 +1919,19 @@ pl011_console_write(struct console *co, + + clk_enable(uap->clk); + +- local_irq_save(flags); ++ /* ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. Aside of ++ * that the port.sysrq check is racy on SMP regardless. ++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -1947,8 +1953,7 @@ pl011_console_write(struct console *co, + writew(old_cr, uap->port.membase + UART011_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch new file mode 100644 index 000000000..ead4afbdd --- /dev/null +++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch @@ -0,0 +1,60 @@ +From d841118ac80c5bfb18f47984bc40687eed08b714 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 25 Apr 2013 18:12:52 +0200 +Subject: [PATCH] drm/i915: drop trace_i915_gem_ring_dispatch on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This tracepoint is responsible for: + +|[<814cc358>] __schedule_bug+0x4d/0x59 +|[<814d24cc>] __schedule+0x88c/0x930 +|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50 +|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50 +|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250 +|[<814d27d9>] schedule+0x29/0x70 +|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278 +|[<814d3786>] rt_spin_lock+0x26/0x30 +|[] gen6_gt_force_wake_get+0x29/0x60 [i915] +|[] gen6_ring_get_irq+0x5f/0x100 [i915] +|[] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915] +|[] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915] +|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60 +|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180 +|[<8101d063>] ? native_sched_clock+0x13/0x80 +|[] i915_gem_execbuffer2+0x99/0x280 [i915] +|[] drm_ioctl+0x4c3/0x570 [drm] +|[<8101d0d9>] ? sched_clock+0x9/0x10 +|[] ? i915_gem_execbuffer+0x480/0x480 [i915] +|[<810f1c18>] ? rb_commit+0x68/0xa0 +|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0 +|[<81197467>] do_vfs_ioctl+0x97/0x540 +|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130 +|[<811979a1>] sys_ioctl+0x91/0xb0 +|[<814db931>] tracesys+0xe1/0xe6 + +Chris Wilson does not like to move i915_trace_irq_get() out of the macro + +|No. This enables the IRQ, as well as making a number of +|very expensively serialised read, unconditionally. + +so it is gone now on RT. 
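+[note: as the backtrace above shows, the tracepoint ends up in
+gen6_gt_force_wake_get(), which takes a lock that becomes a sleeping
+rt_spin_lock on RT, from a section that must not schedule; rather than
+reworking the i915 locking, the tracepoint is simply compiled out for
+PREEMPT_RT_BASE.]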
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Joakim Hernberg +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -1243,7 +1243,9 @@ i915_gem_do_execbuffer(struct drm_device + goto err; + } + ++#ifndef CONFIG_PREEMPT_RT_BASE + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); ++#endif + + i915_gem_execbuffer_move_to_active(&eb->vmas, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); diff --git a/debian/patches/features/all/rt/early-printk-consolidate.patch b/debian/patches/features/all/rt/early-printk-consolidate.patch new file mode 100644 index 000000000..e0486eafa --- /dev/null +++ b/debian/patches/features/all/rt/early-printk-consolidate.patch @@ -0,0 +1,45 @@ +Subject: early-printk-consolidate.patch +From: Thomas Gleixner +Date: Sat, 23 Jul 2011 11:04:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/sparc/kernel/setup_32.c | 1 + + arch/sparc/kernel/setup_64.c | 8 +++++++- + 2 files changed, 8 insertions(+), 1 deletion(-) + +--- a/arch/sparc/kernel/setup_32.c ++++ b/arch/sparc/kernel/setup_32.c +@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p) + + boot_flags_init(*cmdline_p); + ++ early_console = &prom_early_console; + register_console(&prom_early_console); + + printk("ARCH: "); +--- a/arch/sparc/kernel/setup_64.c ++++ b/arch/sparc/kernel/setup_64.c +@@ -555,6 +555,12 @@ static void __init init_sparc64_elf_hwca + pause_patch(); + } + ++static inline void register_prom_console(void) ++{ ++ early_console = &prom_early_console; ++ register_console(&prom_early_console); ++} ++ + void __init setup_arch(char **cmdline_p) + { + /* Initialize PROM console and command line. 
*/ +@@ -566,7 +572,7 @@ void __init setup_arch(char **cmdline_p) + #ifdef CONFIG_EARLYFB + if (btext_find_display()) + #endif +- register_console(&prom_early_console); ++ register_prom_console(); + + if (tlb_type == hypervisor) + printk("ARCH: SUN4V\n"); diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch new file mode 100644 index 000000000..7bedbb9ad --- /dev/null +++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch @@ -0,0 +1,27 @@ +Subject: epoll.patch +From: Thomas Gleixner +Date: Fri, 08 Jul 2011 16:35:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + fs/eventpoll.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *pri + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + static void ep_remove_wait_queue(struct eppoll_entry *pwq) diff --git a/debian/patches/features/all/rt/filemap-fix-up.patch b/debian/patches/features/all/rt/filemap-fix-up.patch new file mode 100644 index 000000000..3aab5b90b --- /dev/null +++ b/debian/patches/features/all/rt/filemap-fix-up.patch @@ -0,0 +1,23 @@ +Subject: filemap-fix-up.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 18:56:24 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Cc: Peter Zijlstra +Signed-off-by: Thomas Gleixner +Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org +--- + mm/filemap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -1974,7 +1974,7 @@ size_t iov_iter_copy_from_user_atomic(st + char *kaddr; + size_t copied; + +- BUG_ON(!in_atomic()); ++ BUG_ON(!pagefault_disabled()); + kaddr = kmap_atomic(page); + if (likely(i->nr_segs == 1)) { + int left; diff --git a/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch new file mode 100644 index 000000000..2cf0e441d --- /dev/null +++ b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch @@ -0,0 +1,113 @@ +From: Steven Rostedt +Subject: x86: Do not disable preemption in int3 on 32bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Preemption must be disabled before enabling interrupts in do_trap +on x86_64 because the stack in use for int3 and debug is a per CPU +stack set by th IST. But 32bit does not have an IST and the stack +still belongs to the current task and there is no problem in scheduling +out the task. + +Keep preemption enabled on X86_32 when enabling interrupts for +do_trap(). + +The name of the function is changed from preempt_conditional_sti/cli() +to conditional_sti/cli_ist(), to annotate that this function is used +when the stack is on the IST. 
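+[note: the IST (Interrupt Stack Table) is the x86_64 mechanism that switches
+selected exceptions such as int3, debug and NMI onto dedicated per-CPU
+stacks; that is why the task must not be scheduled out while running on one
+of them, whereas 32bit keeps using the task's own stack and may schedule
+safely.]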
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++--------- + 1 file changed, 23 insertions(+), 9 deletions(-) + +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -86,9 +86,21 @@ static inline void conditional_sti(struc + local_irq_enable(); + } + +-static inline void preempt_conditional_sti(struct pt_regs *regs) ++static inline void conditional_sti_ist(struct pt_regs *regs) + { ++#ifdef CONFIG_X86_64 ++ /* ++ * X86_64 uses a per CPU stack on the IST for certain traps ++ * like int3. The task can not be preempted when using one ++ * of these stacks, thus preemption must be disabled, otherwise ++ * the stack can be corrupted if the task is scheduled out, ++ * and another task comes in and uses this stack. ++ * ++ * On x86_32 the task keeps its own stack and it is OK if the ++ * task schedules out. ++ */ + preempt_count_inc(); ++#endif + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); + } +@@ -99,11 +111,13 @@ static inline void conditional_cli(struc + local_irq_disable(); + } + +-static inline void preempt_conditional_cli(struct pt_regs *regs) ++static inline void conditional_cli_ist(struct pt_regs *regs) + { + if (regs->flags & X86_EFLAGS_IF) + local_irq_disable(); ++#ifdef CONFIG_X86_64 + preempt_count_dec(); ++#endif + } + + static int __kprobes +@@ -232,9 +246,9 @@ dotraplinkage void do_stack_segment(stru + prev_state = exception_enter(); + if (notify_die(DIE_TRAP, "stack segment", regs, error_code, + X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + } + exception_exit(prev_state); + } +@@ -343,9 +357,9 @@ dotraplinkage void __kprobes notrace do_ + * as we may switch to the interrupt stack. 
+ */ + debug_stack_usage_inc(); +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + exit: + exception_exit(prev_state); +@@ -451,12 +465,12 @@ dotraplinkage void __kprobes do_debug(st + debug_stack_usage_inc(); + + /* It's safe to allow irq's after DR6 has been saved */ +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, + X86_TRAP_DB); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + goto exit; + } +@@ -476,7 +490,7 @@ dotraplinkage void __kprobes do_debug(st + si_code = get_si_code(tsk->thread.debugreg6); + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + send_sigtrap(tsk, regs, error_code, si_code); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + + exit: diff --git a/debian/patches/features/all/rt/fixup_opencoded_completions.patch b/debian/patches/features/all/rt/fixup_opencoded_completions.patch new file mode 100644 index 000000000..433d59393 --- /dev/null +++ b/debian/patches/features/all/rt/fixup_opencoded_completions.patch @@ -0,0 +1,55 @@ +From 53a9508f5983092928b0e6e12f400b686e1f04b1 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 28 Oct 2013 11:50:06 +0100 +Subject: [PATCH] a few open coded completions +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/wireless/orinoco/orinoco_usb.c | 2 +- + drivers/usb/gadget/f_fs.c | 2 +- + drivers/usb/gadget/inode.c | 4 ++-- + 3 files changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/net/wireless/orinoco/orinoco_usb.c ++++ b/drivers/net/wireless/orinoco/orinoco_usb.c +@@ -713,7 +713,7 @@ static void ezusb_req_ctx_wait(struct ez + while (!ctx->done.done && msecs--) + udelay(1000); + } else { +- wait_event_interruptible(ctx->done.wait, ++ swait_event_interruptible(ctx->done.wait, + ctx->done.done); + } + break; +--- a/drivers/usb/gadget/f_fs.c ++++ b/drivers/usb/gadget/f_fs.c +@@ -1120,7 +1120,7 @@ static void ffs_data_put(struct ffs_data + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || +- waitqueue_active(&ffs->ep0req_completion.wait)); ++ swaitqueue_active(&ffs->ep0req_completion.wait)); + kfree(ffs->dev_name); + kfree(ffs); + } +--- a/drivers/usb/gadget/inode.c ++++ b/drivers/usb/gadget/inode.c +@@ -340,7 +340,7 @@ ep_io (struct ep_data *epdata, void *buf + spin_unlock_irq (&epdata->dev->lock); + + if (likely (value == 0)) { +- value = wait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible (done.wait, done.done); + if (value != 0) { + spin_lock_irq (&epdata->dev->lock); + if (likely (epdata->ep != NULL)) { +@@ -349,7 +349,7 @@ ep_io (struct ep_data *epdata, void *buf + usb_ep_dequeue (epdata->ep, epdata->req); + spin_unlock_irq (&epdata->dev->lock); + +- wait_event (done.wait, done.done); ++ swait_event (done.wait, done.done); + if (epdata->status == -ECONNRESET) + epdata->status = -EINTR; + } else { diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch new file mode 100644 index 000000000..8047cf40d --- /dev/null +++ 
b/debian/patches/features/all/rt/fs-block-rt-support.patch @@ -0,0 +1,21 @@ +Subject: fs-block-rt-support.patch +From: Thomas Gleixner +Date: Tue, 14 Jun 2011 17:05:09 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + block/blk-core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -197,7 +197,7 @@ EXPORT_SYMBOL(blk_delay_queue); + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch new file mode 100644 index 000000000..a65826846 --- /dev/null +++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -0,0 +1,86 @@ +Subject: fs: dcache: Use cpu_chill() in trylock loops +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 21:00:34 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Use cpu_chill() instead of cpu_relax() to let the system +make progress. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + fs/autofs4/autofs_i.h | 1 + + fs/autofs4/expire.c | 2 +- + fs/dcache.c | 5 +++-- + fs/namespace.c | 3 ++- + 4 files changed, 7 insertions(+), 4 deletions(-) + +--- a/fs/autofs4/autofs_i.h ++++ b/fs/autofs4/autofs_i.h +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + #include + #include + +--- a/fs/autofs4/expire.c ++++ b/fs/autofs4/expire.c +@@ -157,7 +157,7 @@ static struct dentry *get_next_positive_ + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -517,7 +518,7 @@ dentry_kill(struct dentry *dentry, int u + relock: + if (unlock_on_failure) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + } + return dentry; /* try again with same dentry */ + } +@@ -2379,7 +2380,7 @@ void d_delete(struct dentry * dentry) + if (dentry->d_lockref.count == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto again; + } + dentry->d_flags &= ~DCACHE_CANT_MOUNT; +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include /* acct_auto_close_mnt */ +@@ -345,7 +346,7 @@ int __mnt_want_write(struct vfsmount *m) + smp_mb(); + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); +- cpu_relax(); ++ cpu_chill(); + preempt_disable(); + } + /* diff --git a/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch new file mode 100644 index 000000000..db7fe7cf8 --- /dev/null +++ b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch @@ -0,0 +1,30 @@ +From: Mike Galbraith +Date: Wed, 11 Jul 2012 22:05:20 +0000 +Subject: fs, jbd: pull your plug when waiting for space +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +With an -rt kernel, and a heavy sync IO load, tasks can jam +up on journal locks without unplugging, which can lead to +terminal IO starvation. Unplug and schedule when waiting for space. + +Signed-off-by: Mike Galbraith +Cc: Steven Rostedt +Cc: Theodore Tso +Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net +Signed-off-by: Thomas Gleixner + +--- + fs/jbd/checkpoint.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/jbd/checkpoint.c ++++ b/fs/jbd/checkpoint.c +@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou + if (journal->j_flags & JFS_ABORT) + return; + spin_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch new file mode 100644 index 000000000..ebd10cb1b --- /dev/null +++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch @@ -0,0 +1,101 @@ +From: Thomas Gleixner +Date: Fri, 18 Mar 2011 10:11:25 +0100 +Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +bit_spin_locks break under RT. + +Based on a previous patch from Steven Rostedt +Signed-off-by: Thomas Gleixner + +-- + + include/linux/buffer_head.h | 10 ++++++++++ + include/linux/jbd_common.h | 24 ++++++++++++++++++++++++ + 2 files changed, 34 insertions(+) + +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -77,6 +77,11 @@ struct buffer_head { + atomic_t b_count; /* users using this buffer_head */ + #ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t b_uptodate_lock; ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif + #endif + }; + +@@ -108,6 +113,11 @@ static inline void buffer_head_init_lock + { + #ifdef CONFIG_PREEMPT_RT_BASE + spin_lock_init(&bh->b_uptodate_lock); ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif + #endif + } + +--- a/include/linux/jbd_common.h ++++ b/include/linux/jbd_common.h +@@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ 
spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #endif diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch new file mode 100644 index 000000000..3fe048781 --- /dev/null +++ b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch @@ -0,0 +1,33 @@ +From c28e07715162bb1e1567a935b45772ca85a5267c Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 17 Feb 2014 17:30:03 +0100 +Subject: [PATCH] fs: jbd2: pull your plug when waiting for space +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Two cps in parallel managed to stall the the ext4 fs. It seems that +journal code is either waiting for locks or sleeping waiting for +something to happen. This seems similar to what Mike observed on ext3, +here is his description: + +|With an -rt kernel, and a heavy sync IO load, tasks can jam +|up on journal locks without unplugging, which can lead to +|terminal IO starvation. Unplug and schedule when waiting +|for space. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + fs/jbd2/checkpoint.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/jbd2/checkpoint.c ++++ b/fs/jbd2/checkpoint.c +@@ -125,6 +125,8 @@ void __jbd2_log_wait_for_space(journal_t + if (journal->j_flags & JBD2_ABORT) + return; + write_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch new file mode 100644 index 000000000..65da1ebaa --- /dev/null +++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch @@ -0,0 +1,31 @@ +From: Thomas Gleixner +Date: Sun, 19 Jul 2009 08:44:27 -0500 +Subject: fs: namespace preemption fix +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On RT we cannot loop with preemption disabled here as +mnt_make_readonly() might have been preempted. We can safely enable +preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT +as well. + +Signed-off-by: Thomas Gleixner + +--- + fs/namespace.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -343,8 +343,11 @@ int __mnt_want_write(struct vfsmount *m) + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); + cpu_relax(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. 
So we must not load that until diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch new file mode 100644 index 000000000..7e1d20796 --- /dev/null +++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch @@ -0,0 +1,60 @@ +From: Mike Galbraith +Date: Fri, 3 Jul 2009 08:44:12 -0500 +Subject: fs: ntfs: disable interrupt only on !RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote: +> * Nick Piggin wrote: +> +> > > [10138.175796] [] show_trace+0x12/0x14 +> > > [10138.180291] [] dump_stack+0x16/0x18 +> > > [10138.184769] [] native_smp_call_function_mask+0x138/0x13d +> > > [10138.191117] [] smp_call_function+0x1e/0x24 +> > > [10138.196210] [] on_each_cpu+0x25/0x50 +> > > [10138.200807] [] flush_tlb_all+0x1e/0x20 +> > > [10138.205553] [] kmap_high+0x1b6/0x417 +> > > [10138.210118] [] kmap+0x4d/0x4f +> > > [10138.214102] [] ntfs_end_buffer_async_read+0x228/0x2f9 +> > > [10138.220163] [] end_bio_bh_io_sync+0x26/0x3f +> > > [10138.225352] [] bio_endio+0x42/0x6d +> > > [10138.229769] [] __end_that_request_first+0x115/0x4ac +> > > [10138.235682] [] end_that_request_chunk+0x8/0xa +> > > [10138.241052] [] ide_end_request+0x55/0x10a +> > > [10138.246058] [] ide_dma_intr+0x6f/0xac +> > > [10138.250727] [] ide_intr+0x93/0x1e0 +> > > [10138.255125] [] handle_IRQ_event+0x5c/0xc9 +> > +> > Looks like ntfs is kmap()ing from interrupt context. Should be using +> > kmap_atomic instead, I think. +> +> it's not atomic interrupt context but irq thread context - and -rt +> remaps kmap_atomic() to kmap() internally. + +Hm. Looking at the change to mm/bounce.c, perhaps I should do this +instead? + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + fs/ntfs/aops.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/fs/ntfs/aops.c ++++ b/fs/ntfs/aops.c +@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s + recs = PAGE_CACHE_SIZE / rec_size; + /* Should have been verified before we got here... */ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch new file mode 100644 index 000000000..8e73067b6 --- /dev/null +++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch @@ -0,0 +1,162 @@ +From: Thomas Gleixner +Date: Fri, 18 Mar 2011 09:18:52 +0100 +Subject: buffer_head: Replace bh_uptodate_lock for -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Wrap the bit_spin_lock calls into a separate inline and add the RT +replacements with a real spinlock. 
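+[note: a bit spinlock is only a flag in b_state, taken with preemption
+disabled and without an owner, so it cannot be converted into a sleeping
+lock on RT; instead a real spinlock_t is added to struct buffer_head and
+used when PREEMPT_RT_BASE is set, wrapped by the bh_uptodate_lock_irqsave()
+helpers added below.]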
+ +Signed-off-by: Thomas Gleixner +--- + fs/buffer.c | 21 +++++++-------------- + fs/ntfs/aops.c | 10 +++------- + include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ + 3 files changed, 44 insertions(+), 21 deletions(-) + +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -322,8 +322,7 @@ static void end_buffer_async_read(struct + * decide that the page is now completely done. + */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -336,8 +335,7 @@ static void end_buffer_async_read(struct + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -349,9 +347,7 @@ static void end_buffer_async_read(struct + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -385,8 +381,7 @@ void end_buffer_async_write(struct buffe + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -398,15 +393,12 @@ void end_buffer_async_write(struct buffe + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3336,6 +3328,7 @@ struct buffer_head *alloc_buffer_head(gf + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +--- a/fs/ntfs/aops.c ++++ b/fs/ntfs/aops.c +@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -75,8 +75,42 @@ struct buffer_head { + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; 
++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch new file mode 100644 index 000000000..2059a195a --- /dev/null +++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch @@ -0,0 +1,82 @@ +Subject: ftrace-migrate-disable-tracing.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:56:42 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/ftrace_event.h | 2 ++ + kernel/trace/trace.c | 11 +++++++---- + kernel/trace/trace_events.c | 1 + + kernel/trace/trace_output.c | 5 +++++ + 4 files changed, 15 insertions(+), 4 deletions(-) + +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h +@@ -57,6 +57,8 @@ struct trace_entry { + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned short migrate_disable; ++ unsigned short padding; + }; + + #define FTRACE_MAX_EVENT \ +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -462,7 +462,7 @@ int __trace_puts(unsigned long ip, const + + local_save_flags(irq_flags); + buffer = global_trace.trace_buffer.buffer; +- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, ++ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, + irq_flags, preempt_count()); + if (!event) + return 0; +@@ -1552,6 +1552,8 @@ tracing_generic_entry_update(struct trac + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
tsk->migrate_disable & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -2462,9 +2464,10 @@ static void print_lat_help_header(struct + seq_puts(m, "# | / _----=> need-resched \n"); + seq_puts(m, "# || / _---=> hardirq/softirq \n"); + seq_puts(m, "# ||| / _--=> preempt-depth \n"); +- seq_puts(m, "# |||| / delay \n"); +- seq_puts(m, "# cmd pid ||||| time | caller \n"); +- seq_puts(m, "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# |||| / _--=> migrate-disable\n"); ++ seq_puts(m, "# ||||| / delay \n"); ++ seq_puts(m, "# cmd pid |||||| time | caller \n"); ++ seq_puts(m, "# \\ / ||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -160,6 +160,7 @@ static int trace_define_common_fields(vo + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned short, migrate_disable); + + return ret; + } +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -650,6 +650,11 @@ int trace_print_lat_fmt(struct trace_seq + else + ret = trace_seq_putc(s, '.'); + ++ if (entry->migrate_disable) ++ ret = trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ ret = trace_seq_putc(s, '.'); ++ + return ret; + } + diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch new file mode 100644 index 000000000..9fa3edb31 --- /dev/null +++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch @@ -0,0 +1,113 @@ +From: Steven Rostedt +Subject: futex: Fix bug on when a requeued RT task times out +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Requeue with timeout causes a bug with PREEMPT_RT_FULL. + +The bug comes from a timed out condition. + + + TASK 1 TASK 2 + ------ ------ + futex_wait_requeue_pi() + futex_wait_queue_me() + + + double_lock_hb(); + + raw_spin_lock(pi_lock); + if (current->pi_blocked_on) { + } else { + current->pi_blocked_on = PI_WAKE_INPROGRESS; + run_spin_unlock(pi_lock); + spin_lock(hb->lock); <-- blocked! + + + plist_for_each_entry_safe(this) { + rt_mutex_start_proxy_lock(); + task_blocks_on_rt_mutex(); + BUG_ON(task->pi_blocked_on)!!!! + +The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the +problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to +grab the hb->lock, which it fails to do so. As the hb->lock is a mutex, +it will block and set the "pi_blocked_on" to the hb->lock. + +When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails +because the task1's pi_blocked_on is no longer set to that, but instead, +set to the hb->lock. + +The fix: + +When calling rt_mutex_start_proxy_lock() a check is made to see +if the proxy tasks pi_blocked_on is set. If so, exit out early. +Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies +the proxy task that it is being requeued, and will handle things +appropriately. 
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner +--- + kernel/locking/rtmutex.c | 32 +++++++++++++++++++++++++++++++- + kernel/locking/rtmutex_common.h | 1 + + 2 files changed, 32 insertions(+), 1 deletion(-) + +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -71,7 +71,8 @@ static void fixup_rt_mutex_waiters(struc + + static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) + { +- return waiter && waiter != PI_WAKEUP_INPROGRESS; ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; + } + + /* +@@ -1110,6 +1111,35 @@ int rt_mutex_start_proxy_lock(struct rt_ + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * In PREEMPT_RT there's an added race. ++ * If the task, that we are about to requeue, times out, ++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue ++ * to skip this task. But right after the task sets ++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then ++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. ++ * This will replace the PI_WAKEUP_INPROGRESS with the actual ++ * lock that it blocks on. We *must not* place this task ++ * on this proxy lock in that case. ++ * ++ * To prevent this race, we first take the task's pi_lock ++ * and check if it has updated its pi_blocked_on. If it has, ++ * we assume that it woke up and we return -EAGAIN. ++ * Otherwise, we set the task's pi_blocked_on to ++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up ++ * it will know that we are in the process of requeuing it. ++ */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->pi_blocked_on) { ++ raw_spin_unlock_irq(&task->pi_lock); ++ raw_spin_unlock(&lock->wait_lock); ++ return -EAGAIN; ++ } ++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ + ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); + + if (ret && !rt_mutex_owner(lock)) { +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -105,6 +105,7 @@ static inline struct task_struct *rt_mut + * PI-futex support (proxy locking functions, etc.): + */ + #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) + + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch new file mode 100644 index 000000000..b018326a2 --- /dev/null +++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch @@ -0,0 +1,38 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:57 -0500 +Subject: genirq: disable irqpoll on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Creates long latencies for no value + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/irq/spurious.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/kernel/irq/spurious.c ++++ b/kernel/irq/spurious.c +@@ -346,6 +346,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system 
performance.\n"); +@@ -358,6 +362,10 @@ module_param(irqfixup, int, 0644); + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch new file mode 100644 index 000000000..ba5e2d016 --- /dev/null +++ b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -0,0 +1,146 @@ +From 76666dbbdd40e963e7df84c123fc9aea4a2bcc69 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 21 Aug 2013 17:48:46 +0200 +Subject: [PATCH] genirq: do not invoke the affinity callback via a workqueue +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Joe Korty reported, that __irq_set_affinity_locked() schedules a +workqueue while holding a rawlock which results in a might_sleep() +warning. +This patch moves the invokation into a process context so that we only +wakeup() a process while holding the lock. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/interrupt.h | 1 + kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 77 insertions(+), 3 deletions(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -224,6 +224,7 @@ struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; ++ struct list_head list; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); + }; +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data + return ret; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void _irq_affinity_notify(struct irq_affinity_notify *notify); ++static struct task_struct *set_affinity_helper; ++static LIST_HEAD(affinity_list); ++static DEFINE_RAW_SPINLOCK(affinity_list_lock); ++ ++static int set_affinity_thread(void *unused) ++{ ++ while (1) { ++ struct irq_affinity_notify *notify; ++ int empty; ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ raw_spin_lock_irq(&affinity_list_lock); ++ empty = list_empty(&affinity_list); ++ raw_spin_unlock_irq(&affinity_list_lock); ++ ++ if (empty) ++ schedule(); ++ if (kthread_should_stop()) ++ break; ++ set_current_state(TASK_RUNNING); ++try_next: ++ notify = NULL; ++ ++ raw_spin_lock_irq(&affinity_list_lock); ++ if (!list_empty(&affinity_list)) { ++ notify = list_first_entry(&affinity_list, ++ struct irq_affinity_notify, list); ++ list_del_init(¬ify->list); ++ } ++ raw_spin_unlock_irq(&affinity_list_lock); ++ ++ if (!notify) ++ continue; ++ _irq_affinity_notify(notify); ++ goto try_next; ++ } ++ return 0; ++} ++ ++static void init_helper_thread(void) ++{ ++ if (set_affinity_helper) ++ return; ++ set_affinity_helper = kthread_run(set_affinity_thread, NULL, ++ "affinity-cb"); ++ WARN_ON(IS_ERR(set_affinity_helper)); ++} ++#else ++ ++static inline void init_helper_thread(void) { } ++ ++#endif ++ + int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) + { + struct irq_chip *chip = irq_data_get_irq_chip(data); +@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq + + if (desc->affinity_notify) { + 
kref_get(&desc->affinity_notify->kref); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ raw_spin_lock(&affinity_list_lock); ++ if (list_empty(&desc->affinity_notify->list)) ++ list_add_tail(&affinity_list, ++ &desc->affinity_notify->list); ++ raw_spin_unlock(&affinity_list_lock); ++ wake_up_process(set_affinity_helper); ++#else + schedule_work(&desc->affinity_notify->work); ++#endif + } + irqd_set(data, IRQD_AFFINITY_SET); + +@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int i + } + EXPORT_SYMBOL_GPL(irq_set_affinity_hint); + +-static void irq_affinity_notify(struct work_struct *work) ++static void _irq_affinity_notify(struct irq_affinity_notify *notify) + { +- struct irq_affinity_notify *notify = +- container_of(work, struct irq_affinity_notify, work); + struct irq_desc *desc = irq_to_desc(notify->irq); + cpumask_var_t cpumask; + unsigned long flags; +@@ -248,6 +312,13 @@ static void irq_affinity_notify(struct w + kref_put(¬ify->kref, notify->release); + } + ++static void irq_affinity_notify(struct work_struct *work) ++{ ++ struct irq_affinity_notify *notify = ++ container_of(work, struct irq_affinity_notify, work); ++ _irq_affinity_notify(notify); ++} ++ + /** + * irq_set_affinity_notifier - control notification of IRQ affinity changes + * @irq: Interrupt for which to enable/disable notification +@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int i + notify->irq = irq; + kref_init(¬ify->kref); + INIT_WORK(¬ify->work, irq_affinity_notify); ++ INIT_LIST_HEAD(¬ify->list); ++ init_helper_thread(); + } + + raw_spin_lock_irqsave(&desc->lock, flags); diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch new file mode 100644 index 000000000..6068c80a1 --- /dev/null +++ b/debian/patches/features/all/rt/genirq-force-threading.patch @@ -0,0 +1,46 @@ +Subject: genirq-force-threading.patch +From: Thomas Gleixner +Date: Sun, 03 Apr 2011 11:57:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/interrupt.h | 6 +++++- + kernel/irq/manage.c | 2 ++ + 2 files changed, 7 insertions(+), 1 deletion(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -318,9 +318,13 @@ static inline int disable_irq_wake(unsig + + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + #ifndef __ARCH_SET_SOFTIRQ_PENDING +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -22,6 +22,7 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + __read_mostly bool force_irqthreads; + + static int __init setup_forced_irqthreads(char *arg) +@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread + return 0; + } + early_param("threadirqs", setup_forced_irqthreads); ++# endif + #endif + + /** diff --git a/debian/patches/features/all/rt/genirq-nodebug-shirq.patch b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch new file mode 100644 index 000000000..e6a894d85 --- /dev/null +++ b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch @@ -0,0 +1,21 @@ +From: Thomas Gleixner +Date: Sun, 18 Mar 2011 10:22:04 +0100 +Subject: genirq: Disable DEBUG_SHIRQ for rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner 
+--- + lib/Kconfig.debug | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -606,7 +606,7 @@ endmenu # "Memory Debugging" + + config DEBUG_SHIRQ + bool "Debug shared IRQ handlers" +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && !PREEMPT_RT_BASE + help + Enable this to generate a spurious interrupt as soon as a shared + interrupt handler is registered, and just before one is deregistered. diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch new file mode 100644 index 000000000..a03d5fbde --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch @@ -0,0 +1,205 @@ +Subject: hotplug: Lightweight get online cpus +From: Thomas Gleixner +Date: Wed, 15 Jun 2011 12:36:06 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +get_online_cpus() is a heavy weight function which involves a global +mutex. migrate_disable() wants a simpler construct which prevents only +a CPU from going doing while a task is in a migrate disabled section. + +Implement a per cpu lockless mechanism, which serializes only in the +real unplug case on a global mutex. That serialization affects only +tasks on the cpu which should be brought down. + +Signed-off-by: Thomas Gleixner +--- + include/linux/cpu.h | 4 + + kernel/cpu.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++++- + 2 files changed, 124 insertions(+), 2 deletions(-) + +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -182,6 +182,8 @@ extern void get_online_cpus(void); + extern void put_online_cpus(void); + extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) + #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) +@@ -196,6 +198,8 @@ static inline void cpu_hotplug_done(void + #define put_online_cpus() do { } while (0) + #define cpu_hotplug_disable() do { } while (0) + #define cpu_hotplug_enable() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. */ + #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -63,6 +63,101 @@ static struct { + .refcount = 0, + }; + ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ int refcount; ++ struct completion synced; ++}; ++ ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); ++ ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! 
++ */ ++void pin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ ++retry: ++ if (!hp->unplug || hp->refcount || preempt_count() > 1 || ++ hp->unplug == current) { ++ hp->refcount++; ++ return; ++ } ++ preempt_enable(); ++ mutex_lock(&cpu_hotplug.lock); ++ mutex_unlock(&cpu_hotplug.lock); ++ preempt_disable(); ++ goto retry; ++} ++ ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! ++ */ ++void unpin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ ++ WARN_ON(hp->refcount <= 0); ++ ++ /* This is safe. sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current) ++ wake_up_process(hp->unplug); ++} ++ ++/* ++ * FIXME: Is this really correct under all circumstances ? ++ */ ++static int sync_unplug_thread(void *data) ++{ ++ struct hotplug_pcp *hp = data; ++ ++ preempt_disable(); ++ hp->unplug = current; ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ set_current_state(TASK_RUNNING); ++ preempt_enable(); ++ complete(&hp->synced); ++ return 0; ++} ++ ++/* ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. ++ */ ++static int cpu_unplug_begin(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ struct task_struct *tsk; ++ ++ init_completion(&hp->synced); ++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); ++ if (IS_ERR(tsk)) ++ return (PTR_ERR(tsk)); ++ kthread_bind(tsk, cpu); ++ wake_up_process(tsk); ++ wait_for_completion(&hp->synced); ++ return 0; ++} ++ ++static void cpu_unplug_done(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ hp->unplug = NULL; ++} ++ + void get_online_cpus(void) + { + might_sleep(); +@@ -282,13 +377,14 @@ static int __ref take_cpu_down(void *_pa + /* Requires cpu_add_remove_lock to be held */ + static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) + { +- int err, nr_calls = 0; ++ int mycpu, err, nr_calls = 0; + void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; + struct take_cpu_down_param tcd_param = { + .mod = mod, + .hcpu = hcpu, + }; ++ cpumask_var_t cpumask; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -296,7 +392,27 @@ static int __ref _cpu_down(unsigned int + if (!cpu_online(cpu)) + return -EINVAL; + ++ /* Move the downtaker off the unplug cpu */ ++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); ++ set_cpus_allowed_ptr(current, cpumask); ++ free_cpumask_var(cpumask); ++ preempt_disable(); ++ mycpu = smp_processor_id(); ++ if (mycpu == cpu) { ++ printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); ++ preempt_enable(); ++ return -EBUSY; ++ } ++ preempt_enable(); ++ + cpu_hotplug_begin(); ++ err = cpu_unplug_begin(cpu); ++ if (err) { ++ printk("cpu_unplug_begin(%d) failed\n", cpu); ++ goto out_cancel; ++ } + + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { +@@ -333,7 +449,7 @@ static int __ref _cpu_down(unsigned int + /* CPU didn't die: tell everyone. Can't complain. 
*/ + smpboot_unpark_threads(cpu); + cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); +- goto out_release; ++ goto out_cancel; + } + BUG_ON(cpu_online(cpu)); + +@@ -356,6 +472,8 @@ static int __ref _cpu_down(unsigned int + check_for_tasks(cpu); + + out_release: ++ cpu_unplug_done(cpu); ++out_cancel: + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch new file mode 100644 index 000000000..9894ee487 --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch @@ -0,0 +1,25 @@ +Subject: hotplug: sync_unplug: No "\n" in task name +From: Yong Zhang +Date: Sun, 16 Oct 2011 18:56:43 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Otherwise the output will look a little odd. + +Signed-off-by: Yong Zhang +Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -142,7 +142,7 @@ static int cpu_unplug_begin(unsigned int + struct task_struct *tsk; + + init_completion(&hp->synced); +- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); ++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(tsk)) + return (PTR_ERR(tsk)); + kthread_bind(tsk, cpu); diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch new file mode 100644 index 000000000..43115e983 --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch @@ -0,0 +1,37 @@ +Subject: hotplug-use-migrate-disable.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 19:35:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -400,14 +400,13 @@ static int __ref _cpu_down(unsigned int + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); +- preempt_disable(); ++ migrate_disable(); + mycpu = smp_processor_id(); + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); +- preempt_enable(); ++ migrate_enable(); + return -EBUSY; + } +- preempt_enable(); + + cpu_hotplug_begin(); + err = cpu_unplug_begin(cpu); +@@ -476,6 +475,7 @@ static int __ref _cpu_down(unsigned int + out_release: + cpu_unplug_done(cpu); + out_cancel: ++ migrate_enable(); + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch new file mode 100644 index 000000000..1ae31c0be --- /dev/null +++ b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch @@ -0,0 +1,119 @@ +From 180cdb93d796bf52c919f5e3df30af83aa6d46ca Mon Sep 17 00:00:00 2001 +From: Yang Shi +Date: Mon, 16 Sep 2013 14:09:19 -0700 +Subject: [PATCH] hrtimer: Move schedule_work call to helper thread +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When run ltp leapsec_timer test, the following call trace is caught: + +BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 +in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1 +Preemption disabled at:[] cpu_startup_entry+0x133/0x310 + +CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2 +Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 +ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58 +ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0 +ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200 +Call Trace: + [] dump_stack+0x19/0x1b +[] __might_sleep+0xf1/0x170 +[] rt_spin_lock+0x20/0x50 +[] queue_work_on+0x61/0x100 +[] clock_was_set_delayed+0x21/0x30 +[] do_timer+0x40e/0x660 +[] tick_do_update_jiffies64+0xf7/0x140 +[] tick_check_idle+0x92/0xc0 +[] irq_enter+0x57/0x70 +[] smp_apic_timer_interrupt+0x3e/0x9b +[] apic_timer_interrupt+0x6a/0x70 + [] ? cpuidle_enter_state+0x4c/0xc0 +[] cpuidle_idle_call+0xd8/0x2d0 +[] arch_cpu_idle+0xe/0x30 +[] cpu_startup_entry+0x19e/0x310 +[] start_secondary+0x1ad/0x1b0 + +The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which +calls schedule_work. + +Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's +not safe to call schedule_work in interrupt context. + +Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca +(rt,ntp: Move call to schedule_delayed_work() to helper thread) +from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which +makes a similar change. + +add a helper thread which does the call to schedule_work and wake up that +thread instead of calling schedule_work directly. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Yang Shi +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -48,6 +48,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -742,6 +743,44 @@ static void clock_was_set_work(struct wo + + static DECLARE_WORK(hrtimer_work, clock_was_set_work); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT can not call schedule_work from real interrupt context. ++ * Need to make a thread to do the real work. 
++ */ ++static struct task_struct *clock_set_delay_thread; ++static bool do_clock_set_delay; ++ ++static int run_clock_set_delay(void *ignore) ++{ ++ while (!kthread_should_stop()) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (do_clock_set_delay) { ++ do_clock_set_delay = false; ++ schedule_work(&hrtimer_work); ++ } ++ schedule(); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void clock_was_set_delayed(void) ++{ ++ do_clock_set_delay = true; ++ /* Make visible before waking up process */ ++ smp_wmb(); ++ wake_up_process(clock_set_delay_thread); ++} ++ ++static __init int create_clock_set_delay_thread(void) ++{ ++ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd"); ++ BUG_ON(!clock_set_delay_thread); ++ return 0; ++} ++early_initcall(create_clock_set_delay_thread); ++#else /* PREEMPT_RT_FULL */ + /* + * Called from timekeeping and resume code to reprogramm the hrtimer + * interrupt device on all cpus. +@@ -750,6 +789,7 @@ void clock_was_set_delayed(void) + { + schedule_work(&hrtimer_work); + } ++#endif + + #else + diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch new file mode 100644 index 000000000..b4716ecab --- /dev/null +++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch @@ -0,0 +1,453 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:31 -0500 +Subject: hrtimer: fixup hrtimer callback changes for preempt-rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +In preempt-rt we can not call the callbacks which take sleeping locks +from the timer interrupt context. + +Bring back the softirq split for now, until we fixed the signal +delivery problem for real. + +Signed-off-by: Thomas Gleixner +Signed-off-by: Ingo Molnar + +--- + include/linux/hrtimer.h | 3 + kernel/hrtimer.c | 216 ++++++++++++++++++++++++++++++++++++++++------- + kernel/sched/core.c | 1 + kernel/sched/rt.c | 1 + kernel/time/tick-sched.c | 1 + kernel/watchdog.c | 1 + 6 files changed, 195 insertions(+), 28 deletions(-) + +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -111,6 +111,8 @@ struct hrtimer { + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++ struct list_head cb_entry; ++ int irqsafe; + #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + ktime_t praecox; + #endif +@@ -150,6 +152,7 @@ struct hrtimer_clock_base { + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t resolution; + ktime_t (*get_time)(void); + ktime_t softirq_time; +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -609,8 +609,7 @@ static int hrtimer_reprogram(struct hrti + * When the callback is running, we do not reprogram the clock event + * device. The timer callback is either running on a different CPU or + * the callback is executed in the hrtimer_interrupt context. The +- * reprogramming is handled either by the softirq, which called the +- * callback or at the end of the hrtimer_interrupt. ++ * reprogramming is handled at the end of the hrtimer_interrupt. 
+ */ + if (hrtimer_callback_running(timer)) + return 0; +@@ -645,6 +644,9 @@ static int hrtimer_reprogram(struct hrti + return res; + } + ++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now); ++static int hrtimer_rt_defer(struct hrtimer *timer); ++ + /* + * Initialize the high resolution related parts of cpu_base + */ +@@ -661,9 +663,18 @@ static inline void hrtimer_init_hres(str + * and expiry check is done in the hrtimer_interrupt or in the softirq. + */ + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { +- return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); ++ if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base))) ++ return 0; ++ if (!wakeup) ++ return -ETIME; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (!hrtimer_rt_defer(timer)) ++ return -ETIME; ++#endif ++ return 1; + } + + static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) +@@ -748,12 +759,18 @@ static inline int hrtimer_switch_to_hres + static inline void + hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { + return 0; + } + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } + static inline void retrigger_next_event(void *arg) { } ++static inline int hrtimer_reprogram(struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ return 0; ++} + + #endif /* CONFIG_HIGH_RES_TIMERS */ + +@@ -889,9 +906,9 @@ void hrtimer_wait_for_timer(const struct + { + struct hrtimer_clock_base *base = timer->base; + +- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) ++ if (base && base->cpu_base && !timer->irqsafe) + wait_event(base->cpu_base->wait, +- !(timer->state & HRTIMER_STATE_CALLBACK)); ++ !(timer->state & HRTIMER_STATE_CALLBACK)); + } + + #else +@@ -941,6 +958,11 @@ static void __remove_hrtimer(struct hrti + if (!(timer->state & HRTIMER_STATE_ENQUEUED)) + goto out; + ++ if (unlikely(!list_empty(&timer->cb_entry))) { ++ list_del_init(&timer->cb_entry); ++ goto out; ++ } ++ + next_timer = timerqueue_getnext(&base->active); + timerqueue_del(&base->active, &timer->node); + if (&timer->node == next_timer) { +@@ -1048,9 +1070,19 @@ int __hrtimer_start_range_ns(struct hrti + * + * XXX send_remote_softirq() ? + */ +- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) +- && hrtimer_enqueue_reprogram(timer, new_base)) { +- if (wakeup) { ++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { ++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); ++ if (ret < 0) { ++ /* ++ * In case we failed to reprogram the timer (mostly ++ * because out current timer is already elapsed), ++ * remove it again and report a failure. This avoids ++ * stale base->first entries. ++ */ ++ debug_deactivate(timer); ++ __remove_hrtimer(timer, new_base, ++ timer->state & HRTIMER_STATE_CALLBACK, 0); ++ } else if (ret > 0) { + /* + * We need to drop cpu_base->lock to avoid a + * lock ordering issue vs. rq->lock. 
+@@ -1058,9 +1090,7 @@ int __hrtimer_start_range_ns(struct hrti + raw_spin_unlock(&new_base->cpu_base->lock); + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + local_irq_restore(flags); +- return ret; +- } else { +- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ return 0; + } + } + +@@ -1229,6 +1259,7 @@ static void __hrtimer_init(struct hrtime + + base = hrtimer_clockid_to_base(clock_id); + timer->base = &cpu_base->clock_base[base]; ++ INIT_LIST_HEAD(&timer->cb_entry); + timerqueue_init(&timer->node); + + #ifdef CONFIG_TIMER_STATS +@@ -1312,10 +1343,128 @@ static void __run_hrtimer(struct hrtimer + timer->state &= ~HRTIMER_STATE_CALLBACK; + } + +-#ifdef CONFIG_HIGH_RES_TIMERS +- + static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ /* ++ * Note, we clear the callback flag before we requeue the ++ * timer otherwise we trigger the callback_running() check ++ * in hrtimer_reprogram(). ++ */ ++ timer->state &= ~HRTIMER_STATE_CALLBACK; ++ ++ if (restart != HRTIMER_NORESTART) { ++ BUG_ON(hrtimer_active(timer)); ++ /* ++ * Enqueue the timer, if it's the leftmost timer then ++ * we need to reprogram it. ++ */ ++ if (!enqueue_hrtimer(timer, base)) ++ return; ++ ++#ifndef CONFIG_HIGH_RES_TIMERS ++ } ++#else ++ if (base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ ++ } else if (hrtimer_active(timer)) { ++ /* ++ * If the timer was rearmed on another CPU, reprogram ++ * the event device. ++ */ ++ if (&timer->node == base->active.next && ++ base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ } ++ return; ++ ++requeue: ++ /* ++ * Timer is expired. Thus move it from tree to pending list ++ * again. ++ */ ++ __remove_hrtimer(timer, base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &base->expired); ++#endif ++} ++ ++/* ++ * The changes in mainline which removed the callback modes from ++ * hrtimer are not yet working with -rt. The non wakeup_process() ++ * based callbacks which involve sleeping locks need to be treated ++ * seperately. ++ */ ++static void hrtimer_rt_run_pending(void) ++{ ++ enum hrtimer_restart (*fn)(struct hrtimer *); ++ struct hrtimer_cpu_base *cpu_base; ++ struct hrtimer_clock_base *base; ++ struct hrtimer *timer; ++ int index, restart; ++ ++ local_irq_disable(); ++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); ++ ++ raw_spin_lock(&cpu_base->lock); ++ ++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { ++ base = &cpu_base->clock_base[index]; ++ ++ while (!list_empty(&base->expired)) { ++ timer = list_first_entry(&base->expired, ++ struct hrtimer, cb_entry); ++ ++ /* ++ * Same as the above __run_hrtimer function ++ * just we run with interrupts enabled. 
++ */ ++ debug_hrtimer_deactivate(timer); ++ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); ++ timer_stats_account_hrtimer(timer); ++ fn = timer->function; ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ restart = fn(timer); ++ raw_spin_lock_irq(&cpu_base->lock); ++ ++ hrtimer_rt_reprogram(restart, timer, base); ++ } ++ } ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ ++ wake_up_timer_waiters(cpu_base); ++} ++ ++static int hrtimer_rt_defer(struct hrtimer *timer) ++{ ++ if (timer->irqsafe) ++ return 0; ++ ++ __remove_hrtimer(timer, timer->base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &timer->base->expired); ++ return 1; ++} ++ ++#else ++ ++static inline void hrtimer_rt_run_pending(void) ++{ ++ hrtimer_peek_ahead_timers(); ++} ++ ++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } ++ ++#endif ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++ + /* + * High resolution timer interrupt + * Called with interrupts disabled +@@ -1324,7 +1473,7 @@ void hrtimer_interrupt(struct clock_even + { + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + ktime_t expires_next, now, entry_time, delta; +- int i, retries = 0; ++ int i, retries = 0, raise = 0; + + BUG_ON(!cpu_base->hres_active); + cpu_base->nr_events++; +@@ -1393,7 +1542,10 @@ void hrtimer_interrupt(struct clock_even + break; + } + +- __run_hrtimer(timer, &basenow); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &basenow); ++ else ++ raise = 1; + } + } + +@@ -1408,6 +1560,10 @@ void hrtimer_interrupt(struct clock_even + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; ++ ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ + return; + } + +@@ -1487,18 +1643,18 @@ void hrtimer_peek_ahead_timers(void) + __hrtimer_peek_ahead_timers(); + local_irq_restore(flags); + } +- +-static void run_hrtimer_softirq(struct softirq_action *h) +-{ +- hrtimer_peek_ahead_timers(); +-} +- + #else /* CONFIG_HIGH_RES_TIMERS */ + + static inline void __hrtimer_peek_ahead_timers(void) { } + + #endif /* !CONFIG_HIGH_RES_TIMERS */ + ++ ++static void run_hrtimer_softirq(struct softirq_action *h) ++{ ++ hrtimer_rt_run_pending(); ++} ++ + /* + * Called from timer softirq every jiffy, expire hrtimers: + * +@@ -1531,7 +1687,7 @@ void hrtimer_run_queues(void) + struct timerqueue_node *node; + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_clock_base *base; +- int index, gettime = 1; ++ int index, gettime = 1, raise = 0; + + if (hrtimer_hres_active()) + return; +@@ -1556,12 +1712,16 @@ void hrtimer_run_queues(void) + hrtimer_get_expires_tv64(timer)) + break; + +- __run_hrtimer(timer, &base->softirq_time); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &base->softirq_time); ++ else ++ raise = 1; + } + raw_spin_unlock(&cpu_base->lock); + } + +- wake_up_timer_waiters(cpu_base); ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + /* +@@ -1583,6 +1743,7 @@ static enum hrtimer_restart hrtimer_wake + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) + { + sl->timer.function = hrtimer_wakeup; ++ sl->timer.irqsafe = 1; + sl->task = task; + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); +@@ -1719,6 +1880,7 @@ static void init_hrtimers_cpu(int cpu) + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + cpu_base->clock_base[i].cpu_base = cpu_base; + timerqueue_init_head(&cpu_base->clock_base[i].active); ++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); + } + + 
hrtimer_init_hres(cpu_base); +@@ -1837,9 +1999,7 @@ void __init hrtimers_init(void) + hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&hrtimers_nb); +-#ifdef CONFIG_HIGH_RES_TIMERS + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); +-#endif + } + + /** +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -489,6 +489,7 @@ static void init_rq_hrtick(struct rq *rq + + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.irqsafe = 1; + } + #else /* CONFIG_SCHED_HRTICK */ + static inline void hrtick_clear(struct rq *rq) +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -43,6 +43,7 @@ void init_rt_bandwidth(struct rt_bandwid + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.irqsafe = 1; + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -1111,6 +1111,7 @@ void tick_setup_sched_timer(void) + * Emulate tick processing via per-CPU hrtimers: + */ + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ ts->sched_timer.irqsafe = 1; + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per cpu) */ +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -357,6 +357,7 @@ static void watchdog_enable(unsigned int + /* kick off the timer for the hardlockup detector */ + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; ++ hrtimer->irqsafe = 1; + + /* Enable the perf event */ + watchdog_nmi_enable(cpu); diff --git a/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch new file mode 100644 index 000000000..f8d247dd3 --- /dev/null +++ b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch @@ -0,0 +1,38 @@ +Subject: hrtimer: Raise softirq if hrtimer irq stalled +From: Watanabe +Date: Sun, 28 Oct 2012 11:13:44 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When the hrtimer stall detection hits the softirq is not raised. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/hrtimer.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1560,11 +1560,7 @@ void hrtimer_interrupt(struct clock_even + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; +- +- if (raise) +- raise_softirq_irqoff(HRTIMER_SOFTIRQ); +- +- return; ++ goto out; + } + + /* +@@ -1608,6 +1604,9 @@ void hrtimer_interrupt(struct clock_even + tick_program_event(expires_next, 1); + printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", + ktime_to_ns(delta)); ++out: ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + /* diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch new file mode 100644 index 000000000..88f183b04 --- /dev/null +++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch @@ -0,0 +1,196 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:34 -0500 +Subject: hrtimers: prepare full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Make cancellation of a running callback in softirq context safe +against preemption. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/hrtimer.h | 10 ++++++++++ + kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++- + kernel/itimer.c | 1 + + kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++ + 4 files changed, 76 insertions(+), 1 deletion(-) + +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -193,6 +193,9 @@ struct hrtimer_cpu_base { + unsigned long nr_hangs; + ktime_t max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + }; + +@@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif ++ + /* Query timers: */ + extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); + extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -872,6 +872,32 @@ u64 hrtimer_forward(struct hrtimer *time + } + EXPORT_SYMBOL_GPL(hrtimer_forward); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define wake_up_timer_waiters(b) wake_up(&(b)->wait) ++ ++/** ++ * hrtimer_wait_for_timer - Wait for a running timer ++ * ++ * @timer: timer to wait for ++ * ++ * The function waits in case the timers callback function is ++ * currently executed on the waitqueue of the timer base. The ++ * waitqueue is woken up after the timer callback function has ++ * finished execution. 
++ */ ++void hrtimer_wait_for_timer(const struct hrtimer *timer) ++{ ++ struct hrtimer_clock_base *base = timer->base; ++ ++ if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) ++ wait_event(base->cpu_base->wait, ++ !(timer->state & HRTIMER_STATE_CALLBACK)); ++} ++ ++#else ++# define wake_up_timer_waiters(b) do { } while (0) ++#endif ++ + /* + * enqueue_hrtimer - internal function to (re)start a timer + * +@@ -1124,7 +1150,7 @@ int hrtimer_cancel(struct hrtimer *timer + + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_wait_for_timer(timer); + } + } + EXPORT_SYMBOL_GPL(hrtimer_cancel); +@@ -1534,6 +1560,8 @@ void hrtimer_run_queues(void) + } + raw_spin_unlock(&cpu_base->lock); + } ++ ++ wake_up_timer_waiters(cpu_base); + } + + /* +@@ -1694,6 +1722,9 @@ static void init_hrtimers_cpu(int cpu) + } + + hrtimer_init_hres(cpu_base); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ init_waitqueue_head(&cpu_base->wait); ++#endif + } + + #ifdef CONFIG_HOTPLUG_CPU +--- a/kernel/itimer.c ++++ b/kernel/itimer.c +@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_wait_for_timer(&tsk->signal->real_timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ + return overrun; + } + ++/* ++ * Protected by RCU! ++ */ ++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (kc->timer_set == common_timer_set) ++ hrtimer_wait_for_timer(&timr->it.real.timer); ++ else ++ /* FIXME: Whacky hack for posix-cpu-timers */ ++ schedule_timeout(1); ++#endif ++} ++ + /* Set a POSIX.1b interval timer. */ + /* timr->it_lock is taken. */ + static int +@@ -895,6 +909,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, + if (!timr) + return -EINVAL; + ++ rcu_read_lock(); + kc = clockid_to_kclock(timr->it_clock); + if (WARN_ON_ONCE(!kc || !kc->timer_set)) + error = -EINVAL; +@@ -903,9 +918,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, + + unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ timer_wait_for_callback(kc, timr); + rtn = NULL; // We already got the old time... 
++ rcu_read_unlock(); + goto retry; + } ++ rcu_read_unlock(); + + if (old_setting && !error && + copy_to_user(old_setting, &old_spec, sizeof (old_spec))) +@@ -943,10 +961,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t + if (!timer) + return -EINVAL; + ++ rcu_read_lock(); + if (timer_delete_hook(timer) == TIMER_RETRY) { + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } ++ rcu_read_unlock(); + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -972,8 +995,18 @@ static void itimer_delete(struct k_itime + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + ++ /* On RT we can race with a deletion */ ++ if (!timer->it_signal) { ++ unlock_timer(timer, flags); ++ return; ++ } ++ + if (timer_delete_hook(timer) == TIMER_RETRY) { ++ rcu_read_lock(); + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } + list_del(&timer->list); diff --git a/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch b/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch new file mode 100644 index 000000000..16a7f3301 --- /dev/null +++ b/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch @@ -0,0 +1,27 @@ +From c19bf3baaa55918486b868ab17aae0c0c220e51f Mon Sep 17 00:00:00 2001 +From: Mike Galbraith +Date: Fri, 30 Aug 2013 07:57:25 +0200 +Subject: [PATCH] hwlat-detector: Don't ignore threshold module parameter +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If the user specified a threshold at module load time, use it. + +Cc: stable-rt@vger.kernel.org +Acked-by: Steven Rostedt +Signed-off-by: Mike Galbraith +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/misc/hwlat_detector.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/misc/hwlat_detector.c ++++ b/drivers/misc/hwlat_detector.c +@@ -414,7 +414,7 @@ static int init_stats(void) + goto out; + + __reset_stats(); +- data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ + data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ + data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ + diff --git a/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch new file mode 100644 index 000000000..690b2eb09 --- /dev/null +++ b/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch @@ -0,0 +1,128 @@ +From 7a036d4dfcf3f2d3247ff7f739284f4b5056bdcb Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Mon, 19 Aug 2013 17:33:25 -0400 +Subject: [PATCH 1/3] hwlat-detector: Update hwlat_detector to add outer loop + detection +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The hwlat_detector reads two timestamps in a row, then reports any +gap between those calls. The problem is, it misses everything between +the second reading of the time stamp to the first reading of the time stamp +in the next loop. That's were most of the time is spent, which means, +chances are likely that it will miss all hardware latencies. This +defeats the purpose. 
+ +By also testing the first time stamp from the previous loop second +time stamp (the outer loop), we are more likely to find a latency. + +Setting the threshold to 1, here's what the report now looks like: + +1347415723.0232202770 0 2 +1347415725.0234202822 0 2 +1347415727.0236202875 0 2 +1347415729.0238202928 0 2 +1347415731.0240202980 0 2 +1347415734.0243203061 0 2 +1347415736.0245203113 0 2 +1347415738.0247203166 2 0 +1347415740.0249203219 0 3 +1347415742.0251203272 0 3 +1347415743.0252203299 0 3 +1347415745.0254203351 0 2 +1347415747.0256203404 0 2 +1347415749.0258203457 0 2 +1347415751.0260203510 0 2 +1347415754.0263203589 0 2 +1347415756.0265203642 0 2 +1347415758.0267203695 0 2 +1347415760.0269203748 0 2 +1347415762.0271203801 0 2 +1347415764.0273203853 2 0 + +There's some hardware latency that takes 2 microseconds to run. + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------ + 1 file changed, 26 insertions(+), 6 deletions(-) + +--- a/drivers/misc/hwlat_detector.c ++++ b/drivers/misc/hwlat_detector.c +@@ -143,6 +143,7 @@ static void detector_exit(void); + struct sample { + u64 seqnum; /* unique sequence */ + u64 duration; /* ktime delta */ ++ u64 outer_duration; /* ktime delta (outer loop) */ + struct timespec timestamp; /* wall time */ + unsigned long lost; + }; +@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample( + */ + static int get_sample(void *unused) + { +- ktime_t start, t1, t2; ++ ktime_t start, t1, t2, last_t2; + s64 diff, total = 0; + u64 sample = 0; ++ u64 outer_sample = 0; + int ret = 1; + ++ last_t2.tv64 = 0; + start = ktime_get(); /* start timestamp */ + + do { +@@ -231,7 +234,22 @@ static int get_sample(void *unused) + t1 = ktime_get(); /* we'll look for a discontinuity */ + t2 = ktime_get(); + ++ if (last_t2.tv64) { ++ /* Check the delta from outer loop (t2 to next t1) */ ++ diff = ktime_to_us(ktime_sub(t1, last_t2)); ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ pr_err(BANNER "time running backwards\n"); ++ goto out; ++ } ++ if (diff > outer_sample) ++ outer_sample = diff; ++ } ++ last_t2 = t2; ++ + total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ ++ ++ /* This checks the inner loop (t1 to t2) */ + diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ + + /* This shouldn't happen */ +@@ -246,12 +264,13 @@ static int get_sample(void *unused) + } while (total <= data.sample_width); + + /* If we exceed the threshold value, we have found a hardware latency */ +- if (sample > data.threshold) { ++ if (sample > data.threshold || outer_sample > data.threshold) { + struct sample s; + + data.count++; + s.seqnum = data.count; + s.duration = sample; ++ s.outer_duration = outer_sample; + s.timestamp = CURRENT_TIME; + __buffer_add_sample(&s); + +@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct + } + } + +- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", +- sample->timestamp.tv_sec, +- sample->timestamp.tv_nsec, +- sample->duration); ++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n", ++ sample->timestamp.tv_sec, ++ sample->timestamp.tv_nsec, ++ sample->duration, ++ sample->outer_duration); + + + /* handling partial reads is more trouble than it's worth */ diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch new file mode 100644 index 000000000..bcd87f6b8 --- /dev/null +++ 
b/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch @@ -0,0 +1,185 @@ +From 42b3963c5d3dcdb54226fc6bbb6b5fbcf3f2ddee Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Mon, 19 Aug 2013 17:33:27 -0400 +Subject: [PATCH 3/3] hwlat-detector: Use thread instead of stop machine +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +There's no reason to use stop machine to search for hardware latency. +Simply disabling interrupts while running the loop will do enough to +check if something comes in that wasn't disabled by interrupts being +off, which is exactly what stop machine does. + +Instead of using stop machine, just have the thread disable interrupts +while it checks for hardware latency. + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/misc/hwlat_detector.c | 60 ++++++++++++++++++------------------------ + 1 file changed, 26 insertions(+), 34 deletions(-) + +--- a/drivers/misc/hwlat_detector.c ++++ b/drivers/misc/hwlat_detector.c +@@ -41,7 +41,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -107,7 +106,6 @@ struct data; /* Global state */ + /* Sampling functions */ + static int __buffer_add_sample(struct sample *sample); + static struct sample *buffer_get_sample(struct sample *sample); +-static int get_sample(void *unused); + + /* Threading and state */ + static int kthread_fn(void *unused); +@@ -149,7 +147,7 @@ struct sample { + unsigned long lost; + }; + +-/* keep the global state somewhere. Mostly used under stop_machine. */ ++/* keep the global state somewhere. */ + static struct data { + + struct mutex lock; /* protect changes */ +@@ -172,7 +170,7 @@ static struct data { + * @sample: The new latency sample value + * + * This receives a new latency sample and records it in a global ring buffer. +- * No additional locking is used in this case - suited for stop_machine use. ++ * No additional locking is used in this case. + */ + static int __buffer_add_sample(struct sample *sample) + { +@@ -229,18 +227,18 @@ static struct sample *buffer_get_sample( + #endif + /** + * get_sample - sample the CPU TSC and look for likely hardware latencies +- * @unused: This is not used but is a part of the stop_machine API + * + * Used to repeatedly capture the CPU TSC (or similar), looking for potential +- * hardware-induced latency. Called under stop_machine, with data.lock held. ++ * hardware-induced latency. Called with interrupts disabled and with ++ * data.lock held. + */ +-static int get_sample(void *unused) ++static int get_sample(void) + { + time_type start, t1, t2, last_t2; + s64 diff, total = 0; + u64 sample = 0; + u64 outer_sample = 0; +- int ret = 1; ++ int ret = -1; + + init_time(last_t2, 0); + start = time_get(); /* start timestamp */ +@@ -279,10 +277,14 @@ static int get_sample(void *unused) + + } while (total <= data.sample_width); + ++ ret = 0; ++ + /* If we exceed the threshold value, we have found a hardware latency */ + if (sample > data.threshold || outer_sample > data.threshold) { + struct sample s; + ++ ret = 1; ++ + data.count++; + s.seqnum = data.count; + s.duration = sample; +@@ -295,7 +297,6 @@ static int get_sample(void *unused) + data.max_sample = sample; + } + +- ret = 0; + out: + return ret; + } +@@ -305,32 +306,30 @@ static int get_sample(void *unused) + * @unused: A required part of the kthread API. + * + * Used to periodically sample the CPU TSC via a call to get_sample. 
We +- * use stop_machine, whith does (intentionally) introduce latency since we ++ * disable interrupts, which does (intentionally) introduce latency since we + * need to ensure nothing else might be running (and thus pre-empting). + * Obviously this should never be used in production environments. + * +- * stop_machine will schedule us typically only on CPU0 which is fine for +- * almost every real-world hardware latency situation - but we might later +- * generalize this if we find there are any actualy systems with alternate +- * SMI delivery or other non CPU0 hardware latencies. ++ * Currently this runs on which ever CPU it was scheduled on, but most ++ * real-worald hardware latency situations occur across several CPUs, ++ * but we might later generalize this if we find there are any actualy ++ * systems with alternate SMI delivery or other hardware latencies. + */ + static int kthread_fn(void *unused) + { +- int err = 0; +- u64 interval = 0; ++ int ret; ++ u64 interval; + + while (!kthread_should_stop()) { + + mutex_lock(&data.lock); + +- err = stop_machine(get_sample, unused, 0); +- if (err) { +- /* Houston, we have a problem */ +- mutex_unlock(&data.lock); +- goto err_out; +- } ++ local_irq_disable(); ++ ret = get_sample(); ++ local_irq_enable(); + +- wake_up(&data.wq); /* wake up reader(s) */ ++ if (ret > 0) ++ wake_up(&data.wq); /* wake up reader(s) */ + + interval = data.sample_window - data.sample_width; + do_div(interval, USEC_PER_MSEC); /* modifies interval value */ +@@ -338,15 +337,10 @@ static int kthread_fn(void *unused) + mutex_unlock(&data.lock); + + if (msleep_interruptible(interval)) +- goto out; ++ break; + } +- goto out; +-err_out: +- pr_err(BANNER "could not call stop_machine, disabling\n"); +- enabled = 0; +-out: +- return err; + ++ return 0; + } + + /** +@@ -442,8 +436,7 @@ static int init_stats(void) + * This function provides a generic read implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_read directly, but we need to make sure that the data.lock +- * spinlock is held during the actual read (even though we likely won't ever +- * actually race here as the updater runs under a stop_machine context). ++ * is held during the actual read. + */ + static ssize_t simple_data_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos, const u64 *entry) +@@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct f + * This function provides a generic write implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_write directly, but we need to make sure that the data.lock +- * spinlock is held during the actual write (even though we likely won't ever +- * actually race here as the updater runs under a stop_machine context). ++ * is held during the actual write. 
+ */ + static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, u64 *entry) diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch b/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch new file mode 100644 index 000000000..4d1a60fd9 --- /dev/null +++ b/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch @@ -0,0 +1,94 @@ +From 4aaca90c0255caee9a55371afaecb32365123762 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Mon, 19 Aug 2013 17:33:26 -0400 +Subject: [PATCH 2/3] hwlat-detector: Use trace_clock_local if available +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +As ktime_get() calls into the timing code which does a read_seq(), it +may be affected by other CPUS that touch that lock. To remove this +dependency, use the trace_clock_local() which is already exported +for module use. If CONFIG_TRACING is enabled, use that as the clock, +otherwise use ktime_get(). + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++--------- + 1 file changed, 25 insertions(+), 9 deletions(-) + +--- a/drivers/misc/hwlat_detector.c ++++ b/drivers/misc/hwlat_detector.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + + #define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ + #define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ +@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample( + return sample; + } + ++#ifndef CONFIG_TRACING ++#define time_type ktime_t ++#define time_get() ktime_get() ++#define time_to_us(x) ktime_to_us(x) ++#define time_sub(a, b) ktime_sub(a, b) ++#define init_time(a, b) (a).tv64 = b ++#define time_u64(a) ((a).tv64) ++#else ++#define time_type u64 ++#define time_get() trace_clock_local() ++#define time_to_us(x) div_u64(x, 1000) ++#define time_sub(a, b) ((a) - (b)) ++#define init_time(a, b) (a = b) ++#define time_u64(a) a ++#endif + /** + * get_sample - sample the CPU TSC and look for likely hardware latencies + * @unused: This is not used but is a part of the stop_machine API +@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample( + */ + static int get_sample(void *unused) + { +- ktime_t start, t1, t2, last_t2; ++ time_type start, t1, t2, last_t2; + s64 diff, total = 0; + u64 sample = 0; + u64 outer_sample = 0; + int ret = 1; + +- last_t2.tv64 = 0; +- start = ktime_get(); /* start timestamp */ ++ init_time(last_t2, 0); ++ start = time_get(); /* start timestamp */ + + do { + +- t1 = ktime_get(); /* we'll look for a discontinuity */ +- t2 = ktime_get(); ++ t1 = time_get(); /* we'll look for a discontinuity */ ++ t2 = time_get(); + +- if (last_t2.tv64) { ++ if (time_u64(last_t2)) { + /* Check the delta from outer loop (t2 to next t1) */ +- diff = ktime_to_us(ktime_sub(t1, last_t2)); ++ diff = time_to_us(time_sub(t1, last_t2)); + /* This shouldn't happen */ + if (diff < 0) { + pr_err(BANNER "time running backwards\n"); +@@ -247,10 +263,10 @@ static int get_sample(void *unused) + } + last_t2 = t2; + +- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ ++ total = time_to_us(time_sub(t2, start)); /* sample width */ + + /* This checks the inner loop (t1 to t2) */ +- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ ++ diff = time_to_us(time_sub(t2, t1)); /* current diff */ + + /* This shouldn't happen */ + if (diff < 0) { diff --git 
a/debian/patches/features/all/rt/hwlatdetect.patch b/debian/patches/features/all/rt/hwlatdetect.patch new file mode 100644 index 000000000..26f6b2b95 --- /dev/null +++ b/debian/patches/features/all/rt/hwlatdetect.patch @@ -0,0 +1,1348 @@ +Subject: hwlatdetect.patch +From: Carsten Emde +Date: Tue, 19 Jul 2011 13:53:12 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Jon Masters developed this wonderful SMI detector. For details please +consult Documentation/hwlat_detector.txt. It could be ported to Linux +3.0 RT without any major change. + +Signed-off-by: Carsten Emde + +--- + Documentation/hwlat_detector.txt | 64 ++ + drivers/misc/Kconfig | 29 + drivers/misc/Makefile | 1 + drivers/misc/hwlat_detector.c | 1212 +++++++++++++++++++++++++++++++++++++++ + 4 files changed, 1306 insertions(+) + +--- /dev/null ++++ b/Documentation/hwlat_detector.txt +@@ -0,0 +1,64 @@ ++Introduction: ++------------- ++ ++The module hwlat_detector is a special purpose kernel module that is used to ++detect large system latencies induced by the behavior of certain underlying ++hardware or firmware, independent of Linux itself. The code was developed ++originally to detect SMIs (System Management Interrupts) on x86 systems, ++however there is nothing x86 specific about this patchset. It was ++originally written for use by the "RT" patch since the Real Time ++kernel is highly latency sensitive. ++ ++SMIs are usually not serviced by the Linux kernel, which typically does not ++even know that they are occuring. SMIs are instead are set up by BIOS code ++and are serviced by BIOS code, usually for "critical" events such as ++management of thermal sensors and fans. Sometimes though, SMIs are used for ++other tasks and those tasks can spend an inordinate amount of time in the ++handler (sometimes measured in milliseconds). Obviously this is a problem if ++you are trying to keep event service latencies down in the microsecond range. ++ ++The hardware latency detector works by hogging all of the cpus for configurable ++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter ++for some period, then looking for gaps in the TSC data. Any gap indicates a ++time when the polling was interrupted and since the machine is stopped and ++interrupts turned off the only thing that could do that would be an SMI. ++ ++Note that the SMI detector should *NEVER* be used in a production environment. ++It is intended to be run manually to determine if the hardware platform has a ++problem with long system firmware service routines. ++ ++Usage: ++------ ++ ++Loading the module hwlat_detector passing the parameter "enabled=1" (or by ++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only ++step required to start the hwlat_detector. It is possible to redefine the ++threshold in microseconds (us) above which latency spikes will be taken ++into account (parameter "threshold="). ++ ++Example: ++ ++ # modprobe hwlat_detector enabled=1 threshold=100 ++ ++After the module is loaded, it creates a directory named "hwlat_detector" under ++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary ++to have debugfs mounted, which might be on /sys/debug on your system. 
++ ++The /debug/hwlat_detector interface contains the following files: ++ ++count - number of latency spikes observed since last reset ++enable - a global enable/disable toggle (0/1), resets count ++max - maximum hardware latency actually observed (usecs) ++sample - a pipe from which to read current raw sample data ++ in the format ++ (can be opened O_NONBLOCK for a single sample) ++threshold - minimum latency value to be considered (usecs) ++width - time period to sample with CPUs held (usecs) ++ must be less than the total window size (enforced) ++window - total period of sampling, width being inside (usecs) ++ ++By default we will set width to 500,000 and window to 1,000,000, meaning that ++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we ++observe any latencies that exceed the threshold (initially 100 usecs), ++then we write to a global sample ring buffer of 8K samples, which is ++consumed by reading from the "sample" (pipe) debugfs file interface. +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -130,6 +130,35 @@ config IBM_ASM + for information on the specific driver level and support statement + for your IBM server. + ++config HWLAT_DETECTOR ++ tristate "Testing module to detect hardware-induced latencies" ++ depends on DEBUG_FS ++ depends on RING_BUFFER ++ default m ++ ---help--- ++ A simple hardware latency detector. Use this module to detect ++ large latencies introduced by the behavior of the underlying ++ system firmware external to Linux. We do this using periodic ++ use of stop_machine to grab all available CPUs and measure ++ for unexplainable gaps in the CPU timestamp counter(s). By ++ default, the module is not enabled until the "enable" file ++ within the "hwlat_detector" debugfs directory is toggled. ++ ++ This module is often used to detect SMI (System Management ++ Interrupts) on x86 systems, though is not x86 specific. To ++ this end, we default to using a sample window of 1 second, ++ during which we will sample for 0.5 seconds. If an SMI or ++ similar event occurs during that time, it is recorded ++ into an 8K samples global ring buffer until retreived. ++ ++ WARNING: This software should never be enabled (it can be built ++ but should not be turned on after it is loaded) in a production ++ environment where high latencies are a concern since the ++ sampling mechanism actually introduces latencies for ++ regular tasks while the CPU(s) are being held. ++ ++ If unsure, say N ++ + config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI +--- a/drivers/misc/Makefile ++++ b/drivers/misc/Makefile +@@ -39,6 +39,7 @@ obj-$(CONFIG_C2PORT) += c2port/ + obj-$(CONFIG_HMC6352) += hmc6352.o + obj-y += eeprom/ + obj-y += cb710/ ++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o + obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o + obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o + obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o +--- /dev/null ++++ b/drivers/misc/hwlat_detector.c +@@ -0,0 +1,1212 @@ ++/* ++ * hwlat_detector.c - A simple Hardware Latency detector. ++ * ++ * Use this module to detect large system latencies induced by the behavior of ++ * certain underlying system hardware or firmware, independent of Linux itself. ++ * The code was developed originally to detect the presence of SMIs on Intel ++ * and AMD systems, although there is no dependency upon x86 herein. ++ * ++ * The classical example usage of this module is in detecting the presence of ++ * SMIs or System Management Interrupts on Intel and AMD systems. 
An SMI is a ++ * somewhat special form of hardware interrupt spawned from earlier CPU debug ++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge ++ * LPC (or other device) to generate a special interrupt under certain ++ * circumstances, for example, upon expiration of a special SMI timer device, ++ * due to certain external thermal readings, on certain I/O address accesses, ++ * and other situations. An SMI hits a special CPU pin, triggers a special ++ * SMI mode (complete with special memory map), and the OS is unaware. ++ * ++ * Although certain hardware-inducing latencies are necessary (for example, ++ * a modern system often requires an SMI handler for correct thermal control ++ * and remote management) they can wreak havoc upon any OS-level performance ++ * guarantees toward low-latency, especially when the OS is not even made ++ * aware of the presence of these interrupts. For this reason, we need a ++ * somewhat brute force mechanism to detect these interrupts. In this case, ++ * we do it by hogging all of the CPU(s) for configurable timer intervals, ++ * sampling the built-in CPU timer, looking for discontiguous readings. ++ * ++ * WARNING: This implementation necessarily introduces latencies. Therefore, ++ * you should NEVER use this module in a production environment ++ * requiring any kind of low-latency performance guarantee(s). ++ * ++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. ++ * ++ * Includes useful feedback from Clark Williams ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ ++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ ++#define U64STR_SIZE 22 /* 20 digits max */ ++ ++#define VERSION "1.0.0" ++#define BANNER "hwlat_detector: " ++#define DRVNAME "hwlat_detector" ++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ ++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ ++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ ++ ++/* Module metadata */ ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Jon Masters "); ++MODULE_DESCRIPTION("A simple hardware latency detector"); ++MODULE_VERSION(VERSION); ++ ++/* Module parameters */ ++ ++static int debug; ++static int enabled; ++static int threshold; ++ ++module_param(debug, int, 0); /* enable debug */ ++module_param(enabled, int, 0); /* enable detector */ ++module_param(threshold, int, 0); /* latency threshold */ ++ ++/* Buffering and sampling */ ++ ++static struct ring_buffer *ring_buffer; /* sample buffer */ ++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ ++static unsigned long buf_size = BUF_SIZE_DEFAULT; ++static struct task_struct *kthread; /* sampling thread */ ++ ++/* DebugFS filesystem entries */ ++ ++static struct dentry *debug_dir; /* debugfs directory */ ++static struct dentry *debug_max; /* maximum TSC delta */ ++static struct dentry *debug_count; /* total detect count */ ++static struct dentry *debug_sample_width; /* sample width us */ ++static struct dentry *debug_sample_window; /* sample window us */ ++static struct dentry *debug_sample; /* raw samples us */ ++static struct dentry *debug_threshold; /* threshold us */ ++static struct dentry *debug_enable; /* enable/disable */ ++ ++/* Individual samples and global 
state */ ++ ++struct sample; /* latency sample */ ++struct data; /* Global state */ ++ ++/* Sampling functions */ ++static int __buffer_add_sample(struct sample *sample); ++static struct sample *buffer_get_sample(struct sample *sample); ++static int get_sample(void *unused); ++ ++/* Threading and state */ ++static int kthread_fn(void *unused); ++static int start_kthread(void); ++static int stop_kthread(void); ++static void __reset_stats(void); ++static int init_stats(void); ++ ++/* Debugfs interface */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry); ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry); ++static int debug_sample_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static int debug_sample_release(struct inode *inode, struct file *filp); ++static int debug_enable_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static ssize_t debug_enable_fwrite(struct file *file, ++ const char __user *user_buffer, ++ size_t user_size, loff_t *offset); ++ ++/* Initialization functions */ ++static int init_debugfs(void); ++static void free_debugfs(void); ++static int detector_init(void); ++static void detector_exit(void); ++ ++/* Individual latency samples are stored here when detected and packed into ++ * the ring_buffer circular buffer, where they are overwritten when ++ * more than buf_size/sizeof(sample) samples are received. */ ++struct sample { ++ u64 seqnum; /* unique sequence */ ++ u64 duration; /* ktime delta */ ++ struct timespec timestamp; /* wall time */ ++ unsigned long lost; ++}; ++ ++/* keep the global state somewhere. Mostly used under stop_machine. */ ++static struct data { ++ ++ struct mutex lock; /* protect changes */ ++ ++ u64 count; /* total since reset */ ++ u64 max_sample; /* max hardware latency */ ++ u64 threshold; /* sample threshold level */ ++ ++ u64 sample_window; /* total sampling window (on+off) */ ++ u64 sample_width; /* active sampling portion of window */ ++ ++ atomic_t sample_open; /* whether the sample file is open */ ++ ++ wait_queue_head_t wq; /* waitqeue for new sample values */ ++ ++} data; ++ ++/** ++ * __buffer_add_sample - add a new latency sample recording to the ring buffer ++ * @sample: The new latency sample value ++ * ++ * This receives a new latency sample and records it in a global ring buffer. ++ * No additional locking is used in this case - suited for stop_machine use. 
++ */ ++static int __buffer_add_sample(struct sample *sample) ++{ ++ return ring_buffer_write(ring_buffer, ++ sizeof(struct sample), sample); ++} ++ ++/** ++ * buffer_get_sample - remove a hardware latency sample from the ring buffer ++ * @sample: Pre-allocated storage for the sample ++ * ++ * This retrieves a hardware latency sample from the global circular buffer ++ */ ++static struct sample *buffer_get_sample(struct sample *sample) ++{ ++ struct ring_buffer_event *e = NULL; ++ struct sample *s = NULL; ++ unsigned int cpu = 0; ++ ++ if (!sample) ++ return NULL; ++ ++ mutex_lock(&ring_buffer_mutex); ++ for_each_online_cpu(cpu) { ++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); ++ if (e) ++ break; ++ } ++ ++ if (e) { ++ s = ring_buffer_event_data(e); ++ memcpy(sample, s, sizeof(struct sample)); ++ } else ++ sample = NULL; ++ mutex_unlock(&ring_buffer_mutex); ++ ++ return sample; ++} ++ ++/** ++ * get_sample - sample the CPU TSC and look for likely hardware latencies ++ * @unused: This is not used but is a part of the stop_machine API ++ * ++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential ++ * hardware-induced latency. Called under stop_machine, with data.lock held. ++ */ ++static int get_sample(void *unused) ++{ ++ ktime_t start, t1, t2; ++ s64 diff, total = 0; ++ u64 sample = 0; ++ int ret = 1; ++ ++ start = ktime_get(); /* start timestamp */ ++ ++ do { ++ ++ t1 = ktime_get(); /* we'll look for a discontinuity */ ++ t2 = ktime_get(); ++ ++ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ ++ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ ++ ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ pr_err(BANNER "time running backwards\n"); ++ goto out; ++ } ++ ++ if (diff > sample) ++ sample = diff; /* only want highest value */ ++ ++ } while (total <= data.sample_width); ++ ++ /* If we exceed the threshold value, we have found a hardware latency */ ++ if (sample > data.threshold) { ++ struct sample s; ++ ++ data.count++; ++ s.seqnum = data.count; ++ s.duration = sample; ++ s.timestamp = CURRENT_TIME; ++ __buffer_add_sample(&s); ++ ++ /* Keep a running maximum ever recorded hardware latency */ ++ if (sample > data.max_sample) ++ data.max_sample = sample; ++ } ++ ++ ret = 0; ++out: ++ return ret; ++} ++ ++/* ++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread ++ * @unused: A required part of the kthread API. ++ * ++ * Used to periodically sample the CPU TSC via a call to get_sample. We ++ * use stop_machine, whith does (intentionally) introduce latency since we ++ * need to ensure nothing else might be running (and thus pre-empting). ++ * Obviously this should never be used in production environments. ++ * ++ * stop_machine will schedule us typically only on CPU0 which is fine for ++ * almost every real-world hardware latency situation - but we might later ++ * generalize this if we find there are any actualy systems with alternate ++ * SMI delivery or other non CPU0 hardware latencies. 
++ */ ++static int kthread_fn(void *unused) ++{ ++ int err = 0; ++ u64 interval = 0; ++ ++ while (!kthread_should_stop()) { ++ ++ mutex_lock(&data.lock); ++ ++ err = stop_machine(get_sample, unused, 0); ++ if (err) { ++ /* Houston, we have a problem */ ++ mutex_unlock(&data.lock); ++ goto err_out; ++ } ++ ++ wake_up(&data.wq); /* wake up reader(s) */ ++ ++ interval = data.sample_window - data.sample_width; ++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */ ++ ++ mutex_unlock(&data.lock); ++ ++ if (msleep_interruptible(interval)) ++ goto out; ++ } ++ goto out; ++err_out: ++ pr_err(BANNER "could not call stop_machine, disabling\n"); ++ enabled = 0; ++out: ++ return err; ++ ++} ++ ++/** ++ * start_kthread - Kick off the hardware latency sampling/detector kthread ++ * ++ * This starts a kernel thread that will sit and sample the CPU timestamp ++ * counter (TSC or similar) and look for potential hardware latencies. ++ */ ++static int start_kthread(void) ++{ ++ kthread = kthread_run(kthread_fn, NULL, ++ DRVNAME); ++ if (IS_ERR(kthread)) { ++ pr_err(BANNER "could not start sampling thread\n"); ++ enabled = 0; ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * stop_kthread - Inform the hardware latency samping/detector kthread to stop ++ * ++ * This kicks the running hardware latency sampling/detector kernel thread and ++ * tells it to stop sampling now. Use this on unload and at system shutdown. ++ */ ++static int stop_kthread(void) ++{ ++ int ret; ++ ++ ret = kthread_stop(kthread); ++ ++ return ret; ++} ++ ++/** ++ * __reset_stats - Reset statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We call this ++ * function in order to reset those when "enable" is toggled on or off, and ++ * also at initialization. Should be called with data.lock held. ++ */ ++static void __reset_stats(void) ++{ ++ data.count = 0; ++ data.max_sample = 0; ++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ ++} ++ ++/** ++ * init_stats - Setup global state statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We also use ++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware ++ * induced system latencies. This function initializes these structures and ++ * allocates the global ring buffer also. ++ */ ++static int init_stats(void) ++{ ++ int ret = -ENOMEM; ++ ++ mutex_init(&data.lock); ++ init_waitqueue_head(&data.wq); ++ atomic_set(&data.sample_open, 0); ++ ++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); ++ ++ if (WARN(!ring_buffer, KERN_ERR BANNER ++ "failed to allocate ring buffer!\n")) ++ goto out; ++ ++ __reset_stats(); ++ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ ++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ ++ ++ ret = 0; ++ ++out: ++ return ret; ++ ++} ++ ++/* ++ * simple_data_read - Wrapper read function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * @entry: The entry to read from ++ * ++ * This function provides a generic read implementation for the global state ++ * "data" structure debugfs filesystem entries. 
It would be nice to use ++ * simple_attr_read directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual read (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). ++ */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ u64 val = 0; ++ int len = 0; ++ ++ memset(buf, 0, sizeof(buf)); ++ ++ if (!entry) ++ return -EFAULT; ++ ++ mutex_lock(&data.lock); ++ val = *entry; ++ mutex_unlock(&data.lock); ++ ++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); ++ ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); ++ ++} ++ ++/* ++ * simple_data_write - Wrapper write function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to write value from ++ * @cnt: The maximum number of bytes to write ++ * @ppos: The current "file" position ++ * @entry: The entry to write to ++ * ++ * This function provides a generic write implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_write directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual write (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). ++ */ ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ *entry = val; ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/** ++ * debug_count_fopen - Open function for "count" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "count" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_count_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_count_fread - Read function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to read the ++ * number of latency readings exceeding the configured threshold since ++ * the detector was last reset (e.g. by writing a zero into "count"). 
++ */ ++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_count_fwrite - Write function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to write a ++ * desired value, especially to zero the total count. ++ */ ++static ssize_t debug_count_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "enable" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_enable_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_enable_fread - Read function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * whether the detector is currently enabled ("0\n" or "1\n" returned). ++ */ ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[4]; ++ ++ if ((cnt < sizeof(buf)) || (*ppos)) ++ return 0; ++ ++ buf[0] = enabled ? '1' : '0'; ++ buf[1] = '\n'; ++ buf[2] = '\0'; ++ if (copy_to_user(ubuf, buf, strlen(buf))) ++ return -EFAULT; ++ return *ppos = strlen(buf); ++} ++ ++/** ++ * debug_enable_fwrite - Write function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to enable or ++ * disable the detector, which will have the side-effect of possibly ++ * also resetting the global stats and kicking off the measuring ++ * kthread (on an enable) or the converse (upon a disable). 
++ */ ++static ssize_t debug_enable_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[4]; ++ int csize = min(cnt, sizeof(buf)); ++ long val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[sizeof(buf)-1] = '\0'; /* just in case */ ++ err = kstrtoul(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ if (val) { ++ if (enabled) ++ goto unlock; ++ enabled = 1; ++ __reset_stats(); ++ if (start_kthread()) ++ return -EFAULT; ++ } else { ++ if (!enabled) ++ goto unlock; ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) { ++ pr_err(BANNER "cannot stop kthread\n"); ++ return -EFAULT; ++ } ++ wake_up(&data.wq); /* reader(s) should return */ ++ } ++unlock: ++ return csize; ++} ++ ++/** ++ * debug_max_fopen - Open function for "max" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "max" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_max_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_max_fread - Read function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * the maximum latency value observed since it was last reset. ++ */ ++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++/** ++ * debug_max_fwrite - Write function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to reset the ++ * maximum or set it to some other desired value - if, then, subsequent ++ * measurements exceed this value, the maximum will be updated. ++ */ ++static ssize_t debug_max_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++ ++/** ++ * debug_sample_fopen - An open function for "sample" debugfs interface ++ * @inode: The in-kernel inode representation of this debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function handles opening the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. Can be opened blocking or non-blocking, ++ * affecting whether it behaves as a buffer read pipe, or does not. ++ * Implements simple locking to prevent multiple simultaneous use. 
++ */ ++static int debug_sample_fopen(struct inode *inode, struct file *filp) ++{ ++ if (!atomic_add_unless(&data.sample_open, 1, 1)) ++ return -EBUSY; ++ else ++ return 0; ++} ++ ++/** ++ * debug_sample_fread - A read function for "sample" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that will contain the samples read ++ * @cnt: The maximum bytes to read from the debugfs "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function handles reading from the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. By default this will block pending a new ++ * value written into the sample buffer, unless there are already a ++ * number of value(s) waiting in the buffer, or the sample file was ++ * previously opened in a non-blocking mode of operation. ++ */ ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ int len = 0; ++ char buf[64]; ++ struct sample *sample = NULL; ++ ++ if (!enabled) ++ return 0; ++ ++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); ++ if (!sample) ++ return -ENOMEM; ++ ++ while (!buffer_get_sample(sample)) { ++ ++ DEFINE_WAIT(wait); ++ ++ if (filp->f_flags & O_NONBLOCK) { ++ len = -EAGAIN; ++ goto out; ++ } ++ ++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&data.wq, &wait); ++ ++ if (signal_pending(current)) { ++ len = -EINTR; ++ goto out; ++ } ++ ++ if (!enabled) { /* enable was toggled */ ++ len = 0; ++ goto out; ++ } ++ } ++ ++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", ++ sample->timestamp.tv_sec, ++ sample->timestamp.tv_nsec, ++ sample->duration); ++ ++ ++ /* handling partial reads is more trouble than it's worth */ ++ if (len > cnt) ++ goto out; ++ ++ if (copy_to_user(ubuf, buf, len)) ++ len = -EFAULT; ++ ++out: ++ kfree(sample); ++ return len; ++} ++ ++/** ++ * debug_sample_release - Release function for "sample" debugfs interface ++ * @inode: The in-kernel inode represenation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function completes the close of the debugfs interface "sample" file. ++ * Frees the sample_open "lock" so that other users may open the interface. ++ */ ++static int debug_sample_release(struct inode *inode, struct file *filp) ++{ ++ atomic_dec(&data.sample_open); ++ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fopen - Open function for "threshold" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_threshold_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fread - Read function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. 
It can be used to determine ++ * the current threshold level at which a latency will be recorded in the ++ * global ring buffer, typically on the order of 10us. ++ */ ++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); ++} ++ ++/** ++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * the threshold level at which any subsequently detected latencies will ++ * be recorded into the global ring buffer. ++ */ ++static ssize_t debug_threshold_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ int ret; ++ ++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return ret; ++} ++ ++/** ++ * debug_width_fopen - Open function for "width" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "width" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_width_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_width_fread - Read function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latecy periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. ++ */ ++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); ++} ++ ++/** ++ * debug_width_fwrite - Write function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latency periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. It ++ * is enforced that width is less that the total window size. 
++ */ ++static ssize_t debug_width_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (val < data.sample_window) ++ data.sample_width = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return csize; ++} ++ ++/** ++ * debug_window_fopen - Open function for "window" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. ++ */ ++static int debug_window_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_window_fread - Read function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to read the total window size. ++ */ ++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); ++} ++ ++/** ++ * debug_window_fwrite - Write function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "window" debufds ++ * interface to the hardware latency detetector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to write a new total window size. It ++ * is enfoced that any value written must be greater than the sample width ++ * size, or an error results. 
++ */ ++static ssize_t debug_window_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (data.sample_width < val) ++ data.sample_window = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/* ++ * Function pointers for the "count" debugfs file operations ++ */ ++static const struct file_operations count_fops = { ++ .open = debug_count_fopen, ++ .read = debug_count_fread, ++ .write = debug_count_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "enable" debugfs file operations ++ */ ++static const struct file_operations enable_fops = { ++ .open = debug_enable_fopen, ++ .read = debug_enable_fread, ++ .write = debug_enable_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "max" debugfs file operations ++ */ ++static const struct file_operations max_fops = { ++ .open = debug_max_fopen, ++ .read = debug_max_fread, ++ .write = debug_max_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "sample" debugfs file operations ++ */ ++static const struct file_operations sample_fops = { ++ .open = debug_sample_fopen, ++ .read = debug_sample_fread, ++ .release = debug_sample_release, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "threshold" debugfs file operations ++ */ ++static const struct file_operations threshold_fops = { ++ .open = debug_threshold_fopen, ++ .read = debug_threshold_fread, ++ .write = debug_threshold_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "width" debugfs file operations ++ */ ++static const struct file_operations width_fops = { ++ .open = debug_width_fopen, ++ .read = debug_width_fread, ++ .write = debug_width_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "window" debugfs file operations ++ */ ++static const struct file_operations window_fops = { ++ .open = debug_window_fopen, ++ .read = debug_window_fread, ++ .write = debug_window_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/** ++ * init_debugfs - A function to initialize the debugfs interface files ++ * ++ * This function creates entries in debugfs for "hwlat_detector", including ++ * files to read values from the detector, current samples, and the ++ * maximum sample that has been captured since the hardware latency ++ * dectector was started. 
++ */ ++static int init_debugfs(void) ++{ ++ int ret = -ENOMEM; ++ ++ debug_dir = debugfs_create_dir(DRVNAME, NULL); ++ if (!debug_dir) ++ goto err_debug_dir; ++ ++ debug_sample = debugfs_create_file("sample", 0444, ++ debug_dir, NULL, ++ &sample_fops); ++ if (!debug_sample) ++ goto err_sample; ++ ++ debug_count = debugfs_create_file("count", 0444, ++ debug_dir, NULL, ++ &count_fops); ++ if (!debug_count) ++ goto err_count; ++ ++ debug_max = debugfs_create_file("max", 0444, ++ debug_dir, NULL, ++ &max_fops); ++ if (!debug_max) ++ goto err_max; ++ ++ debug_sample_window = debugfs_create_file("window", 0644, ++ debug_dir, NULL, ++ &window_fops); ++ if (!debug_sample_window) ++ goto err_window; ++ ++ debug_sample_width = debugfs_create_file("width", 0644, ++ debug_dir, NULL, ++ &width_fops); ++ if (!debug_sample_width) ++ goto err_width; ++ ++ debug_threshold = debugfs_create_file("threshold", 0644, ++ debug_dir, NULL, ++ &threshold_fops); ++ if (!debug_threshold) ++ goto err_threshold; ++ ++ debug_enable = debugfs_create_file("enable", 0644, ++ debug_dir, &enabled, ++ &enable_fops); ++ if (!debug_enable) ++ goto err_enable; ++ ++ else { ++ ret = 0; ++ goto out; ++ } ++ ++err_enable: ++ debugfs_remove(debug_threshold); ++err_threshold: ++ debugfs_remove(debug_sample_width); ++err_width: ++ debugfs_remove(debug_sample_window); ++err_window: ++ debugfs_remove(debug_max); ++err_max: ++ debugfs_remove(debug_count); ++err_count: ++ debugfs_remove(debug_sample); ++err_sample: ++ debugfs_remove(debug_dir); ++err_debug_dir: ++out: ++ return ret; ++} ++ ++/** ++ * free_debugfs - A function to cleanup the debugfs file interface ++ */ ++static void free_debugfs(void) ++{ ++ /* could also use a debugfs_remove_recursive */ ++ debugfs_remove(debug_enable); ++ debugfs_remove(debug_threshold); ++ debugfs_remove(debug_sample_width); ++ debugfs_remove(debug_sample_window); ++ debugfs_remove(debug_max); ++ debugfs_remove(debug_count); ++ debugfs_remove(debug_sample); ++ debugfs_remove(debug_dir); ++} ++ ++/** ++ * detector_init - Standard module initialization code ++ */ ++static int detector_init(void) ++{ ++ int ret = -ENOMEM; ++ ++ pr_info(BANNER "version %s\n", VERSION); ++ ++ ret = init_stats(); ++ if (0 != ret) ++ goto out; ++ ++ ret = init_debugfs(); ++ if (0 != ret) ++ goto err_stats; ++ ++ if (enabled) ++ ret = start_kthread(); ++ ++ goto out; ++ ++err_stats: ++ ring_buffer_free(ring_buffer); ++out: ++ return ret; ++ ++} ++ ++/** ++ * detector_exit - Standard module cleanup code ++ */ ++static void detector_exit(void) ++{ ++ int err; ++ ++ if (enabled) { ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) ++ pr_err(BANNER "cannot stop kthread\n"); ++ } ++ ++ free_debugfs(); ++ ring_buffer_free(ring_buffer); /* free up the ring buffer */ ++ ++} ++ ++module_init(detector_init); ++module_exit(detector_exit); diff --git a/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch new file mode 100644 index 000000000..16ff6564c --- /dev/null +++ b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch @@ -0,0 +1,35 @@ +From 5145351047b216cca13aaca99f939a9a594c6c4d Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 11:35:49 +0100 +Subject: [PATCH 2/3] i2c/omap: drop the lock hard irq context +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The lock is taken while reading two registers. 
On RT the first lock is +taken in hard irq where it might sleep and in the threaded irq. +The threaded irq runs in oneshot mode so the hard irq does not run until +the thread the completes so there is no reason to grab the lock. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/i2c/busses/i2c-omap.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/i2c/busses/i2c-omap.c ++++ b/drivers/i2c/busses/i2c-omap.c +@@ -879,15 +879,12 @@ omap_i2c_isr(int irq, void *dev_id) + u16 mask; + u16 stat; + +- spin_lock(&dev->lock); +- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); ++ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + + if (stat & mask) + ret = IRQ_WAKE_THREAD; + +- spin_unlock(&dev->lock); +- + return ret; + } + diff --git a/debian/patches/features/all/rt/i915_compile_fix.patch b/debian/patches/features/all/rt/i915_compile_fix.patch new file mode 100644 index 000000000..0afe5416b --- /dev/null +++ b/debian/patches/features/all/rt/i915_compile_fix.patch @@ -0,0 +1,23 @@ +From: Sebastian Andrzej Siewior +Subject: gpu/i915: don't open code these things +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The opencode part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions") +the owner check is still there. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -4975,7 +4975,7 @@ static bool mutex_is_locked_by(struct mu + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch new file mode 100644 index 000000000..e7dd59be5 --- /dev/null +++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch @@ -0,0 +1,170 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:16 -0500 +Subject: ide: Do not disable interrupts for PREEMPT-RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use the local_irq_*_nort variants. 
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/ide/alim15x3.c | 4 ++-- + drivers/ide/hpt366.c | 4 ++-- + drivers/ide/ide-io-std.c | 8 ++++---- + drivers/ide/ide-io.c | 2 +- + drivers/ide/ide-iops.c | 4 ++-- + drivers/ide/ide-probe.c | 4 ++-- + drivers/ide/ide-taskfile.c | 6 +++--- + 7 files changed, 16 insertions(+), 16 deletions(-) + +--- a/drivers/ide/alim15x3.c ++++ b/drivers/ide/alim15x3.c +@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +--- a/drivers/ide/hpt366.c ++++ b/drivers/ide/hpt366.c +@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); +@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +--- a/drivers/ide/ide-io-std.c ++++ b/drivers/ide/ide-io-std.c +@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +--- a/drivers/ide/ide-io.c ++++ b/drivers/ide/ide-io.c +@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat + /* disable_irq_nosync ?? */ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +--- a/drivers/ide/ide-iops.c ++++ b/drivers/ide/ide-iops.c +@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. 
+--- a/drivers/ide/ide-probe.c ++++ b/drivers/ide/ide-probe.c +@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +--- a/drivers/ide/ide-taskfile.c ++++ b/drivers/ide/ide-taskfile.c +@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page) + offset; + +@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + kunmap_atomic(buf); + + if (page_is_high) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch new file mode 100644 index 000000000..afe00c378 --- /dev/null +++ b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch @@ -0,0 +1,97 @@ +From: Thomas Gleixner +Subject: idr: Use local lock instead of preempt enable/disable +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We need to protect the per cpu variable and prevent migration. + +Signed-off-by: Thomas Gleixner +--- + include/linux/idr.h | 4 ++++ + lib/idr.c | 37 ++++++++++++++++++++++++++++++++++--- + 2 files changed, 38 insertions(+), 3 deletions(-) + +--- a/include/linux/idr.h ++++ b/include/linux/idr.h +@@ -92,10 +92,14 @@ void idr_init(struct idr *idp); + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++void idr_preload_end(void); ++#else + static inline void idr_preload_end(void) + { + preempt_enable(); + } ++#endif + + /** + * idr_find - return pointer for given id +--- a/lib/idr.c ++++ b/lib/idr.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + + #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) + #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) +@@ -389,6 +390,36 @@ int __idr_get_new_above(struct idr *idp, + } + EXPORT_SYMBOL(__idr_get_new_above); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_LOCAL_IRQ_LOCK(idr_lock); ++ ++static inline void idr_preload_lock(void) ++{ ++ local_lock(idr_lock); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ local_unlock(idr_lock); ++} ++ ++void idr_preload_end(void) ++{ ++ idr_preload_unlock(); ++} ++EXPORT_SYMBOL(idr_preload_end); ++#else ++static inline void idr_preload_lock(void) ++{ ++ preempt_disable(); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ preempt_enable(); ++} ++#endif ++ + /** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading +@@ -423,7 +454,7 @@ void idr_preload(gfp_t gfp_mask) + WARN_ON_ONCE(in_interrupt()); + might_sleep_if(gfp_mask & __GFP_WAIT); + +- preempt_disable(); ++ idr_preload_lock(); + + /* + * idr_alloc() is likely to succeed w/o full idr_layer buffer and +@@ -435,9 +466,9 @@ void idr_preload(gfp_t gfp_mask) + while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { + struct idr_layer *new; + +- preempt_enable(); ++ idr_preload_unlock(); + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); +- preempt_disable(); ++ idr_preload_lock(); + if (!new) + break; + diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch new file mode 100644 index 000000000..ad74c0815 --- /dev/null +++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch @@ -0,0 +1,41 @@ +From: Sven-Thorsten Dietrich +Date: Fri, 3 Jul 2009 08:30:35 -0500 +Subject: infiniband: Mellanox IB driver patch use _nort() primitives +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT +Kernel. + +Michael S. Tsirkin sayeth: +"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable +interrupts for non-raw spinlocks, I think all of infiniband will be fine without +changes." 
+ +Signed-off-by: Sven-Thorsten Dietrich +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +@@ -804,7 +804,7 @@ void ipoib_mcast_restart_task(struct wor + + ipoib_mcast_stop_thread(dev, 0); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); + +@@ -886,7 +886,7 @@ void ipoib_mcast_restart_task(struct wor + + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* We have to cancel outside of the spinlock */ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch new file mode 100644 index 000000000..ac083719d --- /dev/null +++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch @@ -0,0 +1,45 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:16 -0500 +Subject: input: gameport: Do not disable interrupts on PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use the _nort() primitives. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/input/gameport/gameport.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/input/gameport/gameport.c ++++ b/drivers/input/gameport/gameport.c +@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + GET_TIME(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + GET_TIME(t2); + GET_TIME(t3); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; + } +@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + rdtscl(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + rdtscl(t2); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if (t2 - t1 < tx) tx = t2 - t1; + } diff --git a/debian/patches/features/all/rt/ipc-make-rt-aware.patch b/debian/patches/features/all/rt/ipc-make-rt-aware.patch new file mode 100644 index 000000000..c3f8406ba --- /dev/null +++ b/debian/patches/features/all/rt/ipc-make-rt-aware.patch @@ -0,0 +1,89 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:12 -0500 +Subject: ipc: Make the ipc code -rt aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +RT serializes the code with the (rt)spinlock but keeps preemption +enabled. Some parts of the code need to be atomic nevertheless. + +Protect it with preempt_disable/enable_rt pairts. 
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + ipc/mqueue.c | 5 +++++ + ipc/msg.c | 16 +++++++++++++++- + 2 files changed, 20 insertions(+), 1 deletion(-) + +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -923,12 +923,17 @@ static inline void pipelined_send(struct + struct msg_msg *message, + struct ext_wait_queue *receiver) + { ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); + receiver->msg = message; + list_del(&receiver->list); + receiver->state = STATE_PENDING; + wake_up_process(receiver->task); + smp_wmb(); + receiver->state = STATE_READY; ++ preempt_enable_rt(); + } + + /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -253,6 +253,12 @@ static void expunge_all(struct msg_queue + struct msg_receiver *msr, *t; + + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); ++ + msr->r_msg = NULL; /* initialize expunge ordering */ + wake_up_process(msr->r_tsk); + /* +@@ -263,6 +269,8 @@ static void expunge_all(struct msg_queue + */ + smp_mb(); + msr->r_msg = ERR_PTR(res); ++ ++ preempt_enable_rt(); + } + } + +@@ -641,6 +649,11 @@ static inline int pipelined_send(struct + if (testmsg(msg, msr->r_msgtype, msr->r_mode) && + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); + + list_del(&msr->r_list); + if (msr->r_maxsize < msg->m_ts) { +@@ -662,12 +675,13 @@ static inline int pipelined_send(struct + */ + smp_mb(); + msr->r_msg = msg; ++ preempt_enable_rt(); + + return 1; + } ++ preempt_enable_rt(); + } + } +- + return 0; + } + diff --git a/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch new file mode 100644 index 000000000..e4915d97d --- /dev/null +++ b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch @@ -0,0 +1,65 @@ +Subject: ipc/mqueue: Add a critical section to avoid a deadlock +From: KOBAYASHI Yoshitake +Date: Sat, 23 Jul 2011 11:57:36 +0900 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +(Repost for v3.0-rt1 and changed the distination addreses) +I have tested the following patch on v3.0-rt1 with PREEMPT_RT_FULL. +In POSIX message queue, if a sender process uses SCHED_FIFO and +has a higher priority than a receiver process, the sender will +be stuck at ipc/mqueue.c:452 + + 452 while (ewp->state == STATE_PENDING) + 453 cpu_relax(); + +Description of the problem + (receiver process) + 1. receiver changes sender's state to STATE_PENDING (mqueue.c:846) + 2. wake up sender process and "switch to sender" (mqueue.c:847) + Note: This context switch only happens in PREEMPT_RT_FULL kernel. + (sender process) + 3. sender check the own state in above loop (mqueue.c:452-453) + *. 
receiver will never wake up and cannot change sender's state to + STATE_READY because sender has higher priority + + +Signed-off-by: Yoshitake Kobayashi +Cc: viro@zeniv.linux.org.uk +Cc: dchinner@redhat.com +Cc: npiggin@kernel.dk +Cc: hch@lst.de +Cc: arnd@arndb.de +Link: http://lkml.kernel.org/r/4E2A38A0.1090601@toshiba.co.jp +Signed-off-by: Thomas Gleixner +--- + ipc/mqueue.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -947,13 +947,18 @@ static inline void pipelined_receive(str + wake_up_interruptible(&info->wait_q); + return; + } +- if (msg_insert(sender->msg, info)) +- return; +- list_del(&sender->list); +- sender->state = STATE_PENDING; +- wake_up_process(sender->task); +- smp_wmb(); +- sender->state = STATE_READY; ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); ++ if (!msg_insert(sender->msg, info)) { ++ list_del(&sender->list); ++ sender->state = STATE_PENDING; ++ wake_up_process(sender->task); ++ smp_wmb(); ++ sender->state = STATE_READY; ++ } ++ preempt_enable_rt(); + } + + SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch new file mode 100644 index 000000000..06540ecef --- /dev/null +++ b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch @@ -0,0 +1,70 @@ +Subject: ipc/sem: Rework semaphore wakeups +From: Peter Zijlstra +Date: Wed, 14 Sep 2011 11:57:04 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Current sysv sems have a weird ass wakeup scheme that involves keeping +preemption disabled over a potential O(n^2) loop and busy waiting on +that on other CPUs. + +Kill this and simply wake the task directly from under the sem_lock. 
+ +This was discovered by a migrate_disable() debug feature that +disallows: + + spin_lock(); + preempt_disable(); + spin_unlock() + preempt_enable(); + +Cc: Manfred Spraul +Suggested-by: Thomas Gleixner +Reported-by: Mike Galbraith +Signed-off-by: Peter Zijlstra +Cc: Manfred Spraul +Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins +Signed-off-by: Thomas Gleixner +--- + ipc/sem.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -667,6 +667,13 @@ static int perform_atomic_semop(struct s + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -678,6 +685,7 @@ static void wake_up_sem_queue_prepare(st + q->pid = error; + + list_add_tail(&q->list, pt); ++#endif + } + + /** +@@ -691,6 +699,7 @@ static void wake_up_sem_queue_prepare(st + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -703,6 +712,7 @@ static void wake_up_sem_queue_do(struct + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch new file mode 100644 index 000000000..681841579 --- /dev/null +++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -0,0 +1,145 @@ +Subject: genirq: Allow disabling of softirq processing in irq thread context +From: Thomas Gleixner +Date: Tue, 31 Jan 2012 13:01:27 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The processing of softirqs in irq thread context is a performance gain +for the non-rt workloads of a system, but it's counterproductive for +interrupts which are explicitely related to the realtime +workload. Allow such interrupts to prevent softirq processing in their +thread context. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/interrupt.h | 2 ++ + include/linux/irq.h | 4 +++- + kernel/irq/manage.c | 13 ++++++++++++- + kernel/irq/settings.h | 12 ++++++++++++ + kernel/softirq.c | 7 +++++++ + 5 files changed, 36 insertions(+), 2 deletions(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -57,6 +57,7 @@ + * IRQF_NO_THREAD - Interrupt cannot be threaded + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device + * resume time. ++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_DISABLED 0x00000020 + #define IRQF_SHARED 0x00000080 +@@ -70,6 +71,7 @@ + #define IRQF_FORCE_RESUME 0x00008000 + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00080000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -73,6 +73,7 @@ typedef void (*irq_preflow_handler_t)(st + * IRQ_IS_POLLED - Always polled by another interrupt. 
Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. ++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + */ + enum { + IRQ_TYPE_NONE = 0x00000000, +@@ -98,13 +99,14 @@ enum { + IRQ_NOTHREAD = (1 << 16), + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 19), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ +- IRQ_IS_POLLED) ++ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -856,7 +856,15 @@ irq_forced_thread_fn(struct irq_desc *de + local_bh_disable(); + ret = action->thread_fn(action->irq, action->dev_id); + irq_finalize_oneshot(desc, action); +- local_bh_enable(); ++ /* ++ * Interrupts which have real time requirements can be set up ++ * to avoid softirq processing in the thread handler. This is ++ * safe as these interrupts do not raise soft interrupts. ++ */ ++ if (irq_settings_no_softirq_call(desc)) ++ _local_bh_enable(); ++ else ++ local_bh_enable(); + return ret; + } + +@@ -1201,6 +1209,9 @@ static int + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + } + ++ if (new->flags & IRQF_NO_SOFTIRQ_CALL) ++ irq_settings_set_no_softirq_call(desc); ++ + /* Set default affinity mask once everything is setup */ + setup_affinity(irq, desc, mask); + +--- a/kernel/irq/settings.h ++++ b/kernel/irq/settings.h +@@ -15,6 +15,7 @@ enum { + _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_IS_POLLED = IRQ_IS_POLLED, ++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, + _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, + }; + +@@ -28,6 +29,7 @@ enum { + #define IRQ_NESTED_THREAD GOT_YOU_MORON + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON + #define IRQ_IS_POLLED GOT_YOU_MORON ++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON + #undef IRQF_MODIFY_MASK + #define IRQF_MODIFY_MASK GOT_YOU_MORON + +@@ -38,6 +40,16 @@ irq_settings_clr_and_set(struct irq_desc + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); + } + ++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) ++{ ++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; ++} ++ ++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) ++{ ++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; ++} ++ + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) + { + return desc->status_use_accessors & _IRQ_PER_CPU; +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -463,6 +463,13 @@ void local_bh_enable_ip(unsigned long ip + } + EXPORT_SYMBOL(local_bh_enable_ip); + ++void _local_bh_enable(void) ++{ ++ current->softirq_nestcnt--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL(_local_bh_enable); ++ + /* For tracing */ + int notrace __in_softirq(void) + { diff --git a/debian/patches/features/all/rt/irq_work-allow-certain-work-in-hard-irq-context.patch b/debian/patches/features/all/rt/irq_work-allow-certain-work-in-hard-irq-context.patch new file mode 100644 index 000000000..2abb6e2e6 --- /dev/null +++ b/debian/patches/features/all/rt/irq_work-allow-certain-work-in-hard-irq-context.patch @@ -0,0 +1,159 @@ +From 8e48945ae26991b40973b233f34937477dcf97ab Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 31 Jan 2014 14:20:31 +0100 +Subject: [PATCH 1/7] irq_work: allow 
certain work in hard irq context +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +irq_work is processed in softirq context on -RT because we want to avoid +long latencies which might arise from processing lots of perf events. +The noHZ-full mode requires its callback to be called from real hardirq +context (commit 76c24fb ("nohz: New APIs to re-evaluate the tick on full +dynticks CPUs")). If it is called from a thread context we might get +wrong results for checks like "is_idle_task(current)". +This patch introduces a second list (hirq_work_list) which will be used +if irq_work_run() has been invoked from hardirq context and process only +work items marked with IRQ_WORK_HARD_IRQ. + +This patch also removes arch_irq_work_raise() from sparc & powerpc like +it is already done for x86. Atleast for powerpc it is somehow +superfluous because it is called from the timer interrupt which should +invoke update_process_times(). + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/kernel/smp.c | 2 ++ + arch/powerpc/kernel/time.c | 2 +- + arch/sparc/kernel/pcr.c | 2 ++ + include/linux/irq_work.h | 1 + + kernel/irq_work.c | 22 +++++++++++++++++++--- + kernel/time/tick-sched.c | 1 + + kernel/timer.c | 2 +- + 7 files changed, 27 insertions(+), 5 deletions(-) + +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -454,12 +454,14 @@ void arch_send_call_function_single_ipi( + } + + #ifdef CONFIG_IRQ_WORK ++#ifndef CONFIG_PREEMPT_RT_FULL + void arch_irq_work_raise(void) + { + if (is_smp()) + smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); + } + #endif ++#endif + + static const char *ipi_types[NR_IPI] = { + #define S(x,s) [x] = s +--- a/arch/powerpc/kernel/time.c ++++ b/arch/powerpc/kernel/time.c +@@ -423,7 +423,7 @@ unsigned long profile_pc(struct pt_regs + EXPORT_SYMBOL(profile_pc); + #endif + +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) + + /* + * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... +--- a/arch/sparc/kernel/pcr.c ++++ b/arch/sparc/kernel/pcr.c +@@ -43,10 +43,12 @@ void __irq_entry deferred_pcr_work_irq(i + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void arch_irq_work_raise(void) + { + set_softint(1 << PIL_DEFERRED_PCR_WORK); + } ++#endif + + const struct pcr_ops *pcr_ops; + EXPORT_SYMBOL_GPL(pcr_ops); +--- a/include/linux/irq_work.h ++++ b/include/linux/irq_work.h +@@ -16,6 +16,7 @@ + #define IRQ_WORK_BUSY 2UL + #define IRQ_WORK_FLAGS 3UL + #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ ++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ + + struct irq_work { + unsigned long flags; +--- a/kernel/irq_work.c ++++ b/kernel/irq_work.c +@@ -20,6 +20,9 @@ + + + static DEFINE_PER_CPU(struct llist_head, irq_work_list); ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_PER_CPU(struct llist_head, hirq_work_list); ++#endif + static DEFINE_PER_CPU(int, irq_work_raised); + + /* +@@ -48,7 +51,11 @@ static bool irq_work_claim(struct irq_wo + return true; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++void arch_irq_work_raise(void) ++#else + void __weak arch_irq_work_raise(void) ++#endif + { + /* + * Lame architectures will get the timer tick callback +@@ -70,8 +77,12 @@ void irq_work_queue(struct irq_work *wor + /* Queue the entry and raise the IPI if needed. 
*/ + preempt_disable(); + +- llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); +- ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (work->flags & IRQ_WORK_HARD_IRQ) ++ llist_add(&work->llnode, &__get_cpu_var(hirq_work_list)); ++ else ++#endif ++ llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); + /* + * If the work is not "lazy" or the tick is stopped, raise the irq + * work interrupt (if supported by the arch), otherwise, just wait +@@ -115,7 +126,12 @@ static void __irq_work_run(void) + __this_cpu_write(irq_work_raised, 0); + barrier(); + +- this_list = &__get_cpu_var(irq_work_list); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (in_irq()) ++ this_list = &__get_cpu_var(hirq_work_list); ++ else ++#endif ++ this_list = &__get_cpu_var(irq_work_list); + if (llist_empty(this_list)) + return; + +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -222,6 +222,7 @@ static void nohz_full_kick_work_func(str + + static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { + .func = nohz_full_kick_work_func, ++ .flags = IRQ_WORK_HARD_IRQ, + }; + + /* +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1425,7 +1425,7 @@ void update_process_times(int user_tick) + scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); +-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) ++#if defined(CONFIG_IRQ_WORK) + if (in_irq()) + irq_work_run(); + #endif diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch new file mode 100644 index 000000000..e5a56364b --- /dev/null +++ b/debian/patches/features/all/rt/jump-label-rt.patch @@ -0,0 +1,22 @@ +Subject: jump-label-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Jul 2011 11:03:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/jump_label.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/include/linux/jump_label.h ++++ b/include/linux/jump_label.h +@@ -56,7 +56,8 @@ extern bool static_key_initialized; + "%s used before call to jump_label_init", \ + __func__) + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ ++ !defined(CONFIG_PREEMPT_BASE) + + struct static_key { + atomic_t enabled; diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch new file mode 100644 index 000000000..b69f8ac62 --- /dev/null +++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch @@ -0,0 +1,45 @@ +Subject: kconfig-disable-a-few-options-rt.patch +From: Thomas Gleixner +Date: Sun, 24 Jul 2011 12:11:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Disable stuff which is known to have issues on RT + +Signed-off-by: Thomas Gleixner +--- + arch/Kconfig | 1 + + drivers/net/Kconfig | 1 + + mm/Kconfig | 2 +- + 3 files changed, 3 insertions(+), 1 deletion(-) + +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -6,6 +6,7 @@ config OPROFILE + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -160,6 +160,7 @@ config VXLAN + + config NETCONSOLE + tristate "Network console logging support" ++ depends on !PREEMPT_RT_FULL + ---help--- + If you want to log 
kernel messages over the network, enable this. + See for details. +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -393,7 +393,7 @@ config NOMMU_INITIAL_TRIM_EXCESS + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch new file mode 100644 index 000000000..807db4666 --- /dev/null +++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch @@ -0,0 +1,57 @@ +Subject: kconfig-preempt-rt-full.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 14:58:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + init/Makefile | 2 +- + kernel/Kconfig.preempt | 8 ++++++++ + scripts/mkcompile_h | 4 +++- + 3 files changed, 12 insertions(+), 2 deletions(-) + +--- a/init/Makefile ++++ b/init/Makefile +@@ -33,4 +33,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -67,6 +67,14 @@ config PREEMPT_RTB + enables changes which are preliminary for the full preemptible + RT kernel. + ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ select PREEMPT_RCU ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +--- a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h +@@ -4,7 +4,8 @@ TARGET=$1 + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION" + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch new file mode 100644 index 000000000..5cff1bfa2 --- /dev/null +++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch @@ -0,0 +1,88 @@ +From 24136a819693ae36039d6b4286bf1f775e062bcc Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 7 Jun 2013 22:37:06 +0200 +Subject: [PATCH] kernel/cpu: fix cpu down problem if kthread's cpu is + going down +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If kthread is pinned to CPUx and CPUx is going down then we get into +trouble: +- first the unplug thread is created +- it will set itself to hp->unplug. As a result, every task that is + going to take a lock, has to leave the CPU. +- the CPU_DOWN_PREPARE notifier are started. The worker thread will + start a new process for the "high priority worker". 
+ Now kthread would like to take a lock but since it can't leave the CPU + it will never complete its task. + +We could fire the unplug thread after the notifier but then the cpu is +no longer marked "online" and the unplug thread will run on CPU0 which +was fixed before :) + +So instead the unplug thread is started and kept waiting until the +notfier complete their work. + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -83,6 +83,7 @@ struct hotplug_pcp { + int refcount; + int grab_lock; + struct completion synced; ++ struct completion unplug_wait; + #ifdef CONFIG_PREEMPT_RT_FULL + /* + * Note, on PREEMPT_RT, the hotplug lock must save the state of +@@ -186,6 +187,7 @@ static int sync_unplug_thread(void *data + { + struct hotplug_pcp *hp = data; + ++ wait_for_completion(&hp->unplug_wait); + preempt_disable(); + hp->unplug = current; + wait_for_pinned_cpus(hp); +@@ -251,6 +253,14 @@ static void __cpu_unplug_sync(struct hot + wait_for_completion(&hp->synced); + } + ++static void __cpu_unplug_wait(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ complete(&hp->unplug_wait); ++ wait_for_completion(&hp->synced); ++} ++ + /* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. +@@ -274,6 +284,7 @@ static int cpu_unplug_begin(unsigned int + tell_sched_cpu_down_begin(cpu); + + init_completion(&hp->synced); ++ init_completion(&hp->unplug_wait); + + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(hp->sync_tsk)) { +@@ -289,8 +300,7 @@ static int cpu_unplug_begin(unsigned int + * wait for tasks that are going to enter these sections and + * we must not have them block. + */ +- __cpu_unplug_sync(hp); +- ++ wake_up_process(hp->sync_tsk); + return 0; + } + +@@ -615,6 +625,7 @@ static int __ref _cpu_down(unsigned int + #endif + synchronize_rcu(); + ++ __cpu_unplug_wait(cpu); + smpboot_park_threads(cpu); + + /* Notifiers are done. Don't let any more tasks pin this CPU. */ diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch new file mode 100644 index 000000000..9d08166f8 --- /dev/null +++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch @@ -0,0 +1,60 @@ +From 4c6df3d78817c20a147c0291f6600d002c0910d3 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 14 Jun 2013 17:16:35 +0200 +Subject: [PATCH] kernel/hotplug: restore original cpu mask oncpu/down +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If a task which is allowed to run only on CPU X puts CPU Y down then it +will be allowed on all CPUs but the on CPU Y after it comes back from +kernel. This patch ensures that we don't lose the initial setting unless +the CPU the task is running is going down. 
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -573,6 +573,7 @@ static int __ref _cpu_down(unsigned int + .hcpu = hcpu, + }; + cpumask_var_t cpumask; ++ cpumask_var_t cpumask_org; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -583,6 +584,12 @@ static int __ref _cpu_down(unsigned int + /* Move the downtaker off the unplug cpu */ + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; ++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { ++ free_cpumask_var(cpumask); ++ return -ENOMEM; ++ } ++ ++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); +@@ -591,7 +598,8 @@ static int __ref _cpu_down(unsigned int + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); + migrate_enable(); +- return -EBUSY; ++ err = -EBUSY; ++ goto restore_cpus; + } + + cpu_hotplug_begin(); +@@ -669,6 +677,9 @@ static int __ref _cpu_down(unsigned int + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); ++restore_cpus: ++ set_cpus_allowed_ptr(current, cpumask_org); ++ free_cpumask_var(cpumask_org); + return err; + } + diff --git a/debian/patches/features/all/rt/kernel-hrtimer-be-non-freezeable-in-cpu_chill.patch b/debian/patches/features/all/rt/kernel-hrtimer-be-non-freezeable-in-cpu_chill.patch new file mode 100644 index 000000000..4df224b86 --- /dev/null +++ b/debian/patches/features/all/rt/kernel-hrtimer-be-non-freezeable-in-cpu_chill.patch @@ -0,0 +1,59 @@ +From 9a1a63848887fd99e6be49a3925d65751e424c8a Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 19 Feb 2014 11:56:06 +0100 +Subject: [PATCH] kernel/hrtimer: be non-freezeable in cpu_chill() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Since we replaced msleep() by hrtimer I see now and then (rarely) this: + +| [....] Waiting for /dev to be fully populated... +| ===================================== +| [ BUG: udevd/229 still has locks held! 
] +| 3.12.11-rt17 #23 Not tainted +| ------------------------------------- +| 1 lock held by udevd/229: +| #0: (&type->i_mutex_dir_key#2){+.+.+.}, at: lookup_slow+0x28/0x98 +| +| stack backtrace: +| CPU: 0 PID: 229 Comm: udevd Not tainted 3.12.11-rt17 #23 +| (unwind_backtrace+0x0/0xf8) from (show_stack+0x10/0x14) +| (show_stack+0x10/0x14) from (dump_stack+0x74/0xbc) +| (dump_stack+0x74/0xbc) from (do_nanosleep+0x120/0x160) +| (do_nanosleep+0x120/0x160) from (hrtimer_nanosleep+0x90/0x110) +| (hrtimer_nanosleep+0x90/0x110) from (cpu_chill+0x30/0x38) +| (cpu_chill+0x30/0x38) from (dentry_kill+0x158/0x1ec) +| (dentry_kill+0x158/0x1ec) from (dput+0x74/0x15c) +| (dput+0x74/0x15c) from (lookup_real+0x4c/0x50) +| (lookup_real+0x4c/0x50) from (__lookup_hash+0x34/0x44) +| (__lookup_hash+0x34/0x44) from (lookup_slow+0x38/0x98) +| (lookup_slow+0x38/0x98) from (path_lookupat+0x208/0x7fc) +| (path_lookupat+0x208/0x7fc) from (filename_lookup+0x20/0x60) +| (filename_lookup+0x20/0x60) from (user_path_at_empty+0x50/0x7c) +| (user_path_at_empty+0x50/0x7c) from (user_path_at+0x14/0x1c) +| (user_path_at+0x14/0x1c) from (vfs_fstatat+0x48/0x94) +| (vfs_fstatat+0x48/0x94) from (SyS_stat64+0x14/0x30) +| (SyS_stat64+0x14/0x30) from (ret_fast_syscall+0x0/0x48) + +For now I see no better way but to disable the freezer the sleep the period. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/hrtimer.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1900,8 +1900,12 @@ void cpu_chill(void) + struct timespec tu = { + .tv_nsec = NSEC_PER_MSEC, + }; ++ unsigned int freeze_flag = current->flags & PF_NOFREEZE; + ++ current->flags |= PF_NOFREEZE; + hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); ++ if (!freeze_flag) ++ current->flags &= ~PF_NOFREEZE; + } + EXPORT_SYMBOL(cpu_chill); + #endif diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch new file mode 100644 index 000000000..64c9c3e2a --- /dev/null +++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch @@ -0,0 +1,103 @@ +From: Jason Wessel +Date: Thu, 28 Jul 2011 12:42:23 -0500 +Subject: kgdb/serial: Short term workaround +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On 07/27/2011 04:37 PM, Thomas Gleixner wrote: +> - KGDB (not yet disabled) is reportedly unusable on -rt right now due +> to missing hacks in the console locking which I dropped on purpose. +> + +To work around this in the short term you can use this patch, in +addition to the clocksource watchdog patch that Thomas brewed up. + +Comments are welcome of course. Ultimately the right solution is to +change separation between the console and the HW to have a polled mode ++ work queue so as not to introduce any kind of latency. + +Thanks, +Jason. 
+ +--- + drivers/tty/serial/8250/8250_core.c | 3 ++- + include/linux/kdb.h | 3 ++- + kernel/debug/kdb/kdb_io.c | 6 ++---- + 3 files changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + #ifdef CONFIG_SPARC + #include + #endif +@@ -2891,7 +2892,7 @@ serial8250_console_write(struct console + + touch_nmi_watchdog(); + +- if (port->sysrq || oops_in_progress) ++ if (port->sysrq || oops_in_progress || in_kdb_printk()) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); +--- a/include/linux/kdb.h ++++ b/include/linux/kdb.h +@@ -116,7 +116,7 @@ extern int kdb_trap_printk; + extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +- ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -151,6 +151,7 @@ extern int kdb_register_repeat(char *, k + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list + int linecount; + int colcount; + int logging, saved_loglevel = 0; +- int saved_trap_printk; + int got_printf_lock = 0; + int retlen = 0; + int fnd, len; +@@ -565,8 +564,6 @@ int vkdb_printf(const char *fmt, va_list + unsigned long uninitialized_var(flags); + + preempt_disable(); +- saved_trap_printk = kdb_trap_printk; +- kdb_trap_printk = 0; + + /* Serialize kdb_printf if multiple cpus try to write at once. + * But if any cpu goes recursive in kdb, just print the output, +@@ -833,7 +830,6 @@ int vkdb_printf(const char *fmt, va_list + } else { + __release(kdb_printf_lock); + } +- kdb_trap_printk = saved_trap_printk; + preempt_enable(); + return retlen; + } +@@ -843,9 +839,11 @@ int kdb_printf(const char *fmt, ...) + va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch new file mode 100644 index 000000000..00f488eca --- /dev/null +++ b/debian/patches/features/all/rt/latency-hist.patch @@ -0,0 +1,1810 @@ +Subject: latency-hist.patch +From: Carsten Emde +Date: Tue, 19 Jul 2011 14:03:41 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This patch provides a recording mechanism to store data of potential +sources of system latencies. The recordings separately determine the +latency caused by a delayed timer expiration, by a delayed wakeup of the +related user space program and by the sum of both. The histograms can be +enabled and reset individually. The data are accessible via the debug +filesystem. For details please consult Documentation/trace/histograms.txt. 
+ +Signed-off-by: Carsten Emde +Signed-off-by: Thomas Gleixner + +--- + Documentation/trace/histograms.txt | 186 +++++ + include/linux/hrtimer.h | 3 + include/linux/sched.h | 6 + include/trace/events/hist.h | 72 ++ + include/trace/events/latency_hist.h | 29 + kernel/hrtimer.c | 23 + kernel/trace/Kconfig | 104 +++ + kernel/trace/Makefile | 4 + kernel/trace/latency_hist.c | 1178 ++++++++++++++++++++++++++++++++++++ + kernel/trace/trace_irqsoff.c | 11 + 10 files changed, 1616 insertions(+) + +--- /dev/null ++++ b/Documentation/trace/histograms.txt +@@ -0,0 +1,186 @@ ++ Using the Linux Kernel Latency Histograms ++ ++ ++This document gives a short explanation how to enable, configure and use ++latency histograms. Latency histograms are primarily relevant in the ++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) ++and are used in the quality management of the Linux real-time ++capabilities. ++ ++ ++* Purpose of latency histograms ++ ++A latency histogram continuously accumulates the frequencies of latency ++data. There are two types of histograms ++- potential sources of latencies ++- effective latencies ++ ++ ++* Potential sources of latencies ++ ++Potential sources of latencies are code segments where interrupts, ++preemption or both are disabled (aka critical sections). To create ++histograms of potential sources of latency, the kernel stores the time ++stamp at the start of a critical section, determines the time elapsed ++when the end of the section is reached, and increments the frequency ++counter of that latency value - irrespective of whether any concurrently ++running process is affected by latency or not. ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_INTERRUPT_OFF_LATENCY ++ CONFIG_PREEMPT_OFF_LATENCY ++ ++ ++* Effective latencies ++ ++Effective latencies are actually occuring during wakeup of a process. To ++determine effective latencies, the kernel stores the time stamp when a ++process is scheduled to be woken up, and determines the duration of the ++wakeup time shortly before control is passed over to this process. Note ++that the apparent latency in user space may be somewhat longer, since the ++process may be interrupted after control is passed over to it but before ++the execution in user space takes place. Simply measuring the interval ++between enqueuing and wakeup may also not appropriate in cases when a ++process is scheduled as a result of a timer expiration. The timer may have ++missed its deadline, e.g. due to disabled interrupts, but this latency ++would not be registered. Therefore, the offsets of missed timers are ++recorded in a separate histogram. If both wakeup latency and missed timer ++offsets are configured and enabled, a third histogram may be enabled that ++records the overall latency as a sum of the timer latency, if any, and the ++wakeup latency. This histogram is called "timerandwakeup". ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_WAKEUP_LATENCY ++ CONFIG_MISSED_TIMER_OFSETS ++ ++ ++* Usage ++ ++The interface to the administration of the latency histograms is located ++in the debugfs file system. To mount it, either enter ++ ++mount -t sysfs nodev /sys ++mount -t debugfs nodev /sys/kernel/debug ++ ++from shell command line level, or add ++ ++nodev /sys sysfs defaults 0 0 ++nodev /sys/kernel/debug debugfs defaults 0 0 ++ ++to the file /etc/fstab. All latency histogram related files are then ++available in the directory /sys/kernel/debug/tracing/latency_hist. 
A ++particular histogram type is enabled by writing non-zero to the related ++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. ++Select "preemptirqsoff" for the histograms of potential sources of ++latencies and "wakeup" for histograms of effective latencies etc. The ++histogram data - one per CPU - are available in the files ++ ++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx ++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx ++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx ++ ++The histograms are reset by writing non-zero to the file "reset" in a ++particular latency directory. To reset all latency data, use ++ ++#!/bin/sh ++ ++TRACINGDIR=/sys/kernel/debug/tracing ++HISTDIR=$TRACINGDIR/latency_hist ++ ++if test -d $HISTDIR ++then ++ cd $HISTDIR ++ for i in `find . | grep /reset$` ++ do ++ echo 1 >$i ++ done ++fi ++ ++ ++* Data format ++ ++Latency data are stored with a resolution of one microsecond. The ++maximum latency is 10,240 microseconds. The data are only valid, if the ++overflow register is empty. Every output line contains the latency in ++microseconds in the first row and the number of samples in the second ++row. To display only lines with a positive latency count, use, for ++example, ++ ++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 ++ ++#Minimum latency: 0 microseconds. ++#Average latency: 0 microseconds. ++#Maximum latency: 25 microseconds. ++#Total samples: 3104770694 ++#There are 0 samples greater or equal than 10240 microseconds ++#usecs samples ++ 0 2984486876 ++ 1 49843506 ++ 2 58219047 ++ 3 5348126 ++ 4 2187960 ++ 5 3388262 ++ 6 959289 ++ 7 208294 ++ 8 40420 ++ 9 4485 ++ 10 14918 ++ 11 18340 ++ 12 25052 ++ 13 19455 ++ 14 5602 ++ 15 969 ++ 16 47 ++ 17 18 ++ 18 14 ++ 19 1 ++ 20 3 ++ 21 2 ++ 22 5 ++ 23 2 ++ 25 1 ++ ++ ++* Wakeup latency of a selected process ++ ++To only collect wakeup latency data of a particular process, write the ++PID of the requested process to ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/pid ++ ++PIDs are not considered, if this variable is set to 0. ++ ++ ++* Details of the process with the highest wakeup latency so far ++ ++Selected data of the process that suffered from the highest wakeup ++latency that occurred in a particular CPU are available in the file ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. ++ ++In addition, other relevant system data at the time when the ++latency occurred are given. ++ ++The format of the data is (all in one line): ++ () \ ++<- ++ ++The value of is only relevant in the combined timer ++and wakeup latency recording. In the wakeup recording, it is ++always 0, in the missed_timer_offsets recording, it is the same ++as . ++ ++When retrospectively searching for the origin of a latency and ++tracing was not enabled, it may be helpful to know the name and ++some basic data of the task that (finally) was switching to the ++late real-tlme task. In addition to the victim's data, also the ++data of the possible culprit are therefore displayed after the ++"<-" symbol. ++ ++Finally, the timestamp of the time when the latency occurred ++in . after the most recent system boot ++is provided. ++ ++These data are also reset when the wakeup histogram is reset. 
+--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -111,6 +111,9 @@ struct hrtimer { + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ ktime_t praecox; ++#endif + #ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1568,6 +1568,12 @@ struct task_struct { + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ long timer_offset; ++#endif ++#endif + #endif /* CONFIG_TRACING */ + #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ + struct memcg_batch_info { +--- /dev/null ++++ b/include/trace/events/hist.h +@@ -0,0 +1,72 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist ++ ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H ++ ++#include "latency_hist.h" ++#include ++ ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a, b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, ++ ++ TP_PROTO(int reason, int starthist), ++ ++ TP_ARGS(reason, starthist), ++ ++ TP_STRUCT__entry( ++ __field(int, reason) ++ __field(int, starthist) ++ ), ++ ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), ++ ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ? "start" : "stop") ++); ++#endif ++ ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a, b, c, d) ++#else ++TRACE_EVENT(hrtimer_interrupt, ++ ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, ++ struct task_struct *task), ++ ++ TP_ARGS(cpu, offset, curr, task), ++ ++ TP_STRUCT__entry( ++ __field(int, cpu) ++ __field(long long, offset) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio) ++ ), ++ ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "", ++ task != NULL ? TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ? 
task->prio : -1; ++ ), ++ ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, ++ __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif ++ ++#endif /* _TRACE_HIST_H */ ++ ++/* This part must be outside protection */ ++#include +--- /dev/null ++++ b/include/trace/events/latency_hist.h +@@ -0,0 +1,29 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H ++ ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; ++ ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; ++ ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) ++ return actions[action]; ++ return "unknown"; ++} ++ ++#endif /* _LATENCY_HIST_H */ +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -53,6 +53,7 @@ + #include + + #include ++#include + + /* + * The timer bases: +@@ -998,6 +999,17 @@ int __hrtimer_start_range_ns(struct hrti + #endif + } + ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ { ++ ktime_t now = new_base->get_time(); ++ ++ if (ktime_to_ns(tim) < ktime_to_ns(now)) ++ timer->praecox = now; ++ else ++ timer->praecox = ktime_set(0, 0); ++ } ++#endif ++ + hrtimer_set_expires_range_ns(timer, tim, delta_ns); + + timer_stats_hrtimer_set_start_info(timer); +@@ -1276,6 +1288,8 @@ static void __run_hrtimer(struct hrtimer + + #ifdef CONFIG_HIGH_RES_TIMERS + ++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); ++ + /* + * High resolution timer interrupt + * Called with interrupts disabled +@@ -1319,6 +1333,15 @@ void hrtimer_interrupt(struct clock_even + + timer = container_of(node, struct hrtimer, node); + ++ trace_hrtimer_interrupt(raw_smp_processor_id(), ++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? ++ timer->praecox : hrtimer_get_expires(timer), ++ basenow)), ++ current, ++ timer->function == hrtimer_wakeup ? ++ container_of(timer, struct hrtimer_sleeper, ++ timer)->task : NULL); ++ + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the +--- a/kernel/trace/Kconfig ++++ b/kernel/trace/Kconfig +@@ -192,6 +192,24 @@ config IRQSOFF_TRACER + enabled. This option and the preempt-off timing option can be + used together or separately.) + ++config INTERRUPT_OFF_HIST ++ bool "Interrupts-off Latency Histogram" ++ depends on IRQSOFF_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with interrupts disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If PREEMPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/irqsoff ++ + config PREEMPT_TRACER + bool "Preemption-off Latency Tracer" + default n +@@ -216,6 +234,24 @@ config PREEMPT_TRACER + enabled. This option and the irqs-off timing option can be + used together or separately.) + ++config PREEMPT_OFF_HIST ++ bool "Preemption-off Latency Histogram" ++ depends on PREEMPT_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with preemption disabled. 
The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/preemptoff ++ + config SCHED_TRACER + bool "Scheduling Latency Tracer" + select GENERIC_TRACER +@@ -226,6 +262,74 @@ config SCHED_TRACER + This tracer tracks the latency of the highest priority task + to be scheduled in, starting from the point it has woken up. + ++config WAKEUP_LATENCY_HIST ++ bool "Scheduling Latency Histogram" ++ depends on SCHED_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the scheduling latency of the highest priority task. ++ The histograms are disabled by default. To enable them, write a ++ non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup ++ ++ Two different algorithms are used, one to determine the latency of ++ processes that exclusively use the highest priority of the system and ++ another one to determine the latency of processes that share the ++ highest system priority with other processes. The former is used to ++ improve hardware and system software, the latter to optimize the ++ priority design of a given system. The histogram data will be ++ located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup ++ ++ and ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ ++config MISSED_TIMER_OFFSETS_HIST ++ depends on HIGH_RES_TIMERS ++ select GENERIC_TRACER ++ bool "Missed Timer Offsets Histogram" ++ help ++ Generate a histogram of missed timer offsets in microseconds. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets ++ ++ The histogram data will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. 
To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ + config ENABLE_DEFAULT_TRACERS + bool "Trace process context switches and events" + depends on !GENERIC_TRACER +--- a/kernel/trace/Makefile ++++ b/kernel/trace/Makefile +@@ -34,6 +34,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f + obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o + obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o + obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o ++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o ++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o + obj-$(CONFIG_NOP_TRACER) += trace_nop.o + obj-$(CONFIG_STACK_TRACER) += trace_stack.o + obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +--- /dev/null ++++ b/kernel/trace/latency_hist.c +@@ -0,0 +1,1178 @@ ++/* ++ * kernel/trace/latency_hist.c ++ * ++ * Add support for histograms of preemption-off latency and ++ * interrupt-off latency and wakeup latency, it depends on ++ * Real-Time Preemption Support. ++ * ++ * Copyright (C) 2005 MontaVista Software, Inc. ++ * Yi Yang ++ * ++ * Converted to work with the new latency tracer. ++ * Copyright (C) 2008 Red Hat, Inc. ++ * Steven Rostedt ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "trace.h" ++#include ++ ++#define NSECS_PER_USECS 1000L ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++enum { ++ IRQSOFF_LATENCY = 0, ++ PREEMPTOFF_LATENCY, ++ PREEMPTIRQSOFF_LATENCY, ++ WAKEUP_LATENCY, ++ WAKEUP_LATENCY_SHAREDPRIO, ++ MISSED_TIMER_OFFSETS, ++ TIMERANDWAKEUP_LATENCY, ++ MAX_LATENCY_TYPE, ++}; ++ ++#define MAX_ENTRY_NUM 10240 ++ ++struct hist_data { ++ atomic_t hist_mode; /* 0 log, 1 don't log */ ++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ ++ long min_lat; ++ long max_lat; ++ unsigned long long below_hist_bound_samples; ++ unsigned long long above_hist_bound_samples; ++ long long accumulate_lat; ++ unsigned long long total_samples; ++ unsigned long long hist_array[MAX_ENTRY_NUM]; ++}; ++ ++struct enable_data { ++ int latency_type; ++ int enabled; ++}; ++ ++static char *latency_hist_dir_root = "latency_hist"; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); ++static char *irqsoff_hist_dir = "irqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); ++static DEFINE_PER_CPU(int, hist_irqsoff_counting); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); ++static char *preemptoff_hist_dir = "preemptoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); ++static DEFINE_PER_CPU(int, hist_preemptoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); ++static char *preemptirqsoff_hist_dir = "preemptirqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); ++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); ++static struct enable_data preemptirqsoff_enabled_data = { ++ .latency_type = PREEMPTIRQSOFF_LATENCY, ++ .enabled = 0, ++}; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ 
defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++struct maxlatproc_data { ++ char comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ int pid; ++ int current_pid; ++ int prio; ++ int current_prio; ++ long latency; ++ long timeroffset; ++ cycle_t timestamp; ++}; ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); ++static char *wakeup_latency_hist_dir = "wakeup"; ++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success); ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next); ++static notrace void probe_sched_migrate_task(void *, ++ struct task_struct *task, int cpu); ++static struct enable_data wakeup_latency_enabled_data = { ++ .latency_type = WAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); ++static DEFINE_PER_CPU(struct task_struct *, wakeup_task); ++static DEFINE_PER_CPU(int, wakeup_sharedprio); ++static unsigned long wakeup_pid; ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); ++static char *missed_timer_offsets_dir = "missed_timer_offsets"; ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long offset, struct task_struct *curr, struct task_struct *task); ++static struct enable_data missed_timer_offsets_enabled_data = { ++ .latency_type = MISSED_TIMER_OFFSETS, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); ++static unsigned long missed_timer_offsets_pid; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); ++static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; ++static struct enable_data timerandwakeup_enabled_data = { ++ .latency_type = TIMERANDWAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); ++#endif ++ ++void notrace latency_hist(int latency_type, int cpu, long latency, ++ long timeroffset, cycle_t stop, ++ struct task_struct *p) ++{ ++ struct hist_data *my_hist; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ if (!cpu_possible(cpu) || latency_type < 0 || ++ latency_type >= MAX_LATENCY_TYPE) ++ return; ++ ++ switch (latency_type) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ my_hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ my_hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ my_hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ my_hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ 
break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ my_hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ ++ default: ++ return; ++ } ++ ++ latency += my_hist->offset; ++ ++ if (atomic_read(&my_hist->hist_mode) == 0) ++ return; ++ ++ if (latency < 0 || latency >= MAX_ENTRY_NUM) { ++ if (latency < 0) ++ my_hist->below_hist_bound_samples++; ++ else ++ my_hist->above_hist_bound_samples++; ++ } else ++ my_hist->hist_array[latency]++; ++ ++ if (unlikely(latency > my_hist->max_lat || ++ my_hist->min_lat == LONG_MAX)) { ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) { ++ strncpy(mp->comm, p->comm, sizeof(mp->comm)); ++ strncpy(mp->current_comm, current->comm, ++ sizeof(mp->current_comm)); ++ mp->pid = task_pid_nr(p); ++ mp->current_pid = task_pid_nr(current); ++ mp->prio = p->prio; ++ mp->current_prio = current->prio; ++ mp->latency = latency; ++ mp->timeroffset = timeroffset; ++ mp->timestamp = stop; ++ } ++#endif ++ my_hist->max_lat = latency; ++ } ++ if (unlikely(latency < my_hist->min_lat)) ++ my_hist->min_lat = latency; ++ my_hist->total_samples++; ++ my_hist->accumulate_lat += latency; ++} ++ ++static void *l_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t *index_ptr = NULL; ++ loff_t index = *pos; ++ struct hist_data *my_hist = m->private; ++ ++ if (index == 0) { ++ char minstr[32], avgstr[32], maxstr[32]; ++ ++ atomic_dec(&my_hist->hist_mode); ++ ++ if (likely(my_hist->total_samples)) { ++ long avg = (long) div64_s64(my_hist->accumulate_lat, ++ my_hist->total_samples); ++ snprintf(minstr, sizeof(minstr), "%ld", ++ my_hist->min_lat - my_hist->offset); ++ snprintf(avgstr, sizeof(avgstr), "%ld", ++ avg - my_hist->offset); ++ snprintf(maxstr, sizeof(maxstr), "%ld", ++ my_hist->max_lat - my_hist->offset); ++ } else { ++ strcpy(minstr, ""); ++ strcpy(avgstr, minstr); ++ strcpy(maxstr, minstr); ++ } ++ ++ seq_printf(m, "#Minimum latency: %s microseconds\n" ++ "#Average latency: %s microseconds\n" ++ "#Maximum latency: %s microseconds\n" ++ "#Total samples: %llu\n" ++ "#There are %llu samples lower than %ld" ++ " microseconds.\n" ++ "#There are %llu samples greater or equal" ++ " than %ld microseconds.\n" ++ "#usecs\t%16s\n", ++ minstr, avgstr, maxstr, ++ my_hist->total_samples, ++ my_hist->below_hist_bound_samples, ++ -my_hist->offset, ++ my_hist->above_hist_bound_samples, ++ MAX_ENTRY_NUM - my_hist->offset, ++ "samples"); ++ } ++ if (index < MAX_ENTRY_NUM) { ++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); ++ if (index_ptr) ++ *index_ptr = index; ++ } ++ ++ return index_ptr; ++} ++ ++static void *l_next(struct seq_file *m, void *p, loff_t *pos) ++{ ++ loff_t *index_ptr = p; ++ struct hist_data *my_hist = m->private; ++ ++ if (++*pos >= MAX_ENTRY_NUM) { ++ atomic_inc(&my_hist->hist_mode); ++ return NULL; ++ } ++ *index_ptr = *pos; ++ return index_ptr; ++} ++ ++static void l_stop(struct seq_file *m, void *p) ++{ ++ kfree(p); ++} ++ ++static int l_show(struct seq_file *m, void *p) ++{ ++ int index = *(loff_t *) p; ++ 
struct hist_data *my_hist = m->private; ++ ++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, ++ my_hist->hist_array[index]); ++ return 0; ++} ++ ++static const struct seq_operations latency_hist_seq_op = { ++ .start = l_start, ++ .next = l_next, ++ .stop = l_stop, ++ .show = l_show ++}; ++ ++static int latency_hist_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ ret = seq_open(file, &latency_hist_seq_op); ++ if (!ret) { ++ struct seq_file *seq = file->private_data; ++ seq->private = inode->i_private; ++ } ++ return ret; ++} ++ ++static const struct file_operations latency_hist_fops = { ++ .open = latency_hist_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static void clear_maxlatprocdata(struct maxlatproc_data *mp) ++{ ++ mp->comm[0] = mp->current_comm[0] = '\0'; ++ mp->prio = mp->current_prio = mp->pid = mp->current_pid = ++ mp->latency = mp->timeroffset = -1; ++ mp->timestamp = 0; ++} ++#endif ++ ++static void hist_reset(struct hist_data *hist) ++{ ++ atomic_dec(&hist->hist_mode); ++ ++ memset(hist->hist_array, 0, sizeof(hist->hist_array)); ++ hist->below_hist_bound_samples = 0ULL; ++ hist->above_hist_bound_samples = 0ULL; ++ hist->min_lat = LONG_MAX; ++ hist->max_lat = LONG_MIN; ++ hist->total_samples = 0ULL; ++ hist->accumulate_lat = 0LL; ++ ++ atomic_inc(&hist->hist_mode); ++} ++ ++static ssize_t ++latency_hist_reset(struct file *file, const char __user *a, ++ size_t size, loff_t *off) ++{ ++ int cpu; ++ struct hist_data *hist = NULL; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ off_t latency_type = (off_t) file->private_data; ++ ++ for_each_online_cpu(cpu) { ++ ++ switch (latency_type) { ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ } ++ ++ hist_reset(hist); ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) ++ clear_maxlatprocdata(mp); ++#endif ++ } ++ ++ return size; ++} ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_pid(struct file 
*file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ int r; ++ unsigned long *this_pid = file->private_data; ++ ++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t do_pid(struct file *file, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ unsigned long pid; ++ unsigned long *this_pid = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = '\0'; ++ ++ if (kstrtoul(buf, 10, &pid)) ++ return -EINVAL; ++ ++ *this_pid = pid; ++ ++ return cnt; ++} ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ int r; ++ struct maxlatproc_data *mp = file->private_data; ++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); ++ unsigned long long t; ++ unsigned long usecs, secs; ++ char *buf; ++ ++ if (mp->pid == -1 || mp->current_pid == -1) { ++ buf = "(none)\n"; ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ++ strlen(buf)); ++ } ++ ++ buf = kmalloc(strmaxlen, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ ++ t = ns2usecs(mp->timestamp); ++ usecs = do_div(t, USEC_PER_SEC); ++ secs = (unsigned long) t; ++ r = snprintf(buf, strmaxlen, ++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, ++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, ++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, ++ secs, usecs); ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ kfree(buf); ++ return r; ++} ++#endif ++ ++static ssize_t ++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ struct enable_data *ed = file->private_data; ++ int r; ++ ++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t ++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ long enable; ++ struct enable_data *ed = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (kstrtoul(buf, 10, &enable)) ++ return -EINVAL; ++ ++ if ((enable && ed->enabled) || (!enable && !ed->enabled)) ++ return cnt; ++ ++ if (enable) { ++ int ret; ++ ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ ret = register_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_preemptirqsoff_hist " ++ "to trace_preemptirqsoff_hist\n"); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ ret = register_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup\n"); ++ return ret; ++ } ++ ret = register_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup_new\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = 
register_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_stop " ++ "to trace_sched_switch\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_sched_migrate_task " ++ "to trace_sched_migrate_task\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ ret = register_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_hrtimer_interrupt " ++ "to trace_hrtimer_interrupt\n"); ++ return ret; ++ } ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ if (!wakeup_latency_enabled_data.enabled || ++ !missed_timer_offsets_enabled_data.enabled) ++ return -EINVAL; ++ break; ++#endif ++ default: ++ break; ++ } ++ } else { ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ for_each_online_cpu(cpu) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ per_cpu(hist_irqsoff_counting, ++ cpu) = 0; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ per_cpu(hist_preemptoff_counting, ++ cpu) = 0; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ per_cpu(hist_preemptirqsoff_counting, ++ cpu) = 0; ++#endif ++ } ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ unregister_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(wakeup_task, cpu) = NULL; ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } ++ } ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ unregister_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++ default: ++ break; ++ } ++ } ++ ed->enabled = enable; ++ return cnt; ++} ++ ++static const struct file_operations latency_hist_reset_fops = { ++ .open = tracing_open_generic, ++ .write = latency_hist_reset, ++}; ++ ++static const struct file_operations enable_fops = { ++ .open = tracing_open_generic, ++ .read = show_enable, ++ .write = do_enable, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static const struct file_operations pid_fops = { ++ .open = tracing_open_generic, ++ .read = show_pid, ++ .write = do_pid, ++}; ++ ++static 
const struct file_operations maxlatproc_fops = { ++ .open = tracing_open_generic, ++ .read = show_maxlatproc, ++}; ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, ++ int starthist) ++{ ++ int cpu = raw_smp_processor_id(); ++ int time_set = 0; ++ ++ if (starthist) { ++ cycle_t uninitialized_var(start); ++ ++ if (!preempt_count() && !irqs_disabled()) ++ return; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_OFF || reason == TRACE_START) && ++ !per_cpu(hist_irqsoff_counting, cpu)) { ++ per_cpu(hist_irqsoff_counting, cpu) = 1; ++ start = ftrace_now(cpu); ++ time_set++; ++ per_cpu(hist_irqsoff_start, cpu) = start; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_OFF || reason == TRACE_START) && ++ !per_cpu(hist_preemptoff_counting, cpu)) { ++ per_cpu(hist_preemptoff_counting, cpu) = 1; ++ if (!(time_set++)) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptoff_start, cpu) = start; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if (per_cpu(hist_irqsoff_counting, cpu) && ++ per_cpu(hist_preemptoff_counting, cpu) && ++ !per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; ++ if (!time_set) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptirqsoff_start, cpu) = start; ++ } ++#endif ++ } else { ++ cycle_t uninitialized_var(stop); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_ON || reason == TRACE_STOP) && ++ per_cpu(hist_irqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_irqsoff_start, cpu); ++ ++ stop = ftrace_now(cpu); ++ time_set++; ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, ++ stop, NULL); ++ } ++ per_cpu(hist_irqsoff_counting, cpu) = 0; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && ++ per_cpu(hist_preemptoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptoff_start, cpu); ++ ++ if (!(time_set++)) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, ++ 0, stop, NULL); ++ } ++ per_cpu(hist_preemptoff_counting, cpu) = 0; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if ((!per_cpu(hist_irqsoff_counting, cpu) || ++ !per_cpu(hist_preemptoff_counting, cpu)) && ++ per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); ++ ++ if (!time_set) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, ++ latency, 0, stop, NULL); ++ } ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; ++ } ++#endif ++ } ++} ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_RAW_SPINLOCK(wakeup_lock); ++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, ++ int cpu) ++{ ++ int old_cpu = task_cpu(task); ++ ++ if (cpu != old_cpu) { ++ unsigned long flags; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); ++ if (task == cpu_wakeup_task) { ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, old_cpu) = NULL; ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; ++ 
get_task_struct(cpu_wakeup_task); ++ } ++ ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++ } ++} ++ ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success) ++{ ++ unsigned long flags; ++ struct task_struct *curr = current; ++ int cpu = task_cpu(p); ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (wakeup_pid) { ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ if (likely(wakeup_pid != task_pid_nr(p))) ++ goto out; ++ } else { ++ if (likely(!rt_task(p)) || ++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || ++ p->prio > curr->prio) ++ goto out; ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ } ++ ++ if (cpu_wakeup_task) ++ put_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; ++ get_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task->preempt_timestamp_hist = ++ ftrace_now(raw_smp_processor_id()); ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++ ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next) ++{ ++ unsigned long flags; ++ int cpu = task_cpu(next); ++ long latency; ++ cycle_t stop; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (cpu_wakeup_task == NULL) ++ goto out; ++ ++ /* Already running? */ ++ if (unlikely(current == cpu_wakeup_task)) ++ goto out_reset; ++ ++ if (next != cpu_wakeup_task) { ++ if (next->prio < cpu_wakeup_task->prio) ++ goto out_reset; ++ ++ if (next->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ goto out; ++ } ++ ++ if (current->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ /* ++ * The task we are waiting for is about to be switched to. ++ * Calculate latency and store it in histogram. 
++ */ ++ stop = ftrace_now(raw_smp_processor_id()); ++ ++ latency = ((long) (stop - next->preempt_timestamp_hist)) / ++ NSECS_PER_USECS; ++ ++ if (per_cpu(wakeup_sharedprio, cpu)) { ++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, ++ next); ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } else { ++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ if (timerandwakeup_enabled_data.enabled) { ++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, ++ next->timer_offset + latency, next->timer_offset, ++ stop, next); ++ } ++#endif ++ } ++ ++out_reset: ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ next->timer_offset = 0; ++#endif ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, cpu) = NULL; ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long latency_ns, struct task_struct *curr, ++ struct task_struct *task) ++{ ++ if (latency_ns <= 0 && task != NULL && rt_task(task) && ++ (task->prio < curr->prio || ++ (task->prio == curr->prio && ++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { ++ long latency; ++ cycle_t now; ++ ++ if (missed_timer_offsets_pid) { ++ if (likely(missed_timer_offsets_pid != ++ task_pid_nr(task))) ++ return; ++ } ++ ++ now = ftrace_now(cpu); ++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); ++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, ++ task); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ task->timer_offset = latency; ++#endif ++ } ++} ++#endif ++ ++static __init int latency_hist_init(void) ++{ ++ struct dentry *latency_hist_root = NULL; ++ struct dentry *dentry; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ struct dentry *dentry_sharedprio; ++#endif ++ struct dentry *entry; ++ struct dentry *enable_root; ++ int i = 0; ++ struct hist_data *my_hist; ++ char name[64]; ++ char *cpufmt = "CPU%d"; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ char *cpufmt_maxlatproc = "max_latency-CPU%d"; ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ dentry = tracing_init_dentry(); ++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); ++ enable_root = debugfs_create_dir("enable", latency_hist_root); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(irqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(irqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ dentry = debugfs_create_dir(preemptoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, 
cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptirqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ entry = debugfs_create_file("preemptirqsoff", 0644, ++ enable_root, (void *)&preemptirqsoff_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ dentry = debugfs_create_dir(wakeup_latency_hist_dir, ++ latency_hist_root); ++ dentry_sharedprio = debugfs_create_dir( ++ wakeup_latency_hist_dir_sharedprio, dentry); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(wakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, ++ &per_cpu(wakeup_latency_hist_sharedprio, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ ++ mp = &per_cpu(wakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&wakeup_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, ++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); ++ entry = debugfs_create_file("wakeup", 0644, ++ enable_root, (void *)&wakeup_latency_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ dentry = debugfs_create_dir(missed_timer_offsets_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); ++ my_hist = &per_cpu(missed_timer_offsets, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&missed_timer_offsets_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); ++ entry = debugfs_create_file("missed_timer_offsets", 0644, ++ enable_root, (void *)&missed_timer_offsets_enabled_data, ++ &enable_fops); ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(timerandwakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = 
&per_cpu(timerandwakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(timerandwakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("timerandwakeup", 0644, ++ enable_root, (void *)&timerandwakeup_enabled_data, ++ &enable_fops); ++#endif ++ return 0; ++} ++ ++device_initcall(latency_hist_init); +--- a/kernel/trace/trace_irqsoff.c ++++ b/kernel/trace/trace_irqsoff.c +@@ -17,6 +17,7 @@ + #include + + #include "trace.h" ++#include + + static struct trace_array *irqsoff_trace __read_mostly; + static int tracer_enabled __read_mostly; +@@ -439,11 +440,13 @@ void start_critical_timings(void) + { + if (preempt_trace() || irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(TRACE_START, 1); + } + EXPORT_SYMBOL_GPL(start_critical_timings); + + void stop_critical_timings(void) + { ++ trace_preemptirqsoff_hist(TRACE_STOP, 0); + if (preempt_trace() || irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -453,6 +456,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) + #ifdef CONFIG_PROVE_LOCKING + void time_hardirqs_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(a0, a1); + } +@@ -461,6 +465,7 @@ void time_hardirqs_off(unsigned long a0, + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(a0, a1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + + #else /* !CONFIG_PROVE_LOCKING */ +@@ -486,6 +491,7 @@ inline void print_irqtrace_events(struct + */ + void trace_hardirqs_on(void) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -495,11 +501,13 @@ void trace_hardirqs_off(void) + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off); + + void trace_hardirqs_on_caller(unsigned long caller_addr) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, caller_addr); + } +@@ -509,6 +517,7 @@ void trace_hardirqs_off_caller(unsigned + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, caller_addr); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off_caller); + +@@ -518,12 +527,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) + #ifdef CONFIG_PREEMPT_TRACER + void trace_preempt_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 0); + if (preempt_trace() && !irq_trace()) + stop_critical_timing(a0, a1); + } + + void trace_preempt_off(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 1); + if (preempt_trace() && !irq_trace()) + start_critical_timing(a0, a1); + } diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch new file mode 100644 index 000000000..61d469861 --- /dev/null +++ b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch @@ -0,0 +1,37 @@ +From 6c1733bce7ebf560b3b8bd6acaa31cb702fc39ab Mon 
Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 23 Jan 2014 14:45:59 +0100 +Subject: [PATCH 3/7] leds: trigger: disable CPU trigger on -RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +as it triggers: +|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 +|[] (unwind_backtrace+0x0/0xf8) from [] (show_stack+0x1c/0x20) +|[] (show_stack+0x1c/0x20) from [] (dump_stack+0x20/0x2c) +|[] (dump_stack+0x20/0x2c) from [] (__might_sleep+0x13c/0x170) +|[] (__might_sleep+0x13c/0x170) from [] (__rt_spin_lock+0x28/0x38) +|[] (__rt_spin_lock+0x28/0x38) from [] (rt_read_lock+0x68/0x7c) +|[] (rt_read_lock+0x68/0x7c) from [] (led_trigger_event+0x2c/0x5c) +|[] (led_trigger_event+0x2c/0x5c) from [] (ledtrig_cpu+0x54/0x5c) +|[] (ledtrig_cpu+0x54/0x5c) from [] (arch_cpu_idle_exit+0x18/0x1c) +|[] (arch_cpu_idle_exit+0x18/0x1c) from [] (cpu_startup_entry+0xa8/0x234) +|[] (cpu_startup_entry+0xa8/0x234) from [] (rest_init+0xb8/0xe0) +|[] (rest_init+0xb8/0xe0) from [] (start_kernel+0x2c4/0x380) + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/leds/trigger/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/leds/trigger/Kconfig ++++ b/drivers/leds/trigger/Kconfig +@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT + + config LEDS_TRIGGER_CPU + bool "LED CPU Trigger" +- depends on LEDS_TRIGGERS ++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE + help + This allows LEDs to be controlled by active CPUs. This shows + the active CPUs across an array of LEDs so you can see which diff --git a/debian/patches/features/all/rt/lglocks-rt.patch b/debian/patches/features/all/rt/lglocks-rt.patch new file mode 100644 index 000000000..9601ceb14 --- /dev/null +++ b/debian/patches/features/all/rt/lglocks-rt.patch @@ -0,0 +1,176 @@ +Subject: lglocks-rt.patch +From: Thomas Gleixner +Date: Wed, 15 Jun 2011 11:02:21 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/lglock.h | 21 ++++++++++++++++-- + kernel/locking/lglock.c | 54 ++++++++++++++++++++++++++++++++---------------- + 2 files changed, 55 insertions(+), 20 deletions(-) + +--- a/include/linux/lglock.h ++++ b/include/linux/lglock.h +@@ -32,22 +32,39 @@ + #endif + + struct lglock { ++#ifndef CONFIG_PREEMPT_RT_FULL + arch_spinlock_t __percpu *lock; ++#else ++ struct rt_mutex __percpu *lock; ++#endif + #ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lock_key; + struct lockdep_map lock_dep_map; + #endif + }; + +-#define DEFINE_LGLOCK(name) \ ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define DEFINE_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + struct lglock name = { .lock = &name ## _lock } + +-#define DEFINE_STATIC_LGLOCK(name) \ ++# define DEFINE_STATIC_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + static struct lglock name = { .lock = &name ## _lock } ++#else ++ ++# define DEFINE_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ struct lglock name = { .lock = &name ## _lock } ++ ++# define DEFINE_STATIC_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ static struct lglock name = { .lock = &name ## _lock } ++#endif + + void lg_lock_init(struct lglock *lg, char *name); + void 
lg_local_lock(struct lglock *lg); +--- a/kernel/locking/lglock.c ++++ b/kernel/locking/lglock.c +@@ -4,6 +4,15 @@ + #include + #include + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define lg_lock_ptr arch_spinlock_t ++# define lg_do_lock(l) arch_spin_lock(l) ++# define lg_do_unlock(l) arch_spin_unlock(l) ++#else ++# define lg_lock_ptr struct rt_mutex ++# define lg_do_lock(l) __rt_spin_lock(l) ++# define lg_do_unlock(l) __rt_spin_unlock(l) ++#endif + /* + * Note there is no uninit, so lglocks cannot be defined in + * modules (but it's fine to use them from there) +@@ -12,51 +21,60 @@ + + void lg_lock_init(struct lglock *lg, char *name) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int i; ++ ++ for_each_possible_cpu(i) { ++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i); ++ ++ rt_mutex_init(lock); ++ } ++#endif + LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); + } + EXPORT_SYMBOL(lg_lock_init); + + void lg_local_lock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ migrate_disable(); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock); + + void lg_local_unlock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ migrate_enable(); + } + EXPORT_SYMBOL(lg_local_unlock); + + void lg_local_lock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ preempt_disable_nort(); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock_cpu); + + void lg_local_unlock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_local_unlock_cpu); + +@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg) + { + int i; + +- preempt_disable(); ++ preempt_disable_nort(); + lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + } + EXPORT_SYMBOL(lg_global_lock); +@@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg) + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_unlock(lock); ++ lg_do_unlock(lock); + } +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_global_unlock); diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch new file mode 100644 index 000000000..f7571f07f --- /dev/null +++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch @@ -0,0 +1,115 @@ +From: Paul Gortmaker +Date: Fri, 21 Jun 2013 15:07:25 -0400 +Subject: [PATCH] list_bl.h: make list head locking RT safe +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +As per changes in 
include/linux/jbd_common.h for avoiding the +bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal +head lock rt safe") we do the same thing here. + +We use the non atomic __set_bit and __clear_bit inside the scope of +the lock to preserve the ability of the existing LIST_DEBUG code to +use the zero'th bit in the sanity checks. + +As a bit spinlock, we had no lockdep visibility into the usage +of the list head locking. Now, if we were to implement it as a +standard non-raw spinlock, we would see: + +BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 +in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd +5 locks held by udevd/122: + #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0 + #1: (rename_lock){+.+...}, at: [] d_move+0x2c/0x60 + #2: (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130 + #3: (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130 + #4: (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130 +Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7 +Call Trace: + [] __might_sleep+0x134/0x1f0 + [] rt_spin_lock+0x24/0x60 + [] __d_shrink+0x5c/0xa0 + [] __d_drop+0x1d/0x40 + [] __d_move+0x8e/0x320 + [] d_move+0x3e/0x60 + [] vfs_rename+0x198/0x4c0 + [] sys_renameat+0x213/0x240 + [] ? _raw_spin_unlock+0x35/0x60 + [] ? do_page_fault+0x1ec/0x4b0 + [] ? retint_swapgs+0xe/0x13 + [] ? trace_hardirqs_on_thunk+0x3a/0x3f + [] sys_rename+0x1b/0x20 + [] system_call_fastpath+0x1a/0x1f + +Since we are only taking the lock during short lived list operations, +lets assume for now that it being raw won't be a significant latency +concern. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Paul Gortmaker +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/list_bl.h | 28 ++++++++++++++++++++++++++-- + 1 file changed, 26 insertions(+), 2 deletions(-) + +--- a/include/linux/list_bl.h ++++ b/include/linux/list_bl.h +@@ -2,6 +2,7 @@ + #define _LINUX_LIST_BL_H + + #include ++#include + #include + + /* +@@ -32,13 +33,22 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) ++ ++static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) ++{ ++ h->first = NULL; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spin_lock_init(&h->lock); ++#endif ++} + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -117,12 +127,26 @@ static inline void hlist_bl_del_init(str + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __set_bit(0, (unsigned long *)b); ++#endif ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __clear_bit(0, (unsigned long *)b); ++#endif ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch new file mode 100644 index 000000000..b66459afc --- /dev/null +++ 
b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch @@ -0,0 +1,53 @@ +From: Thomas Gleixner +Date: Tue, 21 Jul 2009 22:34:14 +0200 +Subject: rt: local_irq_* variants depending on RT/!RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Add local_irq_*_(no)rt variant which are mainly used to break +interrupt disabled sections on PREEMPT_RT or to explicitely disable +interrupts on PREEMPT_RT. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 2 +- + include/linux/irqflags.h | 19 +++++++++++++++++++ + 2 files changed, 20 insertions(+), 1 deletion(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -180,7 +180,7 @@ extern void devm_free_irq(struct device + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h +@@ -147,4 +147,23 @@ + + #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + ++/* ++ * local_irq* variants depending on RT/!RT ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) local_save_flags(flags) ++# define local_irq_restore_nort(flags) (void)(flags) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif ++ + #endif diff --git a/debian/patches/features/all/rt/local-var.patch b/debian/patches/features/all/rt/local-var.patch new file mode 100644 index 000000000..ac2791c6f --- /dev/null +++ b/debian/patches/features/all/rt/local-var.patch @@ -0,0 +1,24 @@ +Subject: local-var.patch +From: Thomas Gleixner +Date: Fri, 24 Jun 2011 18:40:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/percpu.h | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/include/linux/percpu.h ++++ b/include/linux/percpu.h +@@ -49,6 +49,11 @@ + preempt_enable(); \ + } while (0) + ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + diff --git a/debian/patches/features/all/rt/local-vars-migrate-disable.patch b/debian/patches/features/all/rt/local-vars-migrate-disable.patch new file mode 100644 index 000000000..ef7fbeed0 --- /dev/null +++ b/debian/patches/features/all/rt/local-vars-migrate-disable.patch @@ -0,0 +1,47 @@ +Subject: local-vars-migrate-disable.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 20:42:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/percpu.h | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 
deletions(-) + +--- a/include/linux/percpu.h ++++ b/include/linux/percpu.h +@@ -49,10 +49,30 @@ + preempt_enable(); \ + } while (0) + +-#define get_local_var(var) get_cpu_var(var) +-#define put_local_var(var) put_cpu_var(var) +-#define get_local_ptr(var) get_cpu_ptr(var) +-#define put_local_ptr(var) put_cpu_ptr(var) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define get_local_var(var) get_cpu_var(var) ++# define put_local_var(var) put_cpu_var(var) ++# define get_local_ptr(var) get_cpu_ptr(var) ++# define put_local_ptr(var) put_cpu_ptr(var) ++#else ++# define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ &__get_cpu_var(var); })) ++ ++# define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++#endif + + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch new file mode 100644 index 000000000..adbea2ae3 --- /dev/null +++ b/debian/patches/features/all/rt/localversion.patch @@ -0,0 +1,16 @@ +Subject: localversion.patch +From: Thomas Gleixner +Date: Fri, 08 Jul 2011 20:25:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org +--- + localversion-rt | 1 + + 1 file changed, 1 insertion(+) + +--- /dev/null ++++ b/localversion-rt +@@ -0,0 +1 @@ ++-rt1 diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch new file mode 100644 index 000000000..58f643bc1 --- /dev/null +++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch @@ -0,0 +1,57 @@ +Subject: lockdep-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 18:51:23 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/irqflags.h | 10 +++++++--- + kernel/locking/lockdep.c | 2 ++ + 2 files changed, 9 insertions(+), 3 deletions(-) + +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define INIT_TRACE_IRQFLAGS ++#endif ++ ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) ++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define 
INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -3543,6 +3543,7 @@ static void check_flags(unsigned long fl + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. + * hardirq contexts (such as on 4KSTACKS), so only +@@ -3557,6 +3558,7 @@ static void check_flags(unsigned long fl + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch new file mode 100644 index 000000000..138008a1d --- /dev/null +++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch @@ -0,0 +1,57 @@ +Subject: lockdep: Selftest: Only do hardirq context test for raw spinlock +From: Yong Zhang +Date: Mon, 16 Apr 2012 15:01:56 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +From: Yong Zhang + +On -rt there is no softirq context any more and rwlock is sleepable, +disable softirq context test and rwlock+irq test. + +Signed-off-by: Yong Zhang +Cc: Yong Zhang +Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + lib/locking-selftest.c | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +--- a/lib/locking-selftest.c ++++ b/lib/locking-selftest.c +@@ -1858,6 +1858,7 @@ void locking_selftest(void) + + printk(" --------------------------------------------------------------------------\n"); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * irq-context testcases: + */ +@@ -1870,6 +1871,28 @@ void locking_selftest(void) + + DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); + // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); ++#else ++ /* On -rt, we only do hardirq context test for raw spinlock */ ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); ++#endif + + ww_tests(); + diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch new file mode 100644 index 000000000..10b741350 --- /dev/null +++ b/debian/patches/features/all/rt/md-disable-bcache.patch @@ -0,0 
+1,36 @@ +From a94d9b765f54e3ab9d11156c7a899c71a9185f1c Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 29 Aug 2013 11:48:57 +0200 +Subject: [PATCH] md: disable bcache +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +It uses anon semaphores +|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: +|drivers/md/bcache/request.c:1007:2: error: implicit declaration of function ‘up_read_non_owner’ [-Werror=implicit-function-declaration] +| up_read_non_owner(&dc->writeback_lock); +| ^ +|drivers/md/bcache/request.c: In function ‘request_write’: +|drivers/md/bcache/request.c:1033:2: error: implicit declaration of function ‘down_read_non_owner’ [-Werror=implicit-function-declaration] +| down_read_non_owner(&dc->writeback_lock); +| ^ + +either we get rid of those or we have to introduce them… + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/md/bcache/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/md/bcache/Kconfig ++++ b/drivers/md/bcache/Kconfig +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + ---help--- + Allows a block device to be used as cache for other devices; uses + a btree for indexing and the layout is optimized for SSDs. diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch new file mode 100644 index 000000000..a06509764 --- /dev/null +++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch @@ -0,0 +1,62 @@ +From: Thomas Gleixner +Date: Tue, 6 Apr 2010 16:51:31 +0200 +Subject: md: raid5: Make raid5_percpu handling RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +__raid_run_ops() disables preemption with get_cpu() around the access +to the raid5_percpu variables. That causes scheduling while atomic +spews on RT. + +Serialize the access to the percpu data with a lock and keep the code +preemptible. 
+ +Reported-by: Udo van den Heuvel +Signed-off-by: Thomas Gleixner +Tested-by: Udo van den Heuvel + +--- + drivers/md/raid5.c | 7 +++++-- + drivers/md/raid5.h | 1 + + 2 files changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -1626,8 +1626,9 @@ static void raid_run_ops(struct stripe_h + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1679,7 +1680,8 @@ static void raid_run_ops(struct stripe_h + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + static int grow_one_stripe(struct r5conf *conf, int hash) +@@ -5619,6 +5621,7 @@ static int raid5_alloc_percpu(struct r5c + __func__, cpu); + break; + } ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + } + put_online_cpus(); + +--- a/drivers/md/raid5.h ++++ b/drivers/md/raid5.h +@@ -456,6 +456,7 @@ struct r5conf { + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address diff --git a/debian/patches/features/all/rt/migrate-disable-rt-variant.patch b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch new file mode 100644 index 000000000..f6c5e407c --- /dev/null +++ b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch @@ -0,0 +1,28 @@ +Subject: migrate-disable-rt-variant.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 19:48:20 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/preempt.h | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -161,11 +161,15 @@ extern void migrate_enable(void); + # define preempt_enable_rt() preempt_enable() + # define preempt_disable_nort() barrier() + # define preempt_enable_nort() barrier() ++# define migrate_disable_rt() migrate_disable() ++# define migrate_enable_rt() migrate_enable() + #else + # define preempt_disable_rt() barrier() + # define preempt_enable_rt() barrier() + # define preempt_disable_nort() preempt_disable() + # define preempt_enable_nort() preempt_enable() ++# define migrate_disable_rt() barrier() ++# define migrate_enable_rt() barrier() + #endif + + #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/debian/patches/features/all/rt/migrate_disable-pushd-down-in-atomic_dec_and_spin_lo.patch b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-atomic_dec_and_spin_lo.patch new file mode 100644 index 000000000..6ea5e94b9 --- /dev/null +++ b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-atomic_dec_and_spin_lo.patch @@ -0,0 +1,30 @@ +From cf189cc2685cbe22602cac460a75debb781e5cdb Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 29 Nov 2013 00:19:41 -0500 +Subject: [PATCH] migrate_disable pushd down in atomic_dec_and_spin_lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rtmutex.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- 
a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1028,12 +1028,12 @@ int atomic_dec_and_spin_lock(atomic_t *a + /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; +- migrate_disable(); + rt_spin_lock(lock); +- if (atomic_dec_and_test(atomic)) ++ if (atomic_dec_and_test(atomic)){ ++ migrate_disable(); + return 1; ++ } + rt_spin_unlock(lock); +- migrate_enable(); + return 0; + } + EXPORT_SYMBOL(atomic_dec_and_spin_lock); diff --git a/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_spin_trylock_irqsav.patch b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_spin_trylock_irqsav.patch new file mode 100644 index 000000000..c38f52470 --- /dev/null +++ b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_spin_trylock_irqsav.patch @@ -0,0 +1,30 @@ +From 12e7b0fcf6bfc4035cec0a9ec0f30aaf3b3fe905 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 29 Nov 2013 00:17:27 -0500 +Subject: [PATCH] migrate_disable pushd down in rt_spin_trylock_irqsave +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rtmutex.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1013,12 +1013,11 @@ int __lockfunc rt_spin_trylock_irqsave(s + int ret; + + *flags = 0; +- migrate_disable(); + ret = rt_mutex_trylock(&lock->lock); +- if (ret) ++ if (ret) { ++ migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); +- else +- migrate_enable(); ++ } + return ret; + } + EXPORT_SYMBOL(rt_spin_trylock_irqsave); diff --git a/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_write_trylock_irqsa.patch b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_write_trylock_irqsa.patch new file mode 100644 index 000000000..47246a57e --- /dev/null +++ b/debian/patches/features/all/rt/migrate_disable-pushd-down-in-rt_write_trylock_irqsa.patch @@ -0,0 +1,27 @@ +From 1924bc91c8cf91fc4c94047a4b9985cf6b9e31f1 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 29 Nov 2013 00:21:59 -0500 +Subject: [PATCH] migrate_disable pushd down in rt_write_trylock_irqsave +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rt.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +--- a/kernel/locking/rt.c ++++ b/kernel/locking/rt.c +@@ -196,10 +196,9 @@ int __lockfunc rt_write_trylock_irqsave( + int ret; + + *flags = 0; +- migrate_disable(); + ret = rt_write_trylock(rwlock); +- if (!ret) +- migrate_enable(); ++ if (ret) ++ migrate_disable(); + return ret; + } + EXPORT_SYMBOL(rt_write_trylock_irqsave); diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch new file mode 100644 index 000000000..51bae3332 --- /dev/null +++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch @@ -0,0 +1,21 @@ +Subject: mips-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:10:12 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/mips/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- 
a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -2094,7 +2094,7 @@ config CPU_R4400_WORKAROUNDS + # + config HIGHMEM + bool "High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool diff --git a/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch new file mode 100644 index 000000000..560fdc05d --- /dev/null +++ b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch @@ -0,0 +1,20 @@ +Subject: mips-enable-interrupts-in-signal.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 21:32:10 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/mips/kernel/signal.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/mips/kernel/signal.c ++++ b/arch/mips/kernel/signal.c +@@ -575,6 +575,7 @@ asmlinkage void do_notify_resume(struct + __u32 thread_info_flags) + { + local_irq_enable(); ++ preempt_check_resched(); + + user_exit(); + diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch new file mode 100644 index 000000000..632ddae33 --- /dev/null +++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch @@ -0,0 +1,28 @@ +Subject: mm: bounce: Use local_irq_save_nort +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 10:33:09 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +kmap_atomic() is preemptible on RT. + +Signed-off-by: Thomas Gleixner +--- + mm/bounce.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/bounce.c ++++ b/mm/bounce.c +@@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_v + unsigned long flags; + unsigned char *vto; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + #else /* CONFIG_HIGHMEM */ diff --git a/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch new file mode 100644 index 000000000..d54d86101 --- /dev/null +++ b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch @@ -0,0 +1,92 @@ +From: Thomas Gleixner +Date: Wed, 19 Aug 2009 09:56:42 +0200 +Subject: mm: Replace cgroup_page bit spinlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Bit spinlocks are not working on RT. Replace them. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/page_cgroup.h | 15 +++++++++++++++ + mm/page_cgroup.c | 11 +++++++++++ + 2 files changed, 26 insertions(+) + +--- a/include/linux/page_cgroup.h ++++ b/include/linux/page_cgroup.h +@@ -24,6 +24,9 @@ enum { + */ + struct page_cgroup { + unsigned long flags; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t pcg_lock; ++#endif + struct mem_cgroup *mem_cgroup; + }; + +@@ -74,12 +77,20 @@ static inline void lock_page_cgroup(stru + * Don't take this lock in IRQ context. 
+ * This lock is for pc->mem_cgroup, USED, MIGRATION + */ ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(PCG_LOCK, &pc->flags); ++#else ++ spin_lock(&pc->pcg_lock); ++#endif + } + + static inline void unlock_page_cgroup(struct page_cgroup *pc) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(PCG_LOCK, &pc->flags); ++#else ++ spin_unlock(&pc->pcg_lock); ++#endif + } + + #else /* CONFIG_MEMCG */ +@@ -102,6 +113,10 @@ static inline void __init page_cgroup_in + { + } + ++static inline void page_cgroup_lock_init(struct page_cgroup *pc) ++{ ++} ++ + #endif /* CONFIG_MEMCG */ + + #include +--- a/mm/page_cgroup.c ++++ b/mm/page_cgroup.c +@@ -13,6 +13,14 @@ + + static unsigned long total_usage; + ++static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ for (; nr_pages; nr_pages--, pc++) ++ spin_lock_init(&pc->pcg_lock); ++#endif ++} ++ + #if !defined(CONFIG_SPARSEMEM) + + +@@ -61,6 +69,7 @@ static int __init alloc_node_page_cgroup + return -ENOMEM; + NODE_DATA(nid)->node_page_cgroup = base; + total_usage += table_size; ++ page_cgroup_lock_init(base, nr_pages); + return 0; + } + +@@ -151,6 +160,8 @@ static int __meminit init_section_page_c + return -ENOMEM; + } + ++ page_cgroup_lock_init(base, PAGES_PER_SECTION); ++ + /* + * The passed "pfn" may not be aligned to SECTION. For the calculation + * we need to apply a mask. diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch new file mode 100644 index 000000000..a67640446 --- /dev/null +++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch @@ -0,0 +1,132 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:51 -0500 +Subject: mm: convert swap to percpu locked +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + mm/swap.c | 34 ++++++++++++++++++++-------------- + 1 file changed, 20 insertions(+), 14 deletions(-) + +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -44,6 +45,9 @@ static DEFINE_PER_CPU(struct pagevec, lr + static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); + static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); + ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); ++ + /* + * This path almost never happens for VM activity - pages are normally + * freed via pagevecs. But it gets used by networking. 
+@@ -440,11 +444,11 @@ void rotate_reclaimable_page(struct page + unsigned long flags; + + page_cache_get(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = &__get_cpu_var(lru_rotate_pvecs); + if (!pagevec_add(pvec, page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -495,12 +499,13 @@ static bool need_activate_page_drain(int + void activate_page(struct page *page) + { + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ activate_page_pvecs); + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swapvec_lock, activate_page_pvecs); + } + } + +@@ -526,7 +531,7 @@ void activate_page(struct page *page) + + static void __lru_cache_activate_page(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + int i; + + /* +@@ -548,7 +553,7 @@ static void __lru_cache_activate_page(st + } + } + +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /* +@@ -588,13 +593,13 @@ EXPORT_SYMBOL(mark_page_accessed); + */ + void __lru_cache_add(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + + page_cache_get(page); + if (!pagevec_space(pvec)) + __pagevec_lru_add(pvec); + pagevec_add(pvec, page); +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + EXPORT_SYMBOL(__lru_cache_add); + +@@ -717,9 +722,9 @@ void lru_add_drain_cpu(int cpu) + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + + pvec = &per_cpu(lru_deactivate_pvecs, cpu); +@@ -747,18 +752,19 @@ void deactivate_page(struct page *page) + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_pvecs); + + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); +- put_cpu_var(lru_deactivate_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_pvecs); + } + } + + void lru_add_drain(void) + { +- lru_add_drain_cpu(get_cpu()); +- put_cpu(); ++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); ++ local_unlock_cpu(swapvec_lock); + } + + static void lru_add_drain_per_cpu(struct work_struct *dummy) diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch new file mode 100644 index 000000000..0f5461cff --- /dev/null +++ b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch @@ -0,0 +1,30 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:03 -0500 +Subject: mm: Allow only slub on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + init/Kconfig | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1556,6 +1556,7 @@ choice + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + 
help + The regular slab allocator that is established and known to work + well in all environments. It organizes cache hot objects in +@@ -1574,6 +1575,7 @@ config SLUB + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. SLOB is generally more space efficient but diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch new file mode 100644 index 000000000..c8fa81777 --- /dev/null +++ b/debian/patches/features/all/rt/mm-enable-slub.patch @@ -0,0 +1,395 @@ +Subject: mm: Enable SLUB for RT +From: Thomas Gleixner +Date: Thu, 25 Oct 2012 10:32:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Make SLUB RT aware and remove the restriction in Kconfig. + +Signed-off-by: Thomas Gleixner +--- + mm/slab.h | 4 ++ + mm/slub.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++--------------- + 2 files changed, 95 insertions(+), 28 deletions(-) + +--- a/mm/slab.h ++++ b/mm/slab.h +@@ -273,7 +273,11 @@ static inline struct kmem_cache *cache_f + * The slab lists for all objects. + */ + struct kmem_cache_node { ++#ifdef CONFIG_SLUB ++ raw_spinlock_t list_lock; ++#else + spinlock_t list_lock; ++#endif + + #ifdef CONFIG_SLAB + struct list_head slabs_partial; /* partial list first, better asm code */ +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1109,7 +1109,7 @@ static noinline struct kmem_cache_node * + { + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + +- spin_lock_irqsave(&n->list_lock, *flags); ++ raw_spin_lock_irqsave(&n->list_lock, *flags); + slab_lock(page); + + if (!check_slab(s, page)) +@@ -1157,7 +1157,7 @@ static noinline struct kmem_cache_node * + + fail: + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, *flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, *flags); + slab_fix(s, "Object at 0x%p not freed", object); + return NULL; + } +@@ -1310,6 +1310,12 @@ static inline void slab_free_hook(struct + + #endif /* CONFIG_SLUB_DEBUG */ + ++struct slub_free_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); ++ + /* + * Slab allocation and freeing + */ +@@ -1334,7 +1340,11 @@ static struct page *allocate_slab(struct + + flags &= gfp_allowed_mask; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++#else + if (flags & __GFP_WAIT) ++#endif + local_irq_enable(); + + flags |= s->allocflags; +@@ -1374,7 +1384,11 @@ static struct page *allocate_slab(struct + kmemcheck_mark_unallocated_pages(page, pages); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++#else + if (flags & __GFP_WAIT) ++#endif + local_irq_disable(); + if (!page) + return NULL; +@@ -1471,6 +1485,16 @@ static void __free_slab(struct kmem_cach + __free_memcg_kmem_pages(page, order); + } + ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_slab(page->slab_cache, page); ++ } ++} ++ + #define need_reserve_slab_rcu \ + (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) + +@@ -1505,6 +1529,12 @@ static void free_slab(struct kmem_cache + } + + call_rcu(head, rcu_free_slab); ++ } else if (irqs_disabled()) { ++ struct slub_free_list *f = &__get_cpu_var(slub_free_list); ++ ++ raw_spin_lock(&f->lock); ++ list_add(&page->lru, 
&f->list); ++ raw_spin_unlock(&f->lock); + } else + __free_slab(s, page); + } +@@ -1618,7 +1648,7 @@ static void *get_partial_node(struct kme + if (!n || !n->nr_partial) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + list_for_each_entry_safe(page, page2, &n->partial, lru) { + void *t; + +@@ -1643,7 +1673,7 @@ static void *get_partial_node(struct kme + break; + + } +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return object; + } + +@@ -1886,7 +1916,7 @@ static void deactivate_slab(struct kmem_ + * that acquire_slab() will see a slab page that + * is frozen + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } else { + m = M_FULL; +@@ -1897,7 +1927,7 @@ static void deactivate_slab(struct kmem_ + * slabs from diagnostic functions will not see + * any frozen slabs. + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } + +@@ -1932,7 +1962,7 @@ static void deactivate_slab(struct kmem_ + goto redo; + + if (lock) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + if (m == M_FREE) { + stat(s, DEACTIVATE_EMPTY); +@@ -1964,10 +1994,10 @@ static void unfreeze_partials(struct kme + n2 = get_node(s, page_to_nid(page)); + if (n != n2) { + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + n = n2; +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + + do { +@@ -1996,7 +2026,7 @@ static void unfreeze_partials(struct kme + } + + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + while (discard_page) { + page = discard_page; +@@ -2034,14 +2064,21 @@ static void put_cpu_partial(struct kmem_ + pobjects = oldpage->pobjects; + pages = oldpage->pages; + if (drain && pobjects > s->cpu_partial) { ++ struct slub_free_list *f; + unsigned long flags; ++ LIST_HEAD(tofree); + /* + * partial array is full. Move the existing + * set to the per node partial list. 
+ */ + local_irq_save(flags); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); ++ f = &__get_cpu_var(slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(&tofree); + oldpage = NULL; + pobjects = 0; + pages = 0; +@@ -2105,7 +2142,22 @@ static bool has_cpu_slab(int cpu, void * + + static void flush_all(struct kmem_cache *s) + { ++ LIST_HEAD(tofree); ++ int cpu; ++ + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); ++ for_each_online_cpu(cpu) { ++ struct slub_free_list *f; ++ ++ if (!has_cpu_slab(cpu, s)) ++ continue; ++ ++ f = &per_cpu(slub_free_list, cpu); ++ raw_spin_lock_irq(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock_irq(&f->lock); ++ free_delayed(&tofree); ++ } + } + + /* +@@ -2133,10 +2185,10 @@ static unsigned long count_partial(struc + unsigned long x = 0; + struct page *page; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return x; + } + +@@ -2279,9 +2331,11 @@ static inline void *get_freelist(struct + static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + unsigned long addr, struct kmem_cache_cpu *c) + { ++ struct slub_free_list *f; + void *freelist; + struct page *page; + unsigned long flags; ++ LIST_HEAD(tofree); + + local_irq_save(flags); + #ifdef CONFIG_PREEMPT +@@ -2344,7 +2398,13 @@ static void *__slab_alloc(struct kmem_ca + VM_BUG_ON(!c->page->frozen); + c->freelist = get_freepointer(s, freelist); + c->tid = next_tid(c->tid); ++out: ++ f = &__get_cpu_var(slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(&tofree); + return freelist; + + new_slab: +@@ -2362,9 +2422,7 @@ static void *__slab_alloc(struct kmem_ca + if (unlikely(!freelist)) { + if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) + slab_out_of_memory(s, gfpflags, node); +- +- local_irq_restore(flags); +- return NULL; ++ goto out; + } + + page = c->page; +@@ -2379,8 +2437,7 @@ static void *__slab_alloc(struct kmem_ca + deactivate_slab(s, page, get_freepointer(s, freelist)); + c->page = NULL; + c->freelist = NULL; +- local_irq_restore(flags); +- return freelist; ++ goto out; + } + + /* +@@ -2552,7 +2609,7 @@ static void __slab_free(struct kmem_cach + + do { + if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2584,7 +2641,7 @@ static void __slab_free(struct kmem_cach + * Otherwise the list_lock will synchronize with + * other processors updating the list of slabs. 
+ */ +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + } + } +@@ -2626,7 +2683,7 @@ static void __slab_free(struct kmem_cach + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); + } +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return; + + slab_empty: +@@ -2641,7 +2698,7 @@ static void __slab_free(struct kmem_cach + remove_full(s, n, page); + } + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + stat(s, FREE_SLAB); + discard_slab(s, page); + } +@@ -2843,7 +2900,7 @@ static void + init_kmem_cache_node(struct kmem_cache_node *n) + { + n->nr_partial = 0; +- spin_lock_init(&n->list_lock); ++ raw_spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); + #ifdef CONFIG_SLUB_DEBUG + atomic_long_set(&n->nr_slabs, 0); +@@ -3433,7 +3490,7 @@ int kmem_cache_shrink(struct kmem_cache + for (i = 0; i < objects; i++) + INIT_LIST_HEAD(slabs_by_inuse + i); + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + /* + * Build lists indexed by the items in use in each slab. +@@ -3454,7 +3511,7 @@ int kmem_cache_shrink(struct kmem_cache + for (i = objects - 1; i > 0; i--) + list_splice(slabs_by_inuse + i, n->partial.prev); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + /* Release empty slabs */ + list_for_each_entry_safe(page, t, slabs_by_inuse, lru) +@@ -3630,6 +3687,12 @@ void __init kmem_cache_init(void) + { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } + + if (debug_guardpage_minorder()) + slub_max_order = 0; +@@ -3934,7 +3997,7 @@ static int validate_slab_node(struct kme + struct page *page; + unsigned long flags; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + list_for_each_entry(page, &n->partial, lru) { + validate_slab_slab(s, page, map); +@@ -3957,7 +4020,7 @@ static int validate_slab_node(struct kme + atomic_long_read(&n->nr_slabs)); + + out: +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return count; + } + +@@ -4147,12 +4210,12 @@ static int list_locations(struct kmem_ca + if (!atomic_long_read(&n->nr_slabs)) + continue; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + process_slab(&t, s, page, alloc, map); + list_for_each_entry(page, &n->full, lru) + process_slab(&t, s, page, alloc, map); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + } + + for (i = 0; i < t.count; i++) { diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch new file mode 100644 index 000000000..9d947fdd8 --- /dev/null +++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch @@ -0,0 +1,85 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:13 -0500 +Subject: [PATCH] mm: make vmstat -rt aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/vmstat.h | 4 ++++ + mm/vmstat.c | 6 ++++++ + 2 
files changed, 10 insertions(+) + +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v + + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + __this_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -39,7 +41,9 @@ static inline void count_vm_event(enum v + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + __this_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -217,6 +217,7 @@ void __mod_zone_page_state(struct zone * + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -226,6 +227,7 @@ void __mod_zone_page_state(struct zone * + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -258,6 +260,7 @@ void __inc_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -266,6 +269,7 @@ void __inc_zone_state(struct zone *zone, + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -280,6 +284,7 @@ void __dec_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -288,6 +293,7 @@ void __dec_zone_state(struct zone *zone, + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch new file mode 100644 index 000000000..3fd868eec --- /dev/null +++ b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -0,0 +1,70 @@ +From: Yang Shi +Subject: [V3 PATCH] mm/memcontrol: Don't call schedule_work_on in preemption disabled context +Date: Wed, 30 Oct 2013 11:48:33 -0700 +Message-ID: <1383158913-16325-1-git-send-email-yang.shi@windriver.com> +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The following trace is triggered when running ltp oom test cases: + +BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 +in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03 +Preemption disabled at:[] mem_cgroup_reclaim+0x90/0xe0 + +CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2 +Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 +ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70 +ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0 +ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0 +Call Trace: +[] dump_stack+0x19/0x1b +[] __might_sleep+0xf1/0x170 +[] rt_spin_lock+0x20/0x50 +[] queue_work_on+0x61/0x100 +[] 
drain_all_stock+0xe1/0x1c0 +[] mem_cgroup_reclaim+0x90/0xe0 +[] __mem_cgroup_try_charge+0x41a/0xc40 +[] ? release_pages+0x1b1/0x1f0 +[] ? sched_exec+0x40/0xb0 +[] mem_cgroup_charge_common+0x37/0x70 +[] mem_cgroup_newpage_charge+0x26/0x30 +[] handle_pte_fault+0x618/0x840 +[] ? unpin_current_cpu+0x16/0x70 +[] ? migrate_enable+0xd4/0x200 +[] handle_mm_fault+0x145/0x1e0 +[] __do_page_fault+0x1a1/0x4c0 +[] ? preempt_schedule_irq+0x4b/0x70 +[] ? retint_kernel+0x37/0x40 +[] do_page_fault+0xe/0x10 +[] page_fault+0x22/0x30 + +So, to prevent schedule_work_on from being called in preempt disabled context, +replace the pair of get/put_cpu() to get/put_cpu_light(). + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Yang Shi +Signed-off-by: Sebastian Andrzej Siewior +--- + + mm/memcontrol.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -2518,7 +2518,7 @@ static void drain_all_stock(struct mem_c + + /* Notify other cpus that system-wide "drain" is running */ + get_online_cpus(); +- curcpu = get_cpu(); ++ curcpu = get_cpu_light(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + struct mem_cgroup *memcg; +@@ -2535,7 +2535,7 @@ static void drain_all_stock(struct mem_c + schedule_work_on(cpu, &stock->work); + } + } +- put_cpu(); ++ put_cpu_light(); + + if (!sync) + goto out; diff --git a/debian/patches/features/all/rt/mm-page-alloc-fix.patch b/debian/patches/features/all/rt/mm-page-alloc-fix.patch new file mode 100644 index 000000000..ed3ed80bc --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-fix.patch @@ -0,0 +1,23 @@ +Subject: mm-page-alloc-fix.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 16:47:49 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2328,8 +2328,8 @@ static struct page * + struct page *page; + + /* Page migration frees to the PCP lists but we want merging */ +- drain_pages(get_cpu()); +- put_cpu(); ++ drain_pages(get_cpu_light()); ++ put_cpu_light(); + + page = get_page_from_freelist(gfp_mask, nodemask, + order, zonelist, high_zoneidx, diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch new file mode 100644 index 000000000..f4c59726e --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch @@ -0,0 +1,21 @@ +Subject: mm-page-alloc-use-list-last-entry.patch +From: Peter Zijlstra +Date: Tue, 21 Jun 2011 11:24:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -694,7 +694,7 @@ static void free_pcppages_bulk(struct zo + do { + int mt; /* migratetype of the to-be-freed page */ + +- page = list_entry(list->prev, struct page, lru); ++ page = list_last_entry(list, struct page, lru); + /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); + mt = get_freepage_migratetype(page); diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch new file mode 100644 index 
000000000..642ac62a1 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch @@ -0,0 +1,28 @@ +Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock +From: Thomas Gleixner +Date: Thu, 27 Sep 2012 11:11:46 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The plain spinlock while sufficient does not update the local_lock +internals. Use a proper local_lock function instead to ease debugging. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + mm/page_alloc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -235,9 +235,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); + + #ifdef CONFIG_PREEMPT_RT_BASE + # define cpu_lock_irqsave(cpu, flags) \ +- spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) ++ local_lock_irqsave_on(pa_lock, flags, cpu) + # define cpu_unlock_irqrestore(cpu, flags) \ +- spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) ++ local_unlock_irqrestore_on(pa_lock, flags, cpu) + #else + # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) + # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch new file mode 100644 index 000000000..0273649c3 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch @@ -0,0 +1,189 @@ +From: Peter Zijlstra +Date: Fri, 3 Jul 2009 08:44:37 -0500 +Subject: mm: page_alloc reduce lock sections further +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Split out the pages which are to be freed into a separate list and +call free_pages_bulk() outside of the percpu page allocator locks. + +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 86 +++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 61 insertions(+), 25 deletions(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -662,7 +662,7 @@ static inline int free_pages_check(struc + } + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -673,15 +673,49 @@ static inline int free_pages_check(struc + * pinned" detection logic. 
+ */ + static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++ struct list_head *list) + { +- int migratetype = 0; +- int batch_free = 0; + int to_free = count; ++ unsigned long flags; + +- spin_lock(&zone->lock); ++ spin_lock_irqsave(&zone->lock, flags); + zone->pages_scanned = 0; + ++ while (!list_empty(list)) { ++ struct page *page = list_first_entry(list, struct page, lru); ++ int mt; /* migratetype of the to-be-freed page */ ++ ++ /* must delete as __free_one_page list manipulates */ ++ list_del(&page->lru); ++ ++ mt = get_freepage_migratetype(page); ++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ ++ __free_one_page(page, zone, 0, mt); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ if (likely(!is_migrate_isolate_page(page))) { ++ __mod_zone_page_state(zone, NR_FREE_PAGES, 1); ++ if (is_migrate_cma(mt)) ++ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); ++ } ++ ++ to_free--; ++ } ++ WARN_ON(to_free != 0); ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++/* ++ * Moves a number of pages from the PCP lists to free list which ++ * is freed outside of the locked region. ++ * ++ * Assumes all pages on list are in same zone, and of same order. ++ * count is the number of pages to free. ++ */ ++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, ++ struct list_head *dst) ++{ ++ int migratetype = 0, batch_free = 0; ++ + while (to_free) { + struct page *page; + struct list_head *list; +@@ -697,7 +731,7 @@ static void free_pcppages_bulk(struct zo + batch_free++; + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; +- list = &pcp->lists[migratetype]; ++ list = &src->lists[migratetype]; + } while (list_empty(list)); + + /* This is the only non-empty list. Free them all. */ +@@ -705,35 +739,25 @@ static void free_pcppages_bulk(struct zo + batch_free = to_free; + + do { +- int mt; /* migratetype of the to-be-freed page */ +- + page = list_last_entry(list, struct page, lru); +- /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); +- mt = get_freepage_migratetype(page); +- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ +- __free_one_page(page, zone, 0, mt); +- trace_mm_page_pcpu_drain(page, 0, mt); +- if (likely(!is_migrate_isolate_page(page))) { +- __mod_zone_page_state(zone, NR_FREE_PAGES, 1); +- if (is_migrate_cma(mt)) +- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); +- } ++ list_add(&page->lru, dst); + } while (--to_free && --batch_free && !list_empty(list)); + } +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, struct page *page, int order, + int migratetype) + { +- spin_lock(&zone->lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + zone->pages_scanned = 0; + + __free_one_page(page, zone, order, migratetype); + if (unlikely(!is_migrate_isolate(migratetype))) + __mod_zone_freepage_state(zone, 1 << order, migratetype); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static bool free_pages_prepare(struct page *page, unsigned int order) +@@ -1236,6 +1260,7 @@ static int rmqueue_bulk(struct zone *zon + void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; ++ LIST_HEAD(dst); + int to_drain; + unsigned long batch; + +@@ -1246,10 +1271,11 @@ void drain_zone_pages(struct zone *zone, + else + to_drain = pcp->count; + if (to_drain > 0) { +- free_pcppages_bulk(zone, to_drain, pcp); ++ isolate_pcp_pages(to_drain, pcp, &dst); + pcp->count -= to_drain; + } + 
local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, to_drain, &dst); + } + static bool gfp_thisnode_allocation(gfp_t gfp_mask) + { +@@ -1277,16 +1303,21 @@ static void drain_pages(unsigned int cpu + for_each_populated_zone(zone) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + + cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) { +- free_pcppages_bulk(zone, pcp->count, pcp); ++ count = pcp->count; ++ if (count) { ++ isolate_pcp_pages(count, pcp, &dst); + pcp->count = 0; + } + cpu_unlock_irqrestore(cpu, flags); ++ if (count) ++ free_pcppages_bulk(zone, count, &dst); + } + } + +@@ -1425,8 +1456,13 @@ void free_hot_cold_page(struct page *pag + pcp->count++; + if (pcp->count >= pcp->high) { + unsigned long batch = ACCESS_ONCE(pcp->batch); +- free_pcppages_bulk(zone, batch, pcp); ++ LIST_HEAD(dst); ++ ++ isolate_pcp_pages(batch, pcp, &dst); + pcp->count -= batch; ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, batch, &dst); ++ return; + } + + out: diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch new file mode 100644 index 000000000..3cce939d7 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -0,0 +1,202 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:37 -0500 +Subject: mm: page_alloc: rt-friendly per-cpu pages +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +rt-friendly per-cpu pages: convert the irqs-off per-cpu locking +method into a preemptible, explicit-per-cpu-locks method. + +Contains fixes from: + Peter Zijlstra + Thomas Gleixner + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + mm/page_alloc.c | 53 +++++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 37 insertions(+), 16 deletions(-) + +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -61,6 +61,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -230,6 +231,18 @@ EXPORT_SYMBOL(nr_node_ids); + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + void set_pageblock_migratetype(struct page *page, int migratetype) +@@ -758,12 +771,12 @@ static void __free_pages_ok(struct page + if (!free_pages_prepare(page, order)) + return; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_events(PGFREE, 1 << order); + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); + free_one_page(page_zone(page), page, order, migratetype); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + void __init __free_pages_bootmem(struct page *page, unsigned int order) +@@ -1226,7 +1239,7 @@ void drain_zone_pages(struct zone *zone, + int to_drain; + unsigned long batch; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + batch = ACCESS_ONCE(pcp->batch); + 
if (pcp->count >= batch) + to_drain = batch; +@@ -1236,7 +1249,7 @@ void drain_zone_pages(struct zone *zone, + free_pcppages_bulk(zone, to_drain, pcp); + pcp->count -= to_drain; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + static bool gfp_thisnode_allocation(gfp_t gfp_mask) + { +@@ -1265,7 +1278,7 @@ static void drain_pages(unsigned int cpu + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +@@ -1273,7 +1286,7 @@ static void drain_pages(unsigned int cpu + free_pcppages_bulk(zone, pcp->count, pcp); + pcp->count = 0; + } +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); + } + } + +@@ -1326,7 +1339,12 @@ void drain_all_pages(void) + else + cpumask_clear_cpu(cpu, &cpus_with_pcps); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); ++#else ++ for_each_cpu(cpu, &cpus_with_pcps) ++ drain_pages(cpu); ++#endif + } + + #ifdef CONFIG_HIBERNATION +@@ -1381,7 +1399,7 @@ void free_hot_cold_page(struct page *pag + + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_event(PGFREE); + + /* +@@ -1412,7 +1430,7 @@ void free_hot_cold_page(struct page *pag + } + + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -1542,7 +1560,7 @@ struct page *buffered_rmqueue(struct zon + struct per_cpu_pages *pcp; + struct list_head *list; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; + if (list_empty(list)) { +@@ -1574,13 +1592,15 @@ struct page *buffered_rmqueue(struct zon + */ + WARN_ON_ONCE(order > 1); + } +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + page = __rmqueue(zone, order, migratetype); +- spin_unlock(&zone->lock); +- if (!page) ++ if (!page) { ++ spin_unlock(&zone->lock); + goto failed; ++ } + __mod_zone_freepage_state(zone, -(1 << order), + get_pageblock_migratetype(page)); ++ spin_unlock(&zone->lock); + } + + /* +@@ -1592,7 +1612,7 @@ struct page *buffered_rmqueue(struct zon + + __count_zone_vm_events(PGALLOC, zone, 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + VM_BUG_ON_PAGE(bad_range(zone, page), page); + if (prep_new_page(page, order, gfp_flags)) +@@ -1600,7 +1620,7 @@ struct page *buffered_rmqueue(struct zon + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -5517,6 +5537,7 @@ static int page_alloc_cpu_notify(struct + void __init page_alloc_init(void) + { + hotcpu_notifier(page_alloc_cpu_notify, 0); ++ local_irq_lock_init(pa_lock); + } + + /* +@@ -6385,7 +6406,7 @@ void zone_pcp_reset(struct zone *zone) + struct per_cpu_pageset *pset; + + /* avoid races with drain_pages() */ +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -6394,7 +6415,7 @@ void zone_pcp_reset(struct zone *zone) + free_percpu(zone->pageset); + zone->pageset = &boot_pageset; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + #ifdef CONFIG_MEMORY_HOTREMOVE diff 
--git a/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch new file mode 100644 index 000000000..96cb56f5d --- /dev/null +++ b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch @@ -0,0 +1,119 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:37 -0500 +Subject: mm: Prepare decoupling the page fault disabling logic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Add a pagefault_disabled variable to task_struct to allow decoupling +the pagefault-disabled logic from the preempt count. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 1 + + include/linux/uaccess.h | 32 +++----------------------------- + kernel/fork.c | 1 + + mm/memory.c | 30 ++++++++++++++++++++++++++++++ + 4 files changed, 35 insertions(+), 29 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1388,6 +1388,7 @@ struct task_struct { + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++ int pagefault_disabled; + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -6,36 +6,10 @@ + + /* + * These routines enable/disable the pagefault handler in that +- * it will not take any locks and go straight to the fixup table. +- * +- * They have great resemblance to the preempt_disable/enable calls +- * and in fact they are identical; this is because currently there is +- * no other way to make the pagefault handlers do this. So we do +- * disable preemption but we don't necessarily care about that. ++ * it will not take any MM locks and go straight to the fixup table. + */ +-static inline void pagefault_disable(void) +-{ +- preempt_count_inc(); +- /* +- * make sure to have issued the store before a pagefault +- * can hit. +- */ +- barrier(); +-} +- +-static inline void pagefault_enable(void) +-{ +-#ifndef CONFIG_PREEMPT +- /* +- * make sure to issue those last loads/stores before enabling +- * the pagefault handler again. +- */ +- barrier(); +- preempt_count_dec(); +-#else +- preempt_enable(); +-#endif +-} ++extern void pagefault_disable(void); ++extern void pagefault_enable(void); + + #ifndef ARCH_HAS_NOCACHE_UACCESS + +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1298,6 +1298,7 @@ static struct task_struct *copy_process( + p->hardirq_context = 0; + p->softirq_context = 0; + #endif ++ p->pagefault_disabled = 0; + #ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3690,6 +3690,36 @@ static int handle_pte_fault(struct mm_st + return 0; + } + ++void pagefault_disable(void) ++{ ++ preempt_count_inc(); ++ current->pagefault_disabled++; ++ /* ++ * make sure to have issued the store before a pagefault ++ * can hit. ++ */ ++ barrier(); ++} ++EXPORT_SYMBOL(pagefault_disable); ++ ++void pagefault_enable(void) ++{ ++#ifndef CONFIG_PREEMPT ++ /* ++ * make sure to issue those last loads/stores before enabling ++ * the pagefault handler again. 
++ */ ++ barrier(); ++ current->pagefault_disabled--; ++ preempt_count_dec(); ++#else ++ barrier(); ++ current->pagefault_disabled--; ++ preempt_enable(); ++#endif ++} ++EXPORT_SYMBOL(pagefault_enable); ++ + /* + * By the time we get here, we already hold the mm semaphore + */ diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch new file mode 100644 index 000000000..8b393ad22 --- /dev/null +++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch @@ -0,0 +1,70 @@ +Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() +From: Yong Zhang +Date: Tue, 15 May 2012 13:53:56 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +User preempt_*_rt instead of local_irq_*_rt or otherwise there will be +warning on ARM like below: + +WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() +Modules linked in: +[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) +[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) +[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) +[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) +[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) +[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) +[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) +[] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) +[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) +[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) +[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) +---[ end trace 0000000000000002 ]--- + +The reason is that ARM need irq enabled when doing activate_mm(). +According to mm-protect-activate-switch-mm.patch, actually +preempt_[disable|enable]_rt() is sufficient. 
+ +Inspired-by: Steven Rostedt +Signed-off-by: Yong Zhang +Cc: Steven Rostedt +Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + fs/exec.c | 2 ++ + mm/mmu_context.c | 2 ++ + 2 files changed, 4 insertions(+) + +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -842,10 +842,12 @@ static int exec_mmap(struct mm_struct *m + } + } + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); ++ preempt_enable_rt(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +--- a/mm/mmu_context.c ++++ b/mm/mmu_context.c +@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) + struct task_struct *tsk = current; + + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + atomic_inc(&mm->mm_count); +@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + + if (active_mm != mm) diff --git a/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch new file mode 100644 index 000000000..b84556bb4 --- /dev/null +++ b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch @@ -0,0 +1,30 @@ +From: Thomas Gleixner +Date: Sat, 25 Jul 2009 22:06:27 +0200 +Subject: mm: Remove preempt count from pagefault disable/enable +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Now that all users are cleaned up, we can remove the preemption count. + +Signed-off-by: Thomas Gleixner +--- + mm/memory.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3693,7 +3693,6 @@ static int handle_pte_fault(struct mm_st + #ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { +- preempt_count_inc(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault +@@ -3711,7 +3710,6 @@ void pagefault_enable(void) + */ + barrier(); + current->pagefault_disabled--; +- preempt_enable(); + } + EXPORT_SYMBOL(pagefault_enable); + #endif diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch new file mode 100644 index 000000000..65b39ec11 --- /dev/null +++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch @@ -0,0 +1,275 @@ +Subject: mm, rt: kmap_atomic scheduling +From: Peter Zijlstra +Date: Thu, 28 Jul 2011 10:43:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +In fact, with migrate_disable() existing one could play games with +kmap_atomic. You could save/restore the kmap_atomic slots on context +switch (if there are any in use of course), this should be esp easy now +that we have a kmap_atomic stack. + +Something like the below.. it wants replacing all the preempt_disable() +stuff with pagefault_disable() && migrate_disable() of course, but then +you can flip kmaps around like below. + +Signed-off-by: Peter Zijlstra +[dvhart@linux.intel.com: build fix] +Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins + +[tglx@linutronix.de: Get rid of the per cpu variable and store the idx + and the pte content right away in the task struct. + Shortens the context switch code. 
] +--- + arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++ + arch/x86/mm/highmem_32.c | 9 ++++++++- + arch/x86/mm/iomap_32.c | 9 ++++++++- + include/linux/highmem.h | 27 +++++++++++++++++++++++---- + include/linux/sched.h | 7 +++++++ + mm/highmem.c | 6 ++++-- + mm/memory.c | 2 ++ + 7 files changed, 84 insertions(+), 8 deletions(-) + +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -218,6 +219,35 @@ start_thread(struct pt_regs *regs, unsig + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -305,6 +335,8 @@ EXPORT_SYMBOL_GPL(start_thread); + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++ switch_kmaps(prev_p, next_p); ++ + /* + * Leave lazy mode, flushing any hypercalls made here. + * This must be done before restoring TLS segments so +--- a/arch/x86/mm/highmem_32.c ++++ b/arch/x86/mm/highmem_32.c +@@ -32,6 +32,7 @@ EXPORT_SYMBOL(kunmap); + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -64,7 +65,10 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -110,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -85,32 +85,51 @@ static inline void __kunmap_atomic(void + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx > KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -24,6 +24,7 @@ struct sched_param { + #include + #include + #include ++#include + + #include + #include +@@ -1611,6 +1612,12 @@ struct task_struct { + struct rcu_head put_rcu; + int softirq_nestcnt; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + }; + + #define TNF_MIGRATED 0x01 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -29,10 +29,11 @@ + #include + #include + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". 
+@@ -47,8 +48,9 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx); + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3693,6 +3693,7 @@ static int handle_pte_fault(struct mm_st + #ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { ++ migrate_disable(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault +@@ -3710,6 +3711,7 @@ void pagefault_enable(void) + */ + barrier(); + current->pagefault_disabled--; ++ migrate_enable(); + } + EXPORT_SYMBOL(pagefault_enable); + #endif diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch new file mode 100644 index 000000000..3bfa99f62 --- /dev/null +++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch @@ -0,0 +1,39 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:34 -0500 +Subject: mm: scatterlist dont disable irqs on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + lib/scatterlist.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/lib/scatterlist.c ++++ b/lib/scatterlist.c +@@ -583,7 +583,7 @@ void sg_miter_stop(struct sg_mapping_ite + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); +@@ -628,7 +628,7 @@ static size_t sg_copy_buffer(struct scat + if (!sg_miter_skip(&miter, skip)) + return false; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; +@@ -645,7 +645,7 @@ static size_t sg_copy_buffer(struct scat + + sg_miter_stop(&miter); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return offset; + } + diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch new file mode 100644 index 000000000..64cf6f9f3 --- /dev/null +++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch @@ -0,0 +1,63 @@ +Subject: mm-vmalloc.patch +From: Thomas Gleixner +Date: Tue, 12 Jul 2011 11:39:36 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/vmalloc.c | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -796,7 +796,7 @@ static struct vmap_block *new_vmap_block + struct vmap_block *vb; + struct vmap_area *va; + unsigned long vb_idx; +- int node, err; ++ int node, err, cpu; + + node = numa_node_id(); + +@@ -834,11 +834,12 @@ static struct vmap_block *new_vmap_block + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + spin_lock(&vbq->lock); + list_add_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vb; + } +@@ -906,6 +907,7 @@ static void *vb_alloc(unsigned long size + struct vmap_block *vb; + unsigned long addr = 0; + unsigned int order; ++ int cpu = 0; + + BUG_ON(size & 
~PAGE_MASK); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -921,7 +923,8 @@ static void *vb_alloc(unsigned long size + + again: + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + int i; + +@@ -945,7 +948,7 @@ static void *vb_alloc(unsigned long size + spin_unlock(&vb->lock); + } + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + if (!addr) { diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch new file mode 100644 index 000000000..29f191094 --- /dev/null +++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch @@ -0,0 +1,40 @@ +Subject: mmci: Remove bogus local_irq_save() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:11:12 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On !RT interrupt runs with interrupts disabled. On RT it's in a +thread, so no need to disable interrupts at all. + +Signed-off-by: Thomas Gleixner +--- + drivers/mmc/host/mmci.c | 5 ----- + 1 file changed, 5 deletions(-) + +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1044,15 +1044,12 @@ static irqreturn_t mmci_pio_irq(int irq, + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -1092,8 +1089,6 @@ static irqreturn_t mmci_pio_irq(int irq, + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch new file mode 100644 index 000000000..2e1654e14 --- /dev/null +++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch @@ -0,0 +1,95 @@ +Date: Wed, 26 Jun 2013 15:28:11 -0400 +From: Steven Rostedt +To: linux-kernel@vger.kernel.org, + linux-rt-users +Cc: Thomas Gleixner , + Carsten Emde , + Sebastian Andrzej Siewior , + Clark Williams , + "Paul E. McKenney" +Subject: [RFC][PATCH RT 5/6] rt,ntp: Move call to schedule_delayed_work() to helper thread +References: <20130626192806.107564905@goodmis.org> +Content-Disposition: inline; filename=ntp-sched-delay-thread.patch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The ntp code for notify_cmos_timer() is called from a hard interrupt +context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks +that have been converted to mutexes, thus calling schedule_delayed_work() +from interrupt is not safe. + +Add a helper thread that does the call to schedule_delayed_work and wake +up that thread instead of calling schedule_delayed_work() directly. +This is only for CONFIG_PREEMPT_RT_FULL, otherwise the code still calls +schedule_delayed_work() directly in irq context. + +Note: There's a few places in the kernel that do this. Perhaps the RT +code should have a dedicated thread that does the checks. Just register +a notifier on boot up for your check and wake up the thread when +needed. 
This will be a todo. + +Signed-off-by: Steven Rostedt + +--- + kernel/time/ntp.c | 40 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -517,10 +518,49 @@ static void sync_cmos_clock(struct work_ + schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT can not call schedule_delayed_work from real interrupt context. ++ * Need to make a thread to do the real work. ++ */ ++static struct task_struct *cmos_delay_thread; ++static bool do_cmos_delay; ++ ++static int run_cmos_delay(void *ignore) ++{ ++ while (!kthread_should_stop()) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (do_cmos_delay) { ++ do_cmos_delay = false; ++ schedule_delayed_work(&sync_cmos_work, 0); ++ } ++ schedule(); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void ntp_notify_cmos_timer(void) ++{ ++ do_cmos_delay = true; ++ /* Make visible before waking up process */ ++ smp_wmb(); ++ wake_up_process(cmos_delay_thread); ++} ++ ++static __init int create_cmos_delay_thread(void) ++{ ++ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); ++ BUG_ON(!cmos_delay_thread); ++ return 0; ++} ++early_initcall(create_cmos_delay_thread); ++#else + void ntp_notify_cmos_timer(void) + { + schedule_delayed_work(&sync_cmos_work, 0); + } ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + #else + void ntp_notify_cmos_timer(void) { } diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch new file mode 100644 index 000000000..c4bc6ad0d --- /dev/null +++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch @@ -0,0 +1,18 @@ +Subject: mutex-no-spin-on-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:51:45 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/Kconfig.locks | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/Kconfig.locks ++++ b/kernel/Kconfig.locks +@@ -222,4 +222,4 @@ endif + + config MUTEX_SPIN_ON_OWNER + def_bool y +- depends on SMP && !DEBUG_MUTEXES ++ depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch new file mode 100644 index 000000000..f6d96f42c --- /dev/null +++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch @@ -0,0 +1,48 @@ +Subject: net: Another local_irq_disable/kmalloc headache +From: Thomas Gleixner +Date: Wed, 26 Sep 2012 16:21:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Replace it by a local lock. 
Though that's pretty inefficient :( + +Signed-off-by: Thomas Gleixner +--- + net/core/skbuff.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -62,6 +62,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -335,6 +336,7 @@ struct netdev_alloc_cache { + unsigned int pagecnt_bias; + }; + static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); + + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -343,7 +345,7 @@ static void *__netdev_alloc_frag(unsigne + int order; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = &__get_cpu_var(netdev_alloc_cache); + if (unlikely(!nc->frag.page)) { + refill: +@@ -377,7 +379,7 @@ static void *__netdev_alloc_frag(unsigne + nc->frag.offset += fragsz; + nc->pagecnt_bias--; + end: +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; + } + diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch new file mode 100644 index 000000000..b28e7d4aa --- /dev/null +++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch @@ -0,0 +1,74 @@ +Subject: net: netfilter: Serialize xt_write_recseq sections on RT +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 11:18:08 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The netfilter code relies only on the implicit semantics of +local_bh_disable() for serializing xt_write_recseq sections. RT breaks +that and needs explicit serialization here. + +Reported-by: Peter LaDow +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/netfilter/x_tables.h | 7 +++++++ + net/netfilter/core.c | 6 ++++++ + 2 files changed, 13 insertions(+) + +--- a/include/linux/netfilter/x_tables.h ++++ b/include/linux/netfilter/x_tables.h +@@ -3,6 +3,7 @@ + + + #include ++#include + #include + + /** +@@ -282,6 +283,8 @@ void xt_free_table_info(struct xt_table_ + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++ + /** + * xt_write_recseq_begin - start of a write section + * +@@ -296,6 +299,9 @@ static inline unsigned int xt_write_recs + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); ++ + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). 
+@@ -326,6 +332,7 @@ static inline void xt_write_recseq_end(u + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +--- a/net/netfilter/core.c ++++ b/net/netfilter/core.c +@@ -21,11 +21,17 @@ + #include + #include + #include ++#include + #include + #include + + #include "nf_internals.h" + ++#ifdef CONFIG_PREEMPT_RT_BASE ++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); ++EXPORT_PER_CPU_SYMBOL(xt_write_lock); ++#endif ++ + static DEFINE_MUTEX(afinfo_mutex); + + const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; diff --git a/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch new file mode 100644 index 000000000..c66e5a3ae --- /dev/null +++ b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch @@ -0,0 +1,112 @@ +Subject: net-flip-lock-dep-thingy.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 10:59:58 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +======================================================= +[ INFO: possible circular locking dependency detected ] +3.0.0-rc3+ #26 +------------------------------------------------------- +ip/1104 is trying to acquire lock: + (local_softirq_lock){+.+...}, at: [] __local_lock+0x25/0x68 + +but task is already holding lock: + (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 + +which lock already depends on the new lock. + + +the existing dependency chain (in reverse order) is: + +-> #1 (sk_lock-AF_INET){+.+...}: + [] lock_acquire+0x103/0x12e + [] lock_sock_nested+0x82/0x92 + [] lock_sock+0x10/0x12 + [] tcp_close+0x1b/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + +-> #0 (local_softirq_lock){+.+...}: + [] __lock_acquire+0xacc/0xdc8 + [] lock_acquire+0x103/0x12e + [] _raw_spin_lock+0x3b/0x4a + [] __local_lock+0x25/0x68 + [] local_bh_disable+0x36/0x3b + [] _raw_write_lock_bh+0x16/0x4f + [] tcp_close+0x159/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + +other info that might help us debug this: + + Possible unsafe locking scenario: + + CPU0 CPU1 + ---- ---- + lock(sk_lock-AF_INET); + lock(local_softirq_lock); + lock(sk_lock-AF_INET); + lock(local_softirq_lock); + + *** DEADLOCK *** + +1 lock held by ip/1104: + #0: (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 + +stack backtrace: +Pid: 1104, comm: ip Not tainted 3.0.0-rc3+ #26 +Call Trace: + [] print_circular_bug+0x1f8/0x209 + [] __lock_acquire+0xacc/0xdc8 + [] ? __local_lock+0x25/0x68 + [] lock_acquire+0x103/0x12e + [] ? __local_lock+0x25/0x68 + [] ? get_parent_ip+0x11/0x41 + [] _raw_spin_lock+0x3b/0x4a + [] ? __local_lock+0x25/0x68 + [] ? get_parent_ip+0x28/0x41 + [] __local_lock+0x25/0x68 + [] local_bh_disable+0x36/0x3b + [] ? 
lock_sock+0x10/0x12 + [] _raw_write_lock_bh+0x16/0x4f + [] tcp_close+0x159/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + + +Signed-off-by: Thomas Gleixner +--- + net/core/sock.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2337,12 +2337,11 @@ void lock_sock_nested(struct sock *sk, i + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + diff --git a/debian/patches/features/all/rt/net-gianfar-do-not-disable-interrupts.patch b/debian/patches/features/all/rt/net-gianfar-do-not-disable-interrupts.patch new file mode 100644 index 000000000..9cbdf5ad2 --- /dev/null +++ b/debian/patches/features/all/rt/net-gianfar-do-not-disable-interrupts.patch @@ -0,0 +1,246 @@ +From 271b3e203a92b4ba55b4a6ff3281a59e99ad486c Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 25 Mar 2014 18:34:20 +0100 +Subject: [PATCH 1/2] net: gianfar: do not disable interrupts +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Each per-queue lock is taken with spin_lock_irqsave() except in the case +where all of them are taken for some kind of serialisation. As an +optimisation, local_irq_save() is used so that lock_tx_qs() and +lock_rx_qs() can use just the spin_lock() variant instead. +On RT local_irq_save() behaves differently so we use the nort() +variant. +Lockdep screams easily with "ethtool -K eth0 rx off tx off" + +What remains is a missing lockdep annotation that makes lockdep think +lock_tx_qs() may cause a deadlock. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/freescale/gianfar.c | 16 +++++++-------- + drivers/net/ethernet/freescale/gianfar_ethtool.c | 8 +++---- + drivers/net/ethernet/freescale/gianfar_sysfs.c | 24 +++++++++++------------ + 3 files changed, 24 insertions(+), 24 deletions(-) + +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -1316,7 +1316,7 @@ static int gfar_suspend(struct device *d + + if (netif_running(ndev)) { + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -1334,7 +1334,7 @@ static int gfar_suspend(struct device *d + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + disable_napi(priv); + +@@ -1376,7 +1376,7 @@ static int gfar_resume(struct device *de + /* Disable Magic Packet mode, in case something + * else woke us up. 
+ */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -1388,7 +1388,7 @@ static int gfar_resume(struct device *de + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + netif_device_attach(ndev); + +@@ -2402,7 +2402,7 @@ void gfar_vlan_mode(struct net_device *d + u32 tempval; + + regs = priv->gfargrp[0].regs; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_rx_qs(priv); + + if (features & NETIF_F_HW_VLAN_CTAG_TX) { +@@ -2435,7 +2435,7 @@ void gfar_vlan_mode(struct net_device *d + gfar_change_mtu(dev, dev->mtu); + + unlock_rx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static int gfar_change_mtu(struct net_device *dev, int new_mtu) +@@ -3385,14 +3385,14 @@ static irqreturn_t gfar_error(int irq, v + dev->stats.tx_dropped++; + atomic64_inc(&priv->extra_stats.tx_underrun); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + /* Reactivate the Tx Queues */ + gfar_write(®s->tstat, gfargrp->tstat); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); + } +--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c ++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c +@@ -500,7 +500,7 @@ static int gfar_sringparam(struct net_de + /* Halt TX and RX, and process the frames which + * have already been received + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -508,7 +508,7 @@ static int gfar_sringparam(struct net_de + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + for (i = 0; i < priv->num_rx_queues; i++) + gfar_clean_rx_ring(priv->rx_queue[i], +@@ -623,7 +623,7 @@ int gfar_set_features(struct net_device + /* Halt TX and RX, and process the frames which + * have already been received + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -631,7 +631,7 @@ int gfar_set_features(struct net_device + + unlock_tx_qs(priv); + unlock_rx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + for (i = 0; i < priv->num_rx_queues; i++) + gfar_clean_rx_ring(priv->rx_queue[i], +--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c ++++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c +@@ -67,7 +67,7 @@ static ssize_t gfar_set_bd_stash(struct + return count; + + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_rx_qs(priv); + + /* Set the new stashing value */ +@@ -83,7 +83,7 @@ static ssize_t gfar_set_bd_stash(struct + gfar_write(®s->attr, temp); + + unlock_rx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } +@@ -111,7 +111,7 @@ static ssize_t gfar_set_rx_stash_size(st + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) + return count; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_rx_qs(priv); + + if (length > priv->rx_buffer_size) +@@ -139,7 +139,7 @@ static ssize_t gfar_set_rx_stash_size(st + + out: + unlock_rx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } +@@ -170,7 +170,7 @@ static ssize_t gfar_set_rx_stash_index(s + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) + return count; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_rx_qs(priv); + + if (index > 
priv->rx_stash_size) +@@ -188,7 +188,7 @@ static ssize_t gfar_set_rx_stash_index(s + + out: + unlock_rx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } +@@ -218,7 +218,7 @@ static ssize_t gfar_set_fifo_threshold(s + if (length > GFAR_MAX_FIFO_THRESHOLD) + return count; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + priv->fifo_threshold = length; +@@ -229,7 +229,7 @@ static ssize_t gfar_set_fifo_threshold(s + gfar_write(®s->fifo_tx_thr, temp); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } +@@ -258,7 +258,7 @@ static ssize_t gfar_set_fifo_starve(stru + if (num > GFAR_MAX_FIFO_STARVE) + return count; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + priv->fifo_starve = num; +@@ -269,7 +269,7 @@ static ssize_t gfar_set_fifo_starve(stru + gfar_write(®s->fifo_tx_starve, temp); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } +@@ -299,7 +299,7 @@ static ssize_t gfar_set_fifo_starve_off( + if (num > GFAR_MAX_FIFO_STARVE_OFF) + return count; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + priv->fifo_starve_off = num; +@@ -310,7 +310,7 @@ static ssize_t gfar_set_fifo_starve_off( + gfar_write(®s->fifo_tx_starve_shutoff, temp); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return count; + } diff --git a/debian/patches/features/all/rt/net-gianfar-do-not-try-to-cleanup-TX-packets-if-they.patch b/debian/patches/features/all/rt/net-gianfar-do-not-try-to-cleanup-TX-packets-if-they.patch new file mode 100644 index 000000000..0aeb6e624 --- /dev/null +++ b/debian/patches/features/all/rt/net-gianfar-do-not-try-to-cleanup-TX-packets-if-they.patch @@ -0,0 +1,68 @@ +From 70eadd6aab86fcd9c68be4a213e9aa53b196eeab Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 27 Mar 2014 14:09:02 +0100 +Subject: [PATCH 2/2] net: gianfar: do not try to cleanup TX packets if they + are not done +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +What I observe is that the TX queue is not empty and does not make any +progress. gfar_clean_tx_ring() does not clean up the packet because it +is not completed yet. +The root cause is that the DMA engine did not start yet (it was +preempted before doing so) and that dumb loop, loops until that packet +is gone. +This is broken since c233cf4 ("gianfar: Fix tx napi polling"). + +What remains are spurious interrupts if CPU0 cleans up TX packages and +CPU1 returns with IRQ_NONE. 
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/freescale/gianfar.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -135,7 +135,6 @@ static int gfar_poll_sq(struct napi_stru + static void gfar_netpoll(struct net_device *dev); + #endif + int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); +-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); + static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi); + void gfar_halt(struct net_device *dev); +@@ -2531,7 +2530,7 @@ static void gfar_align_skb(struct sk_buf + } + + /* Interrupt Handler for Transmit complete */ +-static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) ++static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) + { + struct net_device *dev = tx_queue->dev; + struct netdev_queue *txq; +@@ -2631,6 +2630,7 @@ static void gfar_clean_tx_ring(struct gf + tx_queue->dirty_tx = bdp; + + netdev_tx_completed_queue(txq, howmany, bytes_sent); ++ return howmany; + } + + static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) +@@ -2952,10 +2952,14 @@ static int gfar_poll(struct napi_struct + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { +- gfar_clean_tx_ring(tx_queue); +- has_tx_work = 1; ++ int ret; ++ ++ ret = gfar_clean_tx_ring(tx_queue); ++ if (ret) ++ has_tx_work++; + } + } ++ work_done += has_tx_work; + + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + /* skip queue if not active */ diff --git a/debian/patches/features/all/rt/net-ip_send_unicast_reply-add-missing-local-serializ.patch b/debian/patches/features/all/rt/net-ip_send_unicast_reply-add-missing-local-serializ.patch new file mode 100644 index 000000000..7da2b2ef7 --- /dev/null +++ b/debian/patches/features/all/rt/net-ip_send_unicast_reply-add-missing-local-serializ.patch @@ -0,0 +1,102 @@ +From e047fce21592ed3959e21ed803a7577d3c20e394 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Sun, 29 Dec 2013 18:11:54 +0100 +Subject: [PATCH] net: ip_send_unicast_reply: add missing local serialization +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +in response to the oops in ip_output.c:ip_send_unicast_reply under high +network load with CONFIG_PREEMPT_RT_FULL=y, reported by Sami Pietikainen +, this patch adds local serialization in +ip_send_unicast_reply. + +from ip_output.c: +/* + * Generic function to send a packet as reply to another packet. + * Used to send some TCP resets/acks so far. + * + * Use a fake percpu inet socket to avoid false sharing and contention. + */ +static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { +... + +which was added in commit be9f4a44 in linux-stable. The git log, wich +introduced the PER_CPU unicast_sock, states: + +commit be9f4a44e7d41cee50ddb5f038fc2391cbbb4046 +Author: Eric Dumazet +Date: Thu Jul 19 07:34:03 2012 +0000 + + ipv4: tcp: remove per net tcp_sock + + tcp_v4_send_reset() and tcp_v4_send_ack() use a single socket + per network namespace. + + This leads to bad behavior on multiqueue NICS, because many cpus + contend for the socket lock and once socket lock is acquired, extra + false sharing on various socket fields slow down the operations. + + To better resist to attacks, we use a percpu socket. 
Each cpu can + run without contention, using appropriate memory (local node) + + +The per-cpu here thus is assuming exclusive per-cpu serialization - so +the use of get_cpu_light introduced in +net-use-cpu-light-in-ip-send-unicast-reply.patch, which dropped the +preempt_disable in favor of a migrate_disable, is probably wrong as this +only handles the referential consistency but not the serialization. To +avoid a preempt_disable here a local lock would be needed. + +Remedy: + * add local lock: + * and re-introduce local serialization: + +Tested on x86 with high network load using the testcase from Sami Pietikainen + while : ; do wget -O - ftp://LOCAL_SERVER/empty_file > /dev/null 2>&1; done + +Link: http://www.spinics.net/lists/linux-rt-users/msg11007.html +Cc: stable-rt@vger.kernel.org +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + net/ipv4/ip_output.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -79,6 +79,7 @@ + #include + #include + #include ++#include + + int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; + EXPORT_SYMBOL(sysctl_ip_default_ttl); +@@ -1477,6 +1478,9 @@ static DEFINE_PER_CPU(struct inet_sock, + .uc_ttl = -1, + }; + ++/* serialize concurrent calls on the same CPU to ip_send_unicast_reply */ ++static DEFINE_LOCAL_IRQ_LOCK(unicast_lock); ++ + void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + __be32 saddr, const struct ip_reply_arg *arg, + unsigned int len) +@@ -1516,8 +1520,7 @@ void ip_send_unicast_reply(struct net *n + if (IS_ERR(rt)) + return; + +- get_cpu_light(); +- inet = &__get_cpu_var(unicast_sock); ++ inet = &get_locked_var(unicast_lock, unicast_sock); + + inet->tos = arg->tos; + sk = &inet->sk; +@@ -1541,7 +1544,7 @@ void ip_send_unicast_reply(struct net *n + ip_push_pending_frames(sk, &fl4); + } + +- put_cpu_light(); ++ put_locked_var(unicast_lock, unicast_sock); + + ip_rt_put(rt); + } diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch new file mode 100644 index 000000000..0e35ff8ec --- /dev/null +++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch @@ -0,0 +1,105 @@ +From: Sebastian Andrzej Siewior +Date: Wed, 20 Mar 2013 18:06:20 +0100 +Subject: [PATCH] net: Add a mutex around devnet_rename_seq +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On RT write_seqcount_begin() disables preemption and device_rename() +allocates memory with GFP_KERNEL and later grabs the sysfs_mutex +mutex. Serialize with a mutex and use the non preemption disabling +__write_seqcount_begin(). + +To avoid writer starvation, let the reader grab the mutex and release +it when it detects a writer in progress. This keeps the normal case +(no reader on the fly) fast. 
+ +[ tglx: Instead of replacing the seqcount by a mutex, add the mutex ] + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 34 ++++++++++++++++++++-------------- + 1 file changed, 20 insertions(+), 14 deletions(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -178,6 +178,7 @@ static unsigned int napi_gen_id; + static DEFINE_HASHTABLE(napi_hash, 8); + + static seqcount_t devnet_rename_seq; ++static DEFINE_MUTEX(devnet_rename_mutex); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -828,7 +829,8 @@ int netdev_get_name(struct net *net, cha + strcpy(name, dev->name); + rcu_read_unlock(); + if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); ++ mutex_lock(&devnet_rename_mutex); ++ mutex_unlock(&devnet_rename_mutex); + goto retry; + } + +@@ -1094,30 +1096,28 @@ int dev_change_name(struct net_device *d + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + +- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); +- return 0; +- } ++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) ++ goto outunlock; + + memcpy(oldname, dev->name, IFNAMSIZ); + + err = dev_get_valid_name(net, dev, newname); +- if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); +- return err; +- } ++ if (err < 0) ++ goto outunlock; + + rollback: + ret = device_rename(&dev->dev, dev->name); + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); +- write_seqcount_end(&devnet_rename_seq); +- return ret; ++ err = ret; ++ goto outunlock; + } + +- write_seqcount_end(&devnet_rename_seq); ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1138,7 +1138,8 @@ int dev_change_name(struct net_device *d + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + goto rollback; +@@ -1149,6 +1150,11 @@ int dev_change_name(struct net_device *d + } + + return err; ++ ++outunlock: ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); ++ return err; + } + + /** diff --git a/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch new file mode 100644 index 000000000..bdc6f5824 --- /dev/null +++ b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch @@ -0,0 +1,32 @@ +Subject: net: Use local_bh_disable in netif_rx_ni() +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 15:12:49 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This code triggers the new WARN in __raise_softirq_irqsoff() though it +actually looks at the softirq pending bit and calls into the softirq +code, but that fits not well with the context related softirq model of +RT. It's correct on mainline though, but going through +local_bh_disable/enable here is not going to hurt badly. 
+ +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3298,11 +3298,9 @@ int netif_rx_ni(struct sk_buff *skb) + + trace_netif_rx_ni_entry(skb); + +- migrate_disable(); ++ local_bh_disable(); + err = netif_rx_internal(skb); +- if (local_softirq_pending()) +- thread_do_softirq(); +- migrate_enable(); ++ local_bh_enable(); + + return err; + } diff --git a/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch new file mode 100644 index 000000000..90d28ec82 --- /dev/null +++ b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch @@ -0,0 +1,26 @@ +Subject: net-netif_rx_ni-migrate-disable.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 16:29:27 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3298,11 +3298,11 @@ int netif_rx_ni(struct sk_buff *skb) + + trace_netif_rx_ni_entry(skb); + +- preempt_disable(); ++ migrate_disable(); + err = netif_rx_internal(skb); + if (local_softirq_pending()) + thread_do_softirq(); +- preempt_enable(); ++ migrate_enable(); + + return err; + } diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch new file mode 100644 index 000000000..bf88fbb4b --- /dev/null +++ b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch @@ -0,0 +1,60 @@ +From dc0674747e073c118c1e822b2af22bb4d1513211 Mon Sep 17 00:00:00 2001 +From: Marc Kleine-Budde +Date: Wed, 5 Mar 2014 00:49:47 +0100 +Subject: [PATCH] net: sched: dev_deactivate_many(): use msleep(1) instead of + yield() to wait for outstanding qdisc_run calls +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 +(by default). If a high priority userspace process tries to shut down a busy +network interface it might spin in a yield loop waiting for the device to +become idle. With the interrupt thread having a lower priority than the +looping process it might never be scheduled and so result in a deadlock on UP +systems. 
+ +With Magic SysRq the following backtrace can be produced: + +> test_app R running 0 174 168 0x00000000 +> [] (__schedule+0x220/0x3fc) from [] (preempt_schedule_irq+0x48/0x80) +> [] (preempt_schedule_irq+0x48/0x80) from [] (svc_preempt+0x8/0x20) +> [] (svc_preempt+0x8/0x20) from [] (local_bh_enable+0x18/0x88) +> [] (local_bh_enable+0x18/0x88) from [] (dev_deactivate_many+0x220/0x264) +> [] (dev_deactivate_many+0x220/0x264) from [] (__dev_close_many+0x64/0xd4) +> [] (__dev_close_many+0x64/0xd4) from [] (__dev_close+0x28/0x3c) +> [] (__dev_close+0x28/0x3c) from [] (__dev_change_flags+0x88/0x130) +> [] (__dev_change_flags+0x88/0x130) from [] (dev_change_flags+0x10/0x48) +> [] (dev_change_flags+0x10/0x48) from [] (do_setlink+0x370/0x7ec) +> [] (do_setlink+0x370/0x7ec) from [] (rtnl_newlink+0x2b4/0x450) +> [] (rtnl_newlink+0x2b4/0x450) from [] (rtnetlink_rcv_msg+0x158/0x1f4) +> [] (rtnetlink_rcv_msg+0x158/0x1f4) from [] (netlink_rcv_skb+0xac/0xc0) +> [] (netlink_rcv_skb+0xac/0xc0) from [] (rtnetlink_rcv+0x18/0x24) +> [] (rtnetlink_rcv+0x18/0x24) from [] (netlink_unicast+0x13c/0x198) +> [] (netlink_unicast+0x13c/0x198) from [] (netlink_sendmsg+0x264/0x2e0) +> [] (netlink_sendmsg+0x264/0x2e0) from [] (sock_sendmsg+0x78/0x98) +> [] (sock_sendmsg+0x78/0x98) from [] (___sys_sendmsg.part.25+0x268/0x278) +> [] (___sys_sendmsg.part.25+0x268/0x278) from [] (__sys_sendmsg+0x48/0x78) +> [] (__sys_sendmsg+0x48/0x78) from [] (ret_fast_syscall+0x0/0x2c) + +This patch works around the problem by replacing yield() by msleep(1), giving +the interrupt thread time to finish, similar to other changes contained in the +rt patch set. Using wait_for_completion() instead would probably be a better +solution. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Sebastian Andrzej Siewior +--- + net/sched/sch_generic.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -850,7 +850,7 @@ void dev_deactivate_many(struct list_hea + /* Wait for outstanding qdisc_run calls. */ + list_for_each_entry(dev, head, close_list) + while (some_qdisc_is_busy(dev)) +- yield(); ++ msleep(1); + } + + void dev_deactivate(struct net_device *dev) diff --git a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch new file mode 100644 index 000000000..1b78a0dc9 --- /dev/null +++ b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch @@ -0,0 +1,93 @@ +Subject: net: Avoid livelock in net_tx_action() on RT +From: Steven Rostedt +Date: Thu, 06 Oct 2011 10:48:39 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +qdisc_lock is taken w/o disabling interrupts or bottom halfs. So code +holding a qdisc_lock() can be interrupted and softirqs can run on the +return of interrupt in !RT. + +The spin_trylock() in net_tx_action() makes sure, that the softirq +does not deadlock. When the lock can't be acquired q is requeued and +the NET_TX softirq is raised. That causes the softirq to run over and +over. + +That works in mainline as do_softirq() has a retry loop limit and +leaves the softirq processing in the interrupt return path and +schedules ksoftirqd. The task which holds qdisc_lock cannot be +preempted, so the lock is released and either ksoftirqd or the next +softirq in the return from interrupt path can proceed. 
Though it's a +bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it +decides to bail out even if it's clear in the first iteration :) + +On RT all softirq processing is done in a FIFO thread and we don't +have a loop limit, so ksoftirqd preempts the lock holder forever and +unqueues and requeues until the reset button is hit. + +Due to the forced threading of ksoftirqd on RT we actually cannot +deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to +replace the spin_trylock() with a spin_lock(). When contended, +ksoftirqd is scheduled out and the lock holder can proceed. + +[ tglx: Massaged changelog and code comments ] + +Solved-by: Thomas Gleixner +Signed-off-by: Steven Rostedt +Tested-by: Carsten Emde +Cc: Clark Williams +Cc: John Kacur +Cc: Luis Claudio R. Goncalves +Signed-off-by: Thomas Gleixner + +--- + net/core/dev.c | 32 +++++++++++++++++++++++++++++++- + 1 file changed, 31 insertions(+), 1 deletion(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3308,6 +3308,36 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT runs ksoftirqd as a real time thread and the root_lock is a ++ * "sleeping spinlock". If the trylock fails then we can go into an ++ * infinite loop when ksoftirqd preempted the task which actually ++ * holds the lock, because we requeue q and raise NET_TX softirq ++ * causing ksoftirqd to loop forever. ++ * ++ * It's safe to use spin_lock on RT here as softirqs run in thread ++ * context and cannot deadlock against the thread which is holding ++ * root_lock. ++ * ++ * On !RT the trylock might fail, but there we bail out from the ++ * softirq loop after 10 attempts which we can't do on RT. And the ++ * task holding root_lock cannot be preempted, so the only downside of ++ * that trylock is that we need 10 loops to decide that we should have ++ * given up in the first one :) ++ */ ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ return 1; ++} ++#else ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ return spin_trylock(lock); ++} ++#endif ++ + static void net_tx_action(struct softirq_action *h) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); +@@ -3349,7 +3379,7 @@ static void net_tx_action(struct softirq + head = head->next_sched; + + root_lock = qdisc_lock(q); +- if (spin_trylock(root_lock)) { ++ if (take_root_lock(root_lock)) { + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch new file mode 100644 index 000000000..33185df90 --- /dev/null +++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch @@ -0,0 +1,63 @@ +Subject: net: Use cpu_chill() instead of cpu_relax() +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 21:10:04 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Use cpu_chill() instead of cpu_relax() to let the system +make progress. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + net/packet/af_packet.c | 5 +++-- + net/rds/ib_rdma.c | 3 ++- + 2 files changed, 5 insertions(+), 3 deletions(-) + +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -700,7 +701,7 @@ static void prb_retire_rx_blk_timer_expi + if (BLOCK_NUM_PKTS(pbd)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + +@@ -951,7 +952,7 @@ static void prb_retire_current_block(str + if (!(status & TP_STATUS_BLK_TMO)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + prb_close_block(pkc, pbd, po, status); +--- a/net/rds/ib_rdma.c ++++ b/net/rds/ib_rdma.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include "rds.h" + #include "ib.h" +@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace + for_each_online_cpu(cpu) { + flag = &per_cpu(clean_list_grace, cpu); + while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) +- cpu_relax(); ++ cpu_chill(); + } + } + diff --git a/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch new file mode 100644 index 000000000..b0e623592 --- /dev/null +++ b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch @@ -0,0 +1,31 @@ +Subject: net: Use get_cpu_light() in ip_send_unicast_reply() +From: Thomas Gleixner +Date: Mon, 01 Oct 2012 17:12:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/ipv4/ip_output.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -1516,7 +1516,8 @@ void ip_send_unicast_reply(struct net *n + if (IS_ERR(rt)) + return; + +- inet = &get_cpu_var(unicast_sock); ++ get_cpu_light(); ++ inet = &__get_cpu_var(unicast_sock); + + inet->tos = arg->tos; + sk = &inet->sk; +@@ -1540,7 +1541,7 @@ void ip_send_unicast_reply(struct net *n + ip_push_pending_frames(sk, &fl4); + } + +- put_cpu_var(unicast_sock); ++ put_cpu_light(); + + ip_rt_put(rt); + } diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch new file mode 100644 index 000000000..af01ebcb0 --- /dev/null +++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch @@ -0,0 +1,21 @@ +Subject: net-wireless-warn-nort.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 21:05:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/mac80211/rx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -3346,7 +3346,7 @@ void ieee80211_rx(struct ieee80211_hw *h + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + goto drop; diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch new file mode 100644 index 000000000..990a8ae85 --- /dev/null +++ 
b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch @@ -0,0 +1,143 @@ +From: Oleg Nesterov +Subject: signal/x86: Delay calling signals in atomic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On x86_64 we must disable preemption before we enable interrupts +for stack faults, int3 and debugging, because the current task is using +a per CPU debug stack defined by the IST. If we schedule out, another task +can come in and use the same stack and cause the stack to be corrupted +and crash the kernel on return. + +When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and +one of these is the spin lock used in signal handling. + +Some of the debug code (int3) causes do_trap() to send a signal. +This function calls a spin lock that has been converted to a mutex +and has the possibility to sleep. If this happens, the above issues with +the corrupted stack is possible. + +Instead of calling the signal right away, for PREEMPT_RT and x86_64, +the signal information is stored on the stacks task_struct and +TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume +code will send the signal when preemption is enabled. + +[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to + ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ] + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Oleg Nesterov +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner +--- + + arch/x86/include/asm/signal.h | 13 +++++++++++++ + arch/x86/kernel/signal.c | 8 ++++++++ + include/linux/sched.h | 4 ++++ + kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++-- + 4 files changed, 60 insertions(+), 2 deletions(-) + +--- a/arch/x86/include/asm/signal.h ++++ b/arch/x86/include/asm/signal.h +@@ -23,6 +23,19 @@ typedef struct { + unsigned long sig[_NSIG_WORDS]; + } sigset_t; + ++/* ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. ++ */ ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) ++#define ARCH_RT_DELAYS_SIGNAL_SEND ++#endif ++ + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -739,6 +739,14 @@ do_notify_resume(struct pt_regs *regs, v + mce_notify_process(); + #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ + ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (unlikely(current->forced_info.si_signo)) { ++ struct task_struct *t = current; ++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); ++ t->forced_info.si_signo = 0; ++ } ++#endif ++ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1345,6 +1345,10 @@ struct task_struct { + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? 
*/ ++ struct siginfo forced_info; ++#endif + + unsigned long sas_ss_sp; + size_t sas_ss_size; +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1305,8 +1305,8 @@ int do_send_sig_info(int sig, struct sig + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +-int +-force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++static int ++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + unsigned long int flags; + int ret, blocked, ignored; +@@ -1331,6 +1331,39 @@ force_sig_info(int sig, struct siginfo * + return ret; + } + ++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++{ ++/* ++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap ++ * since it can not enable preemption, and the signal code's spin_locks ++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will ++ * send the signal on exit of the trap. ++ */ ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (in_atomic()) { ++ if (WARN_ON_ONCE(t != current)) ++ return 0; ++ if (WARN_ON_ONCE(t->forced_info.si_signo)) ++ return 0; ++ ++ if (is_si_special(info)) { ++ WARN_ON_ONCE(info != SEND_SIG_PRIV); ++ t->forced_info.si_signo = sig; ++ t->forced_info.si_errno = 0; ++ t->forced_info.si_code = SI_KERNEL; ++ t->forced_info.si_pid = 0; ++ t->forced_info.si_uid = 0; ++ } else { ++ t->forced_info = *info; ++ } ++ ++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); ++ return 0; ++ } ++#endif ++ return do_force_sig_info(sig, info, t); ++} ++ + /* + * Nuke all other threads in the group. + */ diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch new file mode 100644 index 000000000..4c666fc1f --- /dev/null +++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch @@ -0,0 +1,22 @@ +From: Thomas Gleixner +Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +--- + kernel/panic.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -368,9 +368,11 @@ static u64 oops_id; + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch new file mode 100644 index 000000000..ef3fe7b6a --- /dev/null +++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch @@ -0,0 +1,166 @@ +Subject: rcu: Make ksoftirqd do RCU quiescent states +From: "Paul E. McKenney" +Date: Wed, 5 Oct 2011 11:45:18 -0700 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable +to network-based denial-of-service attacks. This patch therefore +makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() +is running in ksoftirqd context. A wrapper layer in interposed so that +other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying +function __do_softirq_common() does the actual work. 
+ +The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is +that there might be a local_bh_enable() inside an RCU-preempt read-side +critical section. This local_bh_enable() can invoke __do_softirq() +directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just +calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be +an illegal RCU-preempt quiescent state in the middle of an RCU-preempt +read-side critical section. Therefore, quiescent states can only happen +in cases where __do_softirq() is invoked directly from ksoftirqd. + +Signed-off-by: Paul E. McKenney +Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com +Signed-off-by: Thomas Gleixner + +--- + include/linux/rcupdate.h | 6 ------ + kernel/rcu/tree.c | 9 ++++++++- + kernel/rcu/tree_plugin.h | 8 +++++++- + kernel/softirq.c | 18 ++++++++++++------ + 4 files changed, 27 insertions(+), 14 deletions(-) + +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -229,13 +229,7 @@ static inline int rcu_preempt_depth(void + /* Internal to kernel */ + void rcu_init(void); + void rcu_sched_qs(int cpu); +- +-#ifdef CONFIG_PREEMPT_RT_FULL +-static inline void rcu_bh_qs(int cpu) { } +-#else + void rcu_bh_qs(int cpu); +-#endif +- + void rcu_check_callbacks(int cpu, int user); + struct notifier_block; + void rcu_idle_enter(void); +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -199,7 +199,14 @@ void rcu_sched_qs(int cpu) + rdp->passed_quiesce = 1; + } + +-#ifndef CONFIG_PREEMPT_RT_FULL ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(int cpu); ++ ++void rcu_bh_qs(int cpu) ++{ ++ rcu_preempt_qs(cpu); ++} ++#else + void rcu_bh_qs(int cpu) + { + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -1575,7 +1575,7 @@ static void rcu_prepare_kthreads(int cpu + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) + + /* + * Check to see if any future RCU-related work will need to be done +@@ -1591,6 +1591,9 @@ int rcu_needs_cpu(int cpu, unsigned long + *delta_jiffies = ULONG_MAX; + return rcu_cpu_has_callbacks(cpu, NULL); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ ++ ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up +@@ -1688,6 +1691,8 @@ static bool rcu_try_advance_all_cbs(void + return cbs_ready; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready + * to invoke. If the CPU has callbacks, try to advance them. Tell the +@@ -1726,6 +1731,7 @@ int rcu_needs_cpu(int cpu, unsigned long + } + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Prepare a CPU for idle from an RCU perspective. 
The first major task +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -141,7 +141,7 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + +-static void handle_pending_softirqs(u32 pending, int cpu) ++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) + { + struct softirq_action *h = softirq_vec; + unsigned int prev_count = preempt_count(); +@@ -167,7 +167,8 @@ static void handle_pending_softirqs(u32 + prev_count, preempt_count()); + preempt_count_set(prev_count); + } +- rcu_bh_qs(cpu); ++ if (need_rcu_bh_qs) ++ rcu_bh_qs(cpu); + h++; + pending >>= softirq_bit; + } +@@ -349,7 +350,7 @@ asmlinkage void __do_softirq(void) + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- handle_pending_softirqs(pending, cpu); ++ handle_pending_softirqs(pending, cpu, 1); + + pending = local_softirq_pending(); + if (pending) { +@@ -398,7 +399,12 @@ static void ksoftirqd_clr_sched_params(u + static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); + static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); + +-asmlinkage void __do_softirq(void); ++static void __do_softirq_common(int need_rcu_bh_qs); ++ ++asmlinkage __do_softirq(void) ++{ ++ __do_softirq_common(0); ++} + + void __init softirq_early_init(void) + { +@@ -507,7 +513,7 @@ asmlinkage void __do_softirq(void) + + lockdep_softirq_enter(); + +- handle_pending_softirqs(pending, cpu); ++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); + + pending = local_softirq_pending(); + if (pending) +@@ -546,7 +552,7 @@ static int __thread_do_softirq(int cpu) + * schedule! + */ + if (local_softirq_pending()) +- __do_softirq(); ++ __do_softirq_common(cpu >= 0); + local_unlock(local_softirq_lock); + unpin_current_cpu(); + preempt_disable(); diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch new file mode 100644 index 000000000..0a6a8988e --- /dev/null +++ b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch @@ -0,0 +1,26 @@ +Subject: pci: Use __wake_up_all_locked pci_unblock_user_cfg_access() +From: Thomas Gleixner +Date: Thu, 01 Dec 2011 00:07:16 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The waitqueue is protected by the pci_lock, so we can just avoid to +lock the waitqueue lock itself. 
That prevents the +might_sleep()/scheduling while atomic problem on RT + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + drivers/pci/access.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pci/access.c ++++ b/drivers/pci/access.c +@@ -441,7 +441,7 @@ void pci_cfg_access_unlock(struct pci_de + WARN_ON(!dev->block_cfg_access); + + dev->block_cfg_access = 0; +- wake_up_all(&pci_cfg_wait); ++ wake_up_all_locked(&pci_cfg_wait); + raw_spin_unlock_irqrestore(&pci_lock, flags); + } + EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch new file mode 100644 index 000000000..34a6dee1c --- /dev/null +++ b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch @@ -0,0 +1,28 @@ +From 49faecbc581de038b423d7abbebe0d7b50ed15ef Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 8 Apr 2013 16:08:46 +0200 +Subject: [PATCH] percpu-rwsem: compile fix +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The shortcut on mainline skip lockdep. No idea why this is a good thing. + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/percpu-rwsem.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/kernel/locking/percpu-rwsem.c ++++ b/kernel/locking/percpu-rwsem.c +@@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_s + + down_read(&brw->rw_sem); + atomic_inc(&brw->slow_read_ctr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ up_read(&brw->rw_sem); ++#else + /* avoid up_read()->rwsem_release() */ + __up_read(&brw->rw_sem); ++#endif + } + + void percpu_up_read(struct percpu_rw_semaphore *brw) diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch new file mode 100644 index 000000000..43d741cca --- /dev/null +++ b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch @@ -0,0 +1,103 @@ +From a7fac98271eef0bd3b872c56ba8fc1989646d3fd Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 9 Apr 2014 11:58:17 +0200 +Subject: [PATCH] percpu_ida: use locklocks +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +the local_irq_save() + spin_lock() does not work that well on -RT + +Signed-off-by: Sebastian Andrzej Siewior +--- + lib/percpu_ida.c | 20 ++++++++++++-------- + 1 file changed, 12 insertions(+), 8 deletions(-) + +--- a/lib/percpu_ida.c ++++ b/lib/percpu_ida.c +@@ -29,6 +29,9 @@ + #include + #include + #include ++#include ++ ++static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); + + struct percpu_ida_cpu { + /* +@@ -151,13 +154,13 @@ int percpu_ida_alloc(struct percpu_ida * + unsigned long flags; + int tag; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + /* Fastpath */ + tag = alloc_local_tag(tags); + if (likely(tag >= 0)) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return tag; + } + +@@ -176,6 +179,7 @@ int percpu_ida_alloc(struct percpu_ida * + + if (!tags->nr_free) + alloc_global_tags(pool, tags); ++ + if (!tags->nr_free) + steal_tags(pool, tags); + +@@ -187,7 +191,7 @@ int percpu_ida_alloc(struct percpu_ida * + } + + spin_unlock(&pool->lock); +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + + if (tag >= 0 || state == TASK_RUNNING) + break; +@@ -199,7 +203,7 @@ int percpu_ida_alloc(struct percpu_ida * 
+ + schedule(); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + } + if (state != TASK_RUNNING) +@@ -224,7 +228,7 @@ void percpu_ida_free(struct percpu_ida * + + BUG_ON(tag >= pool->nr_tags); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + spin_lock(&tags->lock); +@@ -256,7 +260,7 @@ void percpu_ida_free(struct percpu_ida * + spin_unlock(&pool->lock); + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + } + EXPORT_SYMBOL_GPL(percpu_ida_free); + +@@ -348,7 +352,7 @@ int percpu_ida_for_each_free(struct perc + struct percpu_ida_cpu *remote; + unsigned cpu, i, err = 0; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + for_each_possible_cpu(cpu) { + remote = per_cpu_ptr(pool->tag_cpu, cpu); + spin_lock(&remote->lock); +@@ -370,7 +374,7 @@ int percpu_ida_for_each_free(struct perc + } + spin_unlock(&pool->lock); + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return err; + } + EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch new file mode 100644 index 000000000..0bc0fdb73 --- /dev/null +++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch @@ -0,0 +1,69 @@ +From: Yong Zhang +Date: Wed, 11 Jul 2012 22:05:21 +0000 +Subject: perf: Make swevent hrtimer run in irq instead of softirq +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Otherwise we get a deadlock like below: + +[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003 +[ 1044.042752] INFO: lockdep is turned off. +[ 1044.042754] Modules linked in: +[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29 +[ 1044.042759] Call Trace: +[ 1044.042761] [] __schedule_bug+0x65/0x80 +[ 1044.042770] [] __schedule+0x83c/0xa70 +[ 1044.042775] [] ? prepare_to_wait+0x32/0xb0 +[ 1044.042779] [] schedule+0x2e/0xa0 +[ 1044.042782] [] hrtimer_wait_for_timer+0x6d/0xb0 +[ 1044.042786] [] ? wake_up_bit+0x40/0x40 +[ 1044.042790] [] hrtimer_cancel+0x20/0x40 +[ 1044.042794] [] perf_swevent_cancel_hrtimer+0x3c/0x50 +[ 1044.042798] [] task_clock_event_stop+0x11/0x40 +[ 1044.042802] [] task_clock_event_del+0xe/0x10 +[ 1044.042805] [] event_sched_out+0x118/0x1d0 +[ 1044.042809] [] group_sched_out+0x29/0x90 +[ 1044.042813] [] __perf_event_disable+0x18e/0x200 +[ 1044.042817] [] remote_function+0x63/0x70 +[ 1044.042821] [] generic_smp_call_function_single_interrupt+0xce/0x120 +[ 1044.042826] [] smp_call_function_single_interrupt+0x27/0x40 +[ 1044.042831] [] call_function_single_interrupt+0x6c/0x80 +[ 1044.042833] [] ? perf_event_overflow+0x20/0x20 +[ 1044.042840] [] ? _raw_spin_unlock_irq+0x30/0x70 +[ 1044.042844] [] ? _raw_spin_unlock_irq+0x36/0x70 +[ 1044.042848] [] run_hrtimer_softirq+0xc2/0x200 +[ 1044.042853] [] ? perf_event_overflow+0x20/0x20 +[ 1044.042857] [] __do_softirq_common+0xf5/0x3a0 +[ 1044.042862] [] __thread_do_softirq+0x15d/0x200 +[ 1044.042865] [] run_ksoftirqd+0xfa/0x210 +[ 1044.042869] [] ? __thread_do_softirq+0x200/0x200 +[ 1044.042873] [] ? __thread_do_softirq+0x200/0x200 +[ 1044.042877] [] kthread+0xb6/0xc0 +[ 1044.042881] [] ? _raw_spin_unlock_irq+0x3b/0x70 +[ 1044.042886] [] kernel_thread_helper+0x4/0x10 +[ 1044.042889] [] ? 
finish_task_switch+0x8c/0x110 +[ 1044.042894] [] ? _raw_spin_unlock_irq+0x3b/0x70 +[ 1044.042897] [] ? retint_restore_args+0xe/0xe +[ 1044.042900] [] ? kthreadd+0x1e0/0x1e0 +[ 1044.042902] [] ? gs_change+0xb/0xb + +Signed-off-by: Yong Zhang +Cc: Peter Zijlstra +Cc: Steven Rostedt +Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +Signed-off-by: Steven Rostedt + +--- + kernel/events/core.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6088,6 +6088,7 @@ static void perf_swevent_init_hrtimer(st + + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; ++ hwc->hrtimer.irqsafe = 1; + + /* + * Since hrtimers have a fixed rate, we can do a static freq->period diff --git a/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch new file mode 100644 index 000000000..0497c9edd --- /dev/null +++ b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch @@ -0,0 +1,72 @@ +Subject: x86-no-perf-irq-work-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Jul 2011 14:05:05 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/kernel/irq_work.c | 2 ++ + kernel/irq_work.c | 5 ++++- + kernel/timer.c | 6 +++++- + 3 files changed, 11 insertions(+), 2 deletions(-) + +--- a/arch/x86/kernel/irq_work.c ++++ b/arch/x86/kernel/irq_work.c +@@ -38,6 +38,7 @@ static inline void __smp_irq_work_interr + exiting_irq(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void arch_irq_work_raise(void) + { + #ifdef CONFIG_X86_LOCAL_APIC +@@ -48,3 +49,4 @@ void arch_irq_work_raise(void) + apic_wait_icr_idle(); + #endif + } ++#endif +--- a/kernel/irq_work.c ++++ b/kernel/irq_work.c +@@ -119,8 +119,9 @@ static void __irq_work_run(void) + if (llist_empty(this_list)) + return; + ++#ifndef CONFIG_PREEMPT_RT_FULL + BUG_ON(!irqs_disabled()); +- ++#endif + llnode = llist_del_all(this_list); + while (llnode != NULL) { + work = llist_entry(llnode, struct irq_work, llnode); +@@ -152,7 +153,9 @@ static void __irq_work_run(void) + */ + void irq_work_run(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + BUG_ON(!in_irq()); ++#endif + __irq_work_run(); + } + EXPORT_SYMBOL_GPL(irq_work_run); +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1425,7 +1425,7 @@ void update_process_times(int user_tick) + scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) + if (in_irq()) + irq_work_run(); + #endif +@@ -1439,6 +1439,10 @@ static void run_timer_softirq(struct sof + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++ irq_work_run(); ++#endif ++ + if (time_after_eq(jiffies, base->timer_jiffies)) + __run_timers(base); + } diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch new file mode 100644 index 000000000..6a619acac --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch @@ -0,0 +1,175 @@ +Subject: sched: Generic migrate_disable +From: Peter Zijlstra +Date: Thu Aug 11 15:14:58 CEST 2011 +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Make migrate_disable() be a preempt_disable() for !rt kernels. This +allows generic code to use it but still enforces that these code +sections stay relatively small. + +A preemptible migrate_disable() accessible for general use would allow +people growing arbitrary per-cpu crap instead of clean these things +up. + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org +--- + include/linux/preempt.h | 21 +++++++++------------ + include/linux/sched.h | 13 +++++++++++++ + include/linux/smp.h | 9 ++------- + kernel/sched/core.c | 6 ++++-- + kernel/trace/trace.c | 2 +- + lib/smp_processor_id.c | 2 +- + 6 files changed, 30 insertions(+), 23 deletions(-) + +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -148,28 +148,25 @@ do { \ + set_preempt_need_resched(); \ + } while (0) + +-#ifdef CONFIG_SMP +-extern void migrate_disable(void); +-extern void migrate_enable(void); +-#else +-# define migrate_disable() barrier() +-# define migrate_enable() barrier() +-#endif +- + #ifdef CONFIG_PREEMPT_RT_FULL + # define preempt_disable_rt() preempt_disable() + # define preempt_enable_rt() preempt_enable() + # define preempt_disable_nort() barrier() + # define preempt_enable_nort() barrier() +-# define migrate_disable_rt() migrate_disable() +-# define migrate_enable_rt() migrate_enable() ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++# endif /* CONFIG_SMP */ + #else + # define preempt_disable_rt() barrier() + # define preempt_enable_rt() barrier() + # define preempt_disable_nort() preempt_disable() + # define preempt_enable_nort() preempt_enable() +-# define migrate_disable_rt() barrier() +-# define migrate_enable_rt() barrier() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() + #endif + + #ifdef CONFIG_PREEMPT_NOTIFIERS +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1198,7 +1198,9 @@ struct task_struct { + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; ++#endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -2951,11 +2953,22 @@ static inline void set_task_cpu(struct t + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif ++} ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ + static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) + { ++#ifdef CONFIG_PREEMPT_RT_FULL + if (p->migrate_disable) + return cpumask_of(task_cpu(p)); ++#endif + + return &p->cpus_allowed; + } +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -182,13 +182,8 @@ static inline void kick_all_cpus_sync(vo + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + +-#ifndef CONFIG_PREEMPT_RT_FULL +-# define get_cpu_light() get_cpu() +-# define put_cpu_light() put_cpu() +-#else +-# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +-# define put_cpu_light() migrate_enable() +-#endif ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() + + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4622,7 +4622,7 @@ void init_idle(struct task_struct *idle, + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (!p->migrate_disable) { ++ if (!__migrate_disabled(p)) { + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); +@@ -4673,7 +4673,7 @@ int set_cpus_allowed_ptr(struct task_str + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -4692,6 +4692,7 @@ int set_cpus_allowed_ptr(struct task_str + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + ++#ifdef CONFIG_PREEMPT_RT_FULL + void migrate_disable(void) + { + struct task_struct *p = current; +@@ -4784,6 +4785,7 @@ void migrate_enable(void) + preempt_enable(); + } + EXPORT_SYMBOL(migrate_enable); ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + /* + * Move (not current) task off this cpu, onto dest cpu. We're doing +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1553,7 +1553,7 @@ tracing_generic_entry_update(struct trac + (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); + +- entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0; ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -40,7 +40,7 @@ notrace unsigned int debug_smp_processor + + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " + "code: %s/%d\n", preempt_count() - 1, +- current->migrate_disable, current->comm, current->pid); ++ __migrate_disabled(current), current->comm, current->pid); + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); + diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch new file mode 100644 index 000000000..e4acbd742 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch @@ -0,0 +1,68 @@ +Subject: sched: Optimize migrate_disable +From: Peter Zijlstra +Date: Thu Aug 11 15:03:35 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few +atomic ops. See comment on why it should be safe. + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org +--- + kernel/sched/core.c | 24 ++++++++++++++++++++---- + 1 file changed, 20 insertions(+), 4 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4712,7 +4712,19 @@ void migrate_disable(void) + preempt_enable(); + return; + } +- rq = task_rq_lock(p, &flags); ++ ++ /* ++ * Since this is always current we can get away with only locking ++ * rq->lock, the ->cpus_allowed value can normally only be changed ++ * while holding both p->pi_lock and rq->lock, but seeing that this ++ * it current, we cannot actually be waking up, so all code that ++ * relies on serialization against p->pi_lock is out of scope. ++ * ++ * Taking rq->lock serializes us against things like ++ * set_cpus_allowed_ptr() that can still happen concurrently. ++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); + p->migrate_disable = 1; + mask = tsk_cpus_allowed(p); + +@@ -4723,7 +4735,7 @@ void migrate_disable(void) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + } +- task_rq_unlock(rq, p, &flags); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); + preempt_enable(); + } + EXPORT_SYMBOL(migrate_disable); +@@ -4751,7 +4763,11 @@ void migrate_enable(void) + return; + } + +- rq = task_rq_lock(p, &flags); ++ /* ++ * See comment in migrate_disable(). 
++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); + p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); + +@@ -4763,7 +4779,7 @@ void migrate_enable(void) + p->nr_cpus_allowed = cpumask_weight(mask); + } + +- task_rq_unlock(rq, p, &flags); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); + unpin_current_cpu(); + preempt_enable(); + } diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch new file mode 100644 index 000000000..f132cc928 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch @@ -0,0 +1,332 @@ +Subject: mm: pagefault_disabled() +From: Peter Zijlstra +Date: Thu Aug 11 15:31:31 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Wrap the test for pagefault_disabled() into a helper, this allows us +to remove the need for current->pagefault_disabled on !-rt kernels. + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org +--- + arch/alpha/mm/fault.c | 2 +- + arch/arm/mm/fault.c | 2 +- + arch/avr32/mm/fault.c | 3 +-- + arch/cris/mm/fault.c | 2 +- + arch/frv/mm/fault.c | 2 +- + arch/ia64/mm/fault.c | 2 +- + arch/m32r/mm/fault.c | 2 +- + arch/m68k/mm/fault.c | 2 +- + arch/microblaze/mm/fault.c | 2 +- + arch/mips/mm/fault.c | 2 +- + arch/mn10300/mm/fault.c | 2 +- + arch/parisc/mm/fault.c | 2 +- + arch/powerpc/mm/fault.c | 2 +- + arch/s390/mm/fault.c | 4 ++-- + arch/score/mm/fault.c | 2 +- + arch/sh/mm/fault.c | 2 +- + arch/sparc/mm/fault_32.c | 2 +- + arch/sparc/mm/fault_64.c | 2 +- + arch/tile/mm/fault.c | 2 +- + arch/um/kernel/trap.c | 2 +- + arch/x86/mm/fault.c | 2 +- + arch/xtensa/mm/fault.c | 2 +- + include/linux/sched.h | 14 ++++++++++++++ + kernel/fork.c | 2 ++ + 24 files changed, 39 insertions(+), 24 deletions(-) + +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns + + /* If we're in an interrupt context, or have no user context, + we must not take the fault. */ +- if (!mm || in_atomic() || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_ALPHA_LARGE_VMALLOC +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsign + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(regs)) +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -81,8 +81,7 @@ asmlinkage void do_page_fault(unsigned l + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || +- current->pagefault_disabled) ++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) + goto no_context; + + local_irq_enable(); +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c +@@ -113,7 +113,7 @@ do_page_fault(unsigned long address, str + * user context, we must not take the fault. 
+ */ + +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(regs)) +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c +@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(__frame)) +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long addres + /* + * If we're in an interrupt or have no user context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c +@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (error_code & ACE_USERMODE) +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(regs)) +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c +@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(st + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -183,7 +183,7 @@ void do_page_fault(struct pt_regs *regs, + int fault; + unsigned int flags; + +- if (in_atomic() || current->pagefault_disabled) ++ if (pagefault_disabled()) + goto no_context; + + tsk = current; +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + +- if (in_atomic() || mm == NULL || current->pagefault_disabled) { ++ if (in_atomic() || mm == NULL || pagefault_disabled()) { + if (!user_mode(regs)) { + rc = SIGSEGV; + goto bail; +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -291,8 +291,8 @@ static inline int do_exception(struct pt + * user context. 
+ */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || +- tsk->pagefault_disabled)) ++ if (unlikely(!user_space_fault(trans_exc_code) || ++ !mm || pagefault_disabled())) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c +@@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -324,7 +324,7 @@ asmlinkage void __kprobes do_sparc64_fau + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_enabled) ++ if (!mm || pagefault_disabled()) + goto intr_or_no_mm; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_r + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) { ++ if (!mm || pagefault_disabled()) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c +@@ -38,7 +38,7 @@ int handle_page_fault(unsigned long addr + * If the fault was during atomic operation, don't take the fault, just + * fail. + */ +- if (in_atomic() || current->pagefault_disabled) ++ if (pagefault_disabled()) + goto out_nosemaphore; + + if (is_user) +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -1103,7 +1103,7 @@ static void __kprobes noinline + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c +@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) + /* If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm || current->pagefault_disabled) { ++ if (!mm || pagefault_disabled()) { + bad_page_fault(regs, address, SIGSEGV); + return; + } +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -54,6 +54,7 @@ struct sched_param { + #include + #include + #include ++#include + + #include + +@@ -1388,7 +1389,9 @@ struct task_struct { + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL + int pagefault_disabled; ++#endif + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +@@ -1622,6 +1625,17 @@ static inline void task_numa_free(struct + } + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } ++#else ++static inline bool cur_pf_disabled(void) { return false; } ++#endif ++ ++static inline bool pagefault_disabled(void) ++{ ++ return in_atomic() || cur_pf_disabled(); ++} ++ + static inline struct pid *task_pid(struct task_struct *task) + { + return task->pids[PIDTYPE_PID].pid; +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1298,7 +1298,9 @@ static struct task_struct *copy_process( + p->hardirq_context = 0; + p->softirq_context = 0; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL + p->pagefault_disabled = 0; ++#endif + #ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch new file mode 100644 index 000000000..b1b5e929b --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch @@ -0,0 +1,167 @@ +Subject: rcu: Frob softirq test +From: Peter Zijlstra +Date: Sat Aug 13 00:23:17 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +With RT_FULL we get the below wreckage: + +[ 126.060484] ======================================================= +[ 126.060486] [ INFO: possible circular locking dependency detected ] +[ 126.060489] 3.0.1-rt10+ #30 +[ 126.060490] ------------------------------------------------------- +[ 126.060492] irq/24-eth0/1235 is trying to acquire lock: +[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060503] +[ 126.060504] but task is already holding lock: +[ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 +[ 126.060511] +[ 126.060511] which lock already depends on the new lock. 
+[ 126.060513] +[ 126.060514] +[ 126.060514] the existing dependency chain (in reverse order) is: +[ 126.060516] +[ 126.060516] -> #1 (&p->pi_lock){-...-.}: +[ 126.060519] [] lock_acquire+0x145/0x18a +[ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 +[ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f +[ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a +[ 126.060534] [] rt_mutex_lock+0x2d/0x2f +[ 126.060537] [] rcu_boost+0xad/0xde +[ 126.060541] [] rcu_boost_kthread+0x7d/0x9b +[ 126.060544] [] kthread+0x99/0xa1 +[ 126.060547] [] kernel_thread_helper+0x4/0x10 +[ 126.060551] +[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: +[ 126.060555] [] __lock_acquire+0x1157/0x1816 +[ 126.060558] [] lock_acquire+0x145/0x18a +[ 126.060561] [] _raw_spin_lock+0x40/0x73 +[ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060566] [] rt_mutex_unlock+0x27/0x29 +[ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 +[ 126.060573] [] __rcu_read_unlock+0x48/0x89 +[ 126.060576] [] select_task_rq_rt+0xc7/0xd5 +[ 126.060580] [] try_to_wake_up+0x175/0x429 +[ 126.060583] [] wake_up_process+0x15/0x17 +[ 126.060585] [] wakeup_softirqd+0x24/0x26 +[ 126.060590] [] irq_exit+0x49/0x55 +[ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 +[ 126.060597] [] apic_timer_interrupt+0x13/0x20 +[ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 +[ 126.060603] [] irq_thread+0xde/0x1af +[ 126.060606] [] kthread+0x99/0xa1 +[ 126.060608] [] kernel_thread_helper+0x4/0x10 +[ 126.060611] +[ 126.060612] other info that might help us debug this: +[ 126.060614] +[ 126.060615] Possible unsafe locking scenario: +[ 126.060616] +[ 126.060617] CPU0 CPU1 +[ 126.060619] ---- ---- +[ 126.060620] lock(&p->pi_lock); +[ 126.060623] lock(&(lock)->wait_lock); +[ 126.060625] lock(&p->pi_lock); +[ 126.060627] lock(&(lock)->wait_lock); +[ 126.060629] +[ 126.060629] *** DEADLOCK *** +[ 126.060630] +[ 126.060632] 1 lock held by irq/24-eth0/1235: +[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 +[ 126.060638] +[ 126.060638] stack backtrace: +[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 +[ 126.060643] Call Trace: +[ 126.060644] [] print_circular_bug+0x289/0x29a +[ 126.060651] [] __lock_acquire+0x1157/0x1816 +[ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 +[ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060661] [] lock_acquire+0x145/0x18a +[ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060668] [] _raw_spin_lock+0x40/0x73 +[ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c +[ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060680] [] ? rcu_read_unlock_special+0x9b/0x1c4 +[ 126.060683] [] rt_mutex_unlock+0x27/0x29 +[ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 +[ 126.060690] [] __rcu_read_unlock+0x48/0x89 +[ 126.060693] [] select_task_rq_rt+0xc7/0xd5 +[ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 +[ 126.060701] [] ? clockevents_program_event+0x8e/0x90 +[ 126.060704] [] try_to_wake_up+0x175/0x429 +[ 126.060708] [] ? tick_program_event+0x1f/0x21 +[ 126.060711] [] wake_up_process+0x15/0x17 +[ 126.060715] [] wakeup_softirqd+0x24/0x26 +[ 126.060718] [] irq_exit+0x49/0x55 +[ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 +[ 126.060724] [] apic_timer_interrupt+0x13/0x20 +[ 126.060726] [] ? migrate_disable+0x75/0x12d +[ 126.060733] [] ? local_bh_disable+0xe/0x1f +[ 126.060736] [] ? local_bh_disable+0x1d/0x1f +[ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 +[ 126.060742] [] ? 
_raw_spin_unlock_irq+0x3b/0x59 +[ 126.060745] [] irq_thread+0xde/0x1af +[ 126.060748] [] ? irq_thread_fn+0x3a/0x3a +[ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 +[ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 +[ 126.060757] [] kthread+0x99/0xa1 +[ 126.060761] [] kernel_thread_helper+0x4/0x10 +[ 126.060764] [] ? finish_task_switch+0x87/0x10a +[ 126.060768] [] ? retint_restore_args+0xe/0xe +[ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c +[ 126.060774] [] ? gs_change+0xb/0xb + +Because irq_exit() does: + +void irq_exit(void) +{ + account_system_vtime(current); + trace_hardirq_exit(); + sub_preempt_count(IRQ_EXIT_OFFSET); + if (!in_interrupt() && local_softirq_pending()) + invoke_softirq(); + + ... +} + +Which triggers a wakeup, which uses RCU, now if the interrupted task has +t->rcu_read_unlock_special set, the rcu usage from the wakeup will end +up in rcu_read_unlock_special(). rcu_read_unlock_special() will test +for in_irq(), which will fail as we just decremented preempt_count +with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for +PREEMPT_RT_FULL reads: + +int in_serving_softirq(void) +{ + int res; + + preempt_disable(); + res = __get_cpu_var(local_softirq_runner) == current; + preempt_enable(); + return res; +} + +Which will thus also fail, resulting in the above wreckage. + +The 'somewhat' ugly solution is to open-code the preempt_count() test +in rcu_read_unlock_special(). + +Also, we're not at all sure how ->rcu_read_unlock_special gets set +here... so this is very likely a bandaid and more thought is required. + +Cc: Paul E. McKenney +Signed-off-by: Peter Zijlstra +--- + kernel/rcu/tree_plugin.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -370,7 +370,7 @@ void rcu_read_unlock_special(struct task + } + + /* Hardware IRQ handlers cannot block, complain if they get here. */ +- if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) { ++ if (WARN_ON_ONCE(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET))) { + local_irq_restore(flags); + return; + } diff --git a/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch new file mode 100644 index 000000000..ea804b54d --- /dev/null +++ b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch @@ -0,0 +1,173 @@ +Subject: mm: raw_pagefault_disable +From: Peter Zijlstra +Date: Fri Aug 05 17:16:58 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Adding migrate_disable() to pagefault_disable() to preserve the +per-cpu thing for kmap_atomic might not have been the best of choices. +But short of adding preempt_disable/migrate_disable foo all over the +kmap code it still seems the best way. + +It does however yield the below borkage as well as wreck !-rt builds +since !-rt does rely on pagefault_disable() not preempting. So fix all +that up by adding raw_pagefault_disable(). + + [] warn_slowpath_common+0x85/0x9d + [] warn_slowpath_fmt+0x46/0x48 + [] ? _raw_spin_lock+0x6c/0x73 + [] ? watchdog_overflow_callback+0x9b/0xd0 + [] watchdog_overflow_callback+0xb7/0xd0 + [] __perf_event_overflow+0x11c/0x1fe + [] ? perf_event_update_userpage+0x149/0x151 + [] ? perf_event_task_disable+0x7c/0x7c + [] perf_event_overflow+0x14/0x16 + [] x86_pmu_handle_irq+0xcb/0x108 + [] perf_event_nmi_handler+0x46/0x91 + [] notifier_call_chain+0x79/0xa6 + [] __atomic_notifier_call_chain+0x66/0x98 + [] ? 
notifier_call_chain+0xa6/0xa6 + [] atomic_notifier_call_chain+0x14/0x16 + [] notify_die+0x2e/0x30 + [] do_nmi+0x7e/0x22b + [] nmi+0x1a/0x2c + [] ? sub_preempt_count+0x4b/0xaa + <> [] delay_tsc+0xac/0xd1 + [] __delay+0xf/0x11 + [] do_raw_spin_lock+0xd2/0x13c + [] _raw_spin_lock_irqsave+0x6b/0x85 + [] ? task_rq_lock+0x35/0x8d + [] task_rq_lock+0x35/0x8d + [] migrate_disable+0x65/0x12c + [] pagefault_disable+0xe/0x1f + [] dump_trace+0x21f/0x2e2 + [] show_trace_log_lvl+0x54/0x5d + [] show_trace+0x15/0x17 + [] dump_stack+0x77/0x80 + [] spin_bug+0x9c/0xa3 + [] ? task_rq_lock+0x50/0x8d + [] do_raw_spin_lock+0x47/0x13c + [] _raw_spin_lock+0x60/0x73 + [] ? task_rq_lock+0x50/0x8d + [] task_rq_lock+0x50/0x8d + [] migrate_disable+0x65/0x12c + [] pagefault_disable+0xe/0x1f + [] dump_trace+0x21f/0x2e2 + [] save_stack_trace+0x2f/0x4c + [] save_trace+0x3f/0xaf + [] mark_lock+0x228/0x530 + [] __lock_acquire+0x662/0x1812 + [] ? native_sched_clock+0x37/0x6d + [] ? trace_hardirqs_off_caller+0x1f/0x99 + [] ? sched_rt_period_timer+0xbd/0x218 + [] lock_acquire+0x145/0x18a + [] ? sched_rt_period_timer+0xbd/0x218 + [] _raw_spin_lock+0x40/0x73 + [] ? sched_rt_period_timer+0xbd/0x218 + [] sched_rt_period_timer+0xbd/0x218 + [] __run_hrtimer+0x1e4/0x347 + [] ? can_migrate_task.clone.82+0x14a/0x14a + [] hrtimer_interrupt+0xee/0x1d6 + [] ? add_preempt_count+0xae/0xb2 + [] smp_apic_timer_interrupt+0x85/0x98 + [] apic_timer_interrupt+0x13/0x20 + + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org +--- + include/linux/uaccess.h | 40 ++++++++++++++++++++++++++++++++++++++-- + mm/memory.c | 8 ++------ + 2 files changed, 40 insertions(+), 8 deletions(-) + +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -8,8 +8,44 @@ + * These routines enable/disable the pagefault handler in that + * it will not take any MM locks and go straight to the fixup table. + */ ++static inline void raw_pagefault_disable(void) ++{ ++ preempt_count_inc(); ++ /* ++ * make sure to have issued the store before a pagefault ++ * can hit. ++ */ ++ barrier(); ++} ++ ++static inline void raw_pagefault_enable(void) ++{ ++#ifndef CONFIG_PREEMPT ++ /* ++ * make sure to issue those last loads/stores before enabling ++ * the pagefault handler again. ++ */ ++ barrier(); ++ preempt_count_dec(); ++#else ++ preempt_enable(); ++#endif ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline void pagefault_disable(void) ++{ ++ raw_pagefault_disable(); ++} ++ ++static inline void pagefault_enable(void) ++{ ++ raw_pagefault_enable(); ++} ++#else + extern void pagefault_disable(void); + extern void pagefault_enable(void); ++#endif + + #ifndef ARCH_HAS_NOCACHE_UACCESS + +@@ -50,9 +86,9 @@ static inline unsigned long __copy_from_ + mm_segment_t old_fs = get_fs(); \ + \ + set_fs(KERNEL_DS); \ +- pagefault_disable(); \ ++ raw_pagefault_disable(); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ raw_pagefault_enable(); \ + set_fs(old_fs); \ + ret; \ + }) +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3690,6 +3690,7 @@ static int handle_pte_fault(struct mm_st + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { + preempt_count_inc(); +@@ -3704,21 +3705,16 @@ EXPORT_SYMBOL(pagefault_disable); + + void pagefault_enable(void) + { +-#ifndef CONFIG_PREEMPT + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. 
+ */ + barrier(); + current->pagefault_disabled--; +- preempt_count_dec(); +-#else +- barrier(); +- current->pagefault_disabled--; + preempt_enable(); +-#endif + } + EXPORT_SYMBOL(pagefault_enable); ++#endif + + /* + * By the time we get here, we already hold the mm semaphore diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch new file mode 100644 index 000000000..a4077c921 --- /dev/null +++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch @@ -0,0 +1,183 @@ +Subject: crypto: Convert crypto notifier chain to SRCU +From: Peter Zijlstra +Date: Fri, 05 Oct 2012 09:03:24 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The crypto notifier deadlocks on RT. Though this can be a real deadlock +on mainline as well due to fifo fair rwsems. + +The involved parties here are: + +[ 82.172678] swapper/0 S 0000000000000001 0 1 0 0x00000000 +[ 82.172682] ffff88042f18fcf0 0000000000000046 ffff88042f18fc80 ffffffff81491238 +[ 82.172685] 0000000000011cc0 0000000000011cc0 ffff88042f18c040 ffff88042f18ffd8 +[ 82.172688] 0000000000011cc0 0000000000011cc0 ffff88042f18ffd8 0000000000011cc0 +[ 82.172689] Call Trace: +[ 82.172697] [] ? _raw_spin_unlock_irqrestore+0x6c/0x7a +[ 82.172701] [] schedule+0x64/0x66 +[ 82.172704] [] schedule_timeout+0x27/0xd0 +[ 82.172708] [] ? unpin_current_cpu+0x1a/0x6c +[ 82.172713] [] ? migrate_enable+0x12f/0x141 +[ 82.172716] [] wait_for_common+0xbb/0x11f +[ 82.172719] [] ? try_to_wake_up+0x182/0x182 +[ 82.172722] [] wait_for_completion_interruptible+0x1d/0x2e +[ 82.172726] [] crypto_wait_for_test+0x49/0x6b +[ 82.172728] [] crypto_register_alg+0x53/0x5a +[ 82.172730] [] crypto_register_algs+0x33/0x72 +[ 82.172734] [] ? aes_init+0x12/0x12 +[ 82.172737] [] aesni_init+0x64/0x66 +[ 82.172741] [] do_one_initcall+0x7f/0x13b +[ 82.172744] [] kernel_init+0x199/0x22c +[ 82.172747] [] ? loglevel+0x31/0x31 +[ 82.172752] [] kernel_thread_helper+0x4/0x10 +[ 82.172755] [] ? retint_restore_args+0x13/0x13 +[ 82.172759] [] ? start_kernel+0x3ca/0x3ca +[ 82.172761] [] ? gs_change+0x13/0x13 + +[ 82.174186] cryptomgr_test S 0000000000000001 0 41 2 0x00000000 +[ 82.174189] ffff88042c971980 0000000000000046 ffffffff81d74830 0000000000000292 +[ 82.174192] 0000000000011cc0 0000000000011cc0 ffff88042c96eb80 ffff88042c971fd8 +[ 82.174195] 0000000000011cc0 0000000000011cc0 ffff88042c971fd8 0000000000011cc0 +[ 82.174195] Call Trace: +[ 82.174198] [] schedule+0x64/0x66 +[ 82.174201] [] schedule_timeout+0x27/0xd0 +[ 82.174204] [] ? unpin_current_cpu+0x1a/0x6c +[ 82.174206] [] ? migrate_enable+0x12f/0x141 +[ 82.174209] [] wait_for_common+0xbb/0x11f +[ 82.174212] [] ? try_to_wake_up+0x182/0x182 +[ 82.174215] [] wait_for_completion_interruptible+0x1d/0x2e +[ 82.174218] [] cryptomgr_notify+0x280/0x385 +[ 82.174221] [] notifier_call_chain+0x6b/0x98 +[ 82.174224] [] ? rt_down_read+0x10/0x12 +[ 82.174227] [] __blocking_notifier_call_chain+0x70/0x8d +[ 82.174230] [] blocking_notifier_call_chain+0x14/0x16 +[ 82.174234] [] crypto_probing_notify+0x24/0x50 +[ 82.174236] [] crypto_alg_mod_lookup+0x3e/0x74 +[ 82.174238] [] crypto_alloc_base+0x36/0x8f +[ 82.174241] [] cryptd_alloc_ablkcipher+0x6e/0xb5 +[ 82.174243] [] ? kzalloc.clone.5+0xe/0x10 +[ 82.174246] [] ablk_init_common+0x1d/0x38 +[ 82.174249] [] ablk_ecb_init+0x15/0x17 +[ 82.174251] [] __crypto_alloc_tfm+0xc7/0x114 +[ 82.174254] [] ? 
crypto_lookup_skcipher+0x1f/0xe4 +[ 82.174256] [] crypto_alloc_ablkcipher+0x60/0xa5 +[ 82.174258] [] alg_test_skcipher+0x24/0x9b +[ 82.174261] [] ? finish_task_switch+0x3f/0xfa +[ 82.174263] [] alg_test+0x16f/0x1d7 +[ 82.174267] [] ? cryptomgr_probe+0xac/0xac +[ 82.174269] [] cryptomgr_test+0x2c/0x47 +[ 82.174272] [] kthread+0x7e/0x86 +[ 82.174275] [] ? finish_task_switch+0xaf/0xfa +[ 82.174278] [] kernel_thread_helper+0x4/0x10 +[ 82.174281] [] ? retint_restore_args+0x13/0x13 +[ 82.174284] [] ? __init_kthread_worker+0x8c/0x8c +[ 82.174287] [] ? gs_change+0x13/0x13 + +[ 82.174329] cryptomgr_probe D 0000000000000002 0 47 2 0x00000000 +[ 82.174332] ffff88042c991b70 0000000000000046 ffff88042c991bb0 0000000000000006 +[ 82.174335] 0000000000011cc0 0000000000011cc0 ffff88042c98ed00 ffff88042c991fd8 +[ 82.174338] 0000000000011cc0 0000000000011cc0 ffff88042c991fd8 0000000000011cc0 +[ 82.174338] Call Trace: +[ 82.174342] [] schedule+0x64/0x66 +[ 82.174344] [] __rt_mutex_slowlock+0x85/0xbe +[ 82.174347] [] rt_mutex_slowlock+0xec/0x159 +[ 82.174351] [] rt_mutex_fastlock.clone.8+0x29/0x2f +[ 82.174353] [] rt_mutex_lock+0x33/0x37 +[ 82.174356] [] __rt_down_read+0x50/0x5a +[ 82.174358] [] ? rt_down_read+0x10/0x12 +[ 82.174360] [] rt_down_read+0x10/0x12 +[ 82.174363] [] __blocking_notifier_call_chain+0x58/0x8d +[ 82.174366] [] blocking_notifier_call_chain+0x14/0x16 +[ 82.174369] [] crypto_probing_notify+0x24/0x50 +[ 82.174372] [] crypto_wait_for_test+0x22/0x6b +[ 82.174374] [] crypto_register_instance+0xb4/0xc0 +[ 82.174377] [] cryptd_create+0x378/0x3b6 +[ 82.174379] [] ? __crypto_lookup_template+0x5b/0x63 +[ 82.174382] [] cryptomgr_probe+0x45/0xac +[ 82.174385] [] ? crypto_alloc_pcomp+0x1b/0x1b +[ 82.174388] [] kthread+0x7e/0x86 +[ 82.174391] [] ? finish_task_switch+0xaf/0xfa +[ 82.174394] [] kernel_thread_helper+0x4/0x10 +[ 82.174398] [] ? retint_restore_args+0x13/0x13 +[ 82.174401] [] ? __init_kthread_worker+0x8c/0x8c +[ 82.174403] [] ? gs_change+0x13/0x13 + +cryptomgr_test spawns the cryptomgr_probe thread from the notifier +call. The probe thread fires the same notifier as the test thread and +deadlocks on the rwsem on RT. + +Now this is a potential deadlock in mainline as well, because we have +fifo fair rwsems. If another thread blocks with a down_write() on the +notifier chain before the probe thread issues the down_read() it will +block the probe thread and the whole party is dead locked. 
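+
+The fix converts the chain to an SRCU notifier head. As a rough sketch
+of that pattern in general (crypto_chain itself is converted in the
+hunks that follow; example_chain and the two wrappers here are only
+stand-in names for illustration):
+
+	#include <linux/notifier.h>
+
+	SRCU_NOTIFIER_HEAD(example_chain);
+
+	int example_register(struct notifier_block *nb)
+	{
+		return srcu_notifier_chain_register(&example_chain, nb);
+	}
+
+	int example_notify(unsigned long val, void *v)
+	{
+		/*
+		 * Readers use srcu_read_lock() instead of down_read() on a
+		 * fair rwsem, so a notifier callback may fire the same chain
+		 * again without queueing up behind a pending writer.
+		 */
+		return srcu_notifier_call_chain(&example_chain, val, v);
+	}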
+ +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner +--- + crypto/algapi.c | 4 ++-- + crypto/api.c | 6 +++--- + crypto/internal.h | 4 ++-- + 3 files changed, 7 insertions(+), 7 deletions(-) + +--- a/crypto/algapi.c ++++ b/crypto/algapi.c +@@ -684,13 +684,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); + + int crypto_register_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_register(&crypto_chain, nb); ++ return srcu_notifier_chain_register(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_register_notifier); + + int crypto_unregister_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_unregister(&crypto_chain, nb); ++ return srcu_notifier_chain_unregister(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_unregister_notifier); + +--- a/crypto/api.c ++++ b/crypto/api.c +@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); + DECLARE_RWSEM(crypto_alg_sem); + EXPORT_SYMBOL_GPL(crypto_alg_sem); + +-BLOCKING_NOTIFIER_HEAD(crypto_chain); ++SRCU_NOTIFIER_HEAD(crypto_chain); + EXPORT_SYMBOL_GPL(crypto_chain); + + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long + { + int ok; + +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + if (ok == NOTIFY_DONE) { + request_module("cryptomgr"); +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + } + + return ok; +--- a/crypto/internal.h ++++ b/crypto/internal.h +@@ -48,7 +48,7 @@ struct crypto_larval { + + extern struct list_head crypto_alg_list; + extern struct rw_semaphore crypto_alg_sem; +-extern struct blocking_notifier_head crypto_chain; ++extern struct srcu_notifier_head crypto_chain; + + #ifdef CONFIG_PROC_FS + void __init crypto_init_proc(void); +@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(str + + static inline void crypto_notify(unsigned long val, void *v) + { +- blocking_notifier_call_chain(&crypto_chain, val, v); ++ srcu_notifier_call_chain(&crypto_chain, val, v); + } + + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/pid-h-include-atomic-h.patch b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch new file mode 100644 index 000000000..fc96d1f3d --- /dev/null +++ b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch @@ -0,0 +1,20 @@ +Subject: rwsem-inlcude-fix.patch +From: Thomas Gleixner +Date: Fri, 15 Jul 2011 21:24:27 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/pid.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/linux/pid.h ++++ b/include/linux/pid.h +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch new file mode 100644 index 000000000..fc5926af1 --- /dev/null +++ b/debian/patches/features/all/rt/ping-sysrq.patch @@ -0,0 +1,122 @@ +Subject: net: sysrq via icmp +From: Carsten Emde +Date: Tue, 19 Jul 2011 13:51:17 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +There are (probably rare) situations when a system crashed and the system +console becomes unresponsive but the network icmp layer still is alive. +Wouldn't it be wonderful, if we then could submit a sysreq command via ping? 
+ +This patch provides this facility. Please consult the updated documentation +Documentation/sysrq.txt for details. + +Signed-off-by: Carsten Emde + +--- + Documentation/sysrq.txt | 11 +++++++++-- + include/net/netns/ipv4.h | 1 + + net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++ + net/ipv4/sysctl_net_ipv4.c | 7 +++++++ + 4 files changed, 47 insertions(+), 2 deletions(-) + +--- a/Documentation/sysrq.txt ++++ b/Documentation/sysrq.txt +@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen ( + On other - If you know of the key combos for other architectures, please + let me know so I can add them to this section. + +-On all - write a character to /proc/sysrq-trigger. e.g.: +- ++On all - write a character to /proc/sysrq-trigger, e.g.: + echo t > /proc/sysrq-trigger + ++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. ++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq ++ Send an ICMP echo request with this pattern plus the particular ++ SysRq command key. Example: ++ # ping -c1 -s57 -p0102030468 ++ will trigger the SysRq-H (help) command. ++ ++ + * What are the 'command' keys? + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + 'b' - Will immediately reboot the system without syncing or unmounting +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -61,6 +61,7 @@ struct netns_ipv4 { + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -69,6 +69,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -803,6 +804,30 @@ static void icmp_redirect(struct sk_buff + } + + /* ++ * 32bit and 64bit have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 ++ ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. ++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++{ ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; ++ ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++} ++ ++/* + * Handle ICMP_ECHO ("ping") requests. 
+ * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -829,6 +854,11 @@ static void icmp_echo(struct sk_buff *sk + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + } + +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -776,6 +776,13 @@ static struct ctl_table ipv4_net_table[] + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), diff --git a/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch new file mode 100644 index 000000000..b3a36fd95 --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch @@ -0,0 +1,58 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:44 -0500 +Subject: posix-timers: Avoid wakeups when no timers are active +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Waking the thread even when no timers are scheduled is useless. + +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-cpu-timers.c | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) + +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -1274,6 +1274,21 @@ static int posix_cpu_timers_thread(void + return 0; + } + ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; ++} ++ + void run_posix_cpu_timers(struct task_struct *tsk) + { + unsigned long cpu = smp_processor_id(); +@@ -1286,7 +1301,7 @@ void run_posix_cpu_timers(struct task_st + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ +- if (!tsk->posix_timer_list) { ++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; +@@ -1298,9 +1313,9 @@ void run_posix_cpu_timers(struct task_st + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; ++ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); + } +- /* XXX signal the thread somehow */ +- wake_up_process(per_cpu(posix_timer_task, cpu)); + } + + /* diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch new file mode 100644 index 000000000..f826bedfa --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch @@ -0,0 +1,34 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:29:20 -0500 +Subject: posix-timers: Prevent broadcast signals +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Posix timers should not send broadcast signals and kernel only +signals. Prevent it. 
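+
+From userspace the rule is simply that a POSIX timer has to name a
+signal that is neither kernel-only nor a coredump signal. A minimal
+illustration, not part of the patch (the exact errno is left to the
+kernel, but with the check below something like SIGKILL is refused at
+timer_create() time):
+
+	#include <signal.h>
+	#include <stdio.h>
+	#include <time.h>
+
+	int main(void)
+	{
+		struct sigevent sev = {
+			.sigev_notify = SIGEV_SIGNAL,
+			.sigev_signo  = SIGRTMIN,	/* neither kernel-only nor coredump */
+		};
+		timer_t id;
+
+		if (timer_create(CLOCK_MONOTONIC, &sev, &id))
+			perror("timer_create");
+
+		return 0;
+	}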
+ +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-timers.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -497,6 +497,7 @@ static enum hrtimer_restart posix_timer_ + static struct pid *good_sigevent(sigevent_t * event) + { + struct task_struct *rtn = current->group_leader; ++ int sig = event->sigev_signo; + + if ((event->sigev_notify & SIGEV_THREAD_ID ) && + (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || +@@ -505,7 +506,8 @@ static struct pid *good_sigevent(sigeven + return NULL; + + if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && +- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) ++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || ++ sig_kernel_coredump(sig))) + return NULL; + + return task_pid(rtn); diff --git a/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch new file mode 100644 index 000000000..94185f236 --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch @@ -0,0 +1,27 @@ +From: Arnaldo Carvalho de Melo +Date: Fri, 3 Jul 2009 08:30:00 -0500 +Subject: posix-timers: Shorten posix_cpu_timers/ kernel thread names +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Shorten the softirq kernel thread names because they always overflow the +limited comm length, appearing as "posix_cpu_timer" CPU# times. + +Signed-off-by: Arnaldo Carvalho de Melo +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-cpu-timers.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -1317,7 +1317,7 @@ static int posix_cpu_thread_call(struct + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, +- "posix_cpu_timers/%d",cpu); ++ "posixcputmr/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch new file mode 100644 index 000000000..edbccbb2c --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch @@ -0,0 +1,321 @@ +From: John Stultz +Date: Fri, 3 Jul 2009 08:29:58 -0500 +Subject: posix-timers: thread posix-cpu-timers on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +posix-cpu-timer code takes non -rt safe locks in hard irq +context. Move it to a thread. 
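The hand-off added below uses a per-CPU, singly linked list of task_structs that is terminated by a self-pointing entry rather than by NULL, so an empty slot (NULL) and a queued tail can be told apart. As a reading aid, here is a minimal standalone model of just that list discipline; it is ordinary userspace C with invented names and it leaves out the real code's reference counting (get/put_task_struct) and interrupt disabling.

  #include <stdio.h>

  struct toy_task {
      struct toy_task *posix_timer_list;  /* NULL = not queued */
      const char *name;
  };

  static struct toy_task *tasklist;       /* stand-in for the per-CPU head */

  static void queue_task(struct toy_task *t)
  {
      if (t->posix_timer_list)            /* already queued */
          return;
      /* the tail points at itself, so the consumer knows where to stop */
      t->posix_timer_list = tasklist ? tasklist : t;
      tasklist = t;
  }

  static void drain_tasks(void)
  {
      struct toy_task *t = tasklist, *next;

      tasklist = NULL;                    /* take the whole list at once */
      while (t) {
          next = t->posix_timer_list;
          t->posix_timer_list = NULL;
          printf("expire CPU timers of %s\n", t->name);
          if (next == t)                  /* self-pointer marks the tail */
              break;
          t = next;
      }
  }

  int main(void)
  {
      struct toy_task a = { NULL, "task-a" }, b = { NULL, "task-b" };

      queue_task(&a);
      queue_task(&b);
      drain_tasks();                      /* runs b, then a */
      return 0;
  }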
+ +[ 3.0 fixes from Peter Zijlstra ] + +Signed-off-by: John Stultz +Signed-off-by: Thomas Gleixner + +--- + include/linux/init_task.h | 7 + + include/linux/sched.h | 3 + init/main.c | 3 + kernel/fork.c | 3 + kernel/posix-cpu-timers.c | 183 ++++++++++++++++++++++++++++++++++++++++++++-- + 5 files changed, 192 insertions(+), 7 deletions(-) + +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -145,6 +145,12 @@ extern struct task_group root_task_group + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ + .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ +@@ -218,6 +224,7 @@ extern struct task_group root_task_group + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1310,6 +1310,9 @@ struct task_struct { + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task +--- a/init/main.c ++++ b/init/main.c +@@ -6,7 +6,7 @@ + * GK 2/5/95 - Changed to support mounting root fs via NFS + * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 + * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 +- * Simplified starting of init: Michael A. Griffith ++ * Simplified starting of init: Michael A. Griffith + */ + + #define DEBUG /* Enable initcall_debug */ +@@ -74,6 +74,7 @@ + #include + #include + #include ++#include + #include + #include + #include +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1109,6 +1109,9 @@ void mm_init_owner(struct mm_struct *mm, + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.sched_exp = 0; +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -3,6 +3,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -640,7 +641,7 @@ static int posix_cpu_timer_set(struct k_ + /* + * Disarm any old timer after extracting its expiry time. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + ret = 0; + old_incr = timer->it.cpu.incr; +@@ -1061,7 +1062,7 @@ void posix_cpu_timer_schedule(struct k_i + /* + * Now re-arm for the new expiry time. 
+ */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + arm_timer(timer); + unlock_task_sighand(p, &flags); + +@@ -1127,10 +1128,11 @@ static inline int fastpath_timer_check(s + sig = tsk->signal; + if (sig->cputimer.running) { + struct task_cputime group_sample; ++ unsigned long flags; + +- raw_spin_lock(&sig->cputimer.lock); ++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags); + group_sample = sig->cputimer.cputime; +- raw_spin_unlock(&sig->cputimer.lock); ++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags); + + if (task_cputime_expired(&group_sample, &sig->cputime_expires)) + return 1; +@@ -1144,13 +1146,13 @@ static inline int fastpath_timer_check(s + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. + */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; + +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + /* + * The fast path checks that there are no expired thread or thread +@@ -1208,6 +1210,175 @@ void run_posix_cpu_timers(struct task_st + } + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include ++#include ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++ ++static int posix_cpu_timers_thread(void *data) ++{ ++ int cpu = (long)data; ++ ++ BUG_ON(per_cpu(posix_timer_task,cpu) != current); ++ ++ while (!kthread_should_stop()) { ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ if (cpu_is_offline(cpu)) ++ goto wait_to_die; ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ __set_current_state(TASK_RUNNING); ++ continue; ++ } ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++ } ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ unsigned long cpu = smp_processor_id(); ++ struct task_struct *tasklist; ++ ++ BUG_ON(!irqs_disabled()); ++ if(!per_cpu(posix_timer_task, cpu)) ++ return; ++ /* get per-cpu references */ ++ tasklist = per_cpu(posix_timer_tasklist, cpu); ++ ++ /* check to see if we're already queued */ ++ if (!tsk->posix_timer_list) { ++ get_task_struct(tsk); ++ if (tasklist) { ++ tsk->posix_timer_list = tasklist; ++ } else { ++ /* ++ * The list is terminated by a self-pointing ++ * task_struct ++ */ ++ tsk->posix_timer_list = tsk; ++ } ++ per_cpu(posix_timer_tasklist, cpu) = tsk; ++ } ++ /* XXX signal the thread somehow */ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); ++} ++ ++/* ++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added. 
++ * Here we can start up the necessary migration thread for the new CPU. ++ */ ++static int posix_cpu_thread_call(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ struct task_struct *p; ++ struct sched_param param; ++ ++ switch (action) { ++ case CPU_UP_PREPARE: ++ p = kthread_create(posix_cpu_timers_thread, hcpu, ++ "posix_cpu_timers/%d",cpu); ++ if (IS_ERR(p)) ++ return NOTIFY_BAD; ++ p->flags |= PF_NOFREEZE; ++ kthread_bind(p, cpu); ++ /* Must be high prio to avoid getting starved */ ++ param.sched_priority = MAX_RT_PRIO-1; ++ sched_setscheduler(p, SCHED_FIFO, ¶m); ++ per_cpu(posix_timer_task,cpu) = p; ++ break; ++ case CPU_ONLINE: ++ /* Strictly unneccessary, as first user will wake it. */ ++ wake_up_process(per_cpu(posix_timer_task,cpu)); ++ break; ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_UP_CANCELED: ++ /* Unbind it from offline cpu so it can run. Fall thru. */ ++ kthread_bind(per_cpu(posix_timer_task, cpu), ++ cpumask_any(cpu_online_mask)); ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++ case CPU_DEAD: ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* Register at highest priority so that task migration (migrate_all_tasks) ++ * happens before everything else. ++ */ ++static struct notifier_block posix_cpu_thread_notifier = { ++ .notifier_call = posix_cpu_thread_call, ++ .priority = 10 ++}; ++ ++static int __init posix_cpu_thread_init(void) ++{ ++ void *hcpu = (void *)(long)smp_processor_id(); ++ /* Start one for boot CPU. */ ++ unsigned long cpu; ++ ++ /* init the per-cpu posix_timer_tasklets */ ++ for_each_possible_cpu(cpu) ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); ++ register_cpu_notifier(&posix_cpu_thread_notifier); ++ return 0; ++} ++early_initcall(posix_cpu_thread_init); ++#else /* CONFIG_PREEMPT_RT_BASE */ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ __run_posix_cpu_timers(tsk); ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + /* + * Set one of the process-wide special case CPU timers or RLIMIT_CPU. + * The tsk->sighand->siglock must be held by the caller. 
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch new file mode 100644 index 000000000..855f8da6b --- /dev/null +++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch @@ -0,0 +1,21 @@ +Subject: power-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:08:34 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -293,7 +293,7 @@ menu "Kernel options" + + config HIGHMEM + bool "High memory support" +- depends on PPC32 ++ depends on PPC32 && !PREEMPT_RT_FULL + + source kernel/Kconfig.hz + source kernel/Kconfig.preempt diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch new file mode 100644 index 000000000..44c8e6678 --- /dev/null +++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch @@ -0,0 +1,24 @@ +From: Thomas Gleixner +Subject: Powerpc: Use generic rwsem on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT + + config RWSEM_GENERIC_SPINLOCK + bool ++ default y if PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM + bool +- default y ++ default y if !PREEMPT_RT_FULL + + config GENERIC_LOCKBREAK + bool diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch new file mode 100644 index 000000000..e9d384d3a --- /dev/null +++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch @@ -0,0 +1,172 @@ +From: Thomas Gleixner +Date: Thu, 1 Nov 2012 10:14:11 +0100 +Subject: [PATCH] powerpc-preempt-lazy-support.patch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 1 + + arch/powerpc/include/asm/thread_info.h | 11 ++++++++--- + arch/powerpc/kernel/asm-offsets.c | 1 + + arch/powerpc/kernel/entry_32.S | 17 ++++++++++++----- + arch/powerpc/kernel/entry_64.S | 14 +++++++++++--- + 5 files changed, 33 insertions(+), 11 deletions(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -133,6 +133,7 @@ config PPC + select GENERIC_CLOCKEVENTS + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER ++ select HAVE_PREEMPT_LAZY + select HAVE_MOD_ARCH_SPECIFIC + select MODULES_USE_ELF_RELA + select CLONE_BACKWARDS +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -43,6 +43,8 @@ struct thread_info { + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ + struct restart_block restart_block; + unsigned long local_flags; /* private flags for thread */ + +@@ -88,8 +90,7 @@ static inline struct thread_info *curren + #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ + #define TIF_SIGPENDING 1 /* signal pending */ + #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is 
polling +- TIF_NEED_RESCHED */ ++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ + #define TIF_32BIT 4 /* 32 bit binary */ + #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ +@@ -107,6 +108,8 @@ static inline struct thread_info *curren + #if defined(CONFIG_PPC64) + #define TIF_ELF2ABI 18 /* function descriptors must die! */ + #endif ++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling ++ TIF_NEED_RESCHED */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1< +Date: Fri, 26 Oct 2012 18:50:54 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +It has become an obsession to mitigate the determinism vs. throughput +loss of RT. Looking at the mainline semantics of preemption points +gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER +tasks. One major issue is the wakeup of tasks which are right away +preempting the waking task while the waking task holds a lock on which +the woken task will block right after having preempted the wakee. In +mainline this is prevented due to the implicit preemption disable of +spin/rw_lock held regions. On RT this is not possible due to the fully +preemptible nature of sleeping spinlocks. + +Though for a SCHED_OTHER task preempting another SCHED_OTHER task this +is really not a correctness issue. RT folks are concerned about +SCHED_FIFO/RR tasks preemption and not about the purely fairness +driven SCHED_OTHER preemption latencies. + +So I introduced a lazy preemption mechanism which only applies to +SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the +existing preempt_count each tasks sports now a preempt_lazy_count +which is manipulated on lock acquiry and release. This is slightly +incorrect as for lazyness reasons I coupled this on +migrate_disable/enable so some other mechanisms get the same treatment +(e.g. get_cpu_light). + +Now on the scheduler side instead of setting NEED_RESCHED this sets +NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and +therefor allows to exit the waking task the lock held region before +the woken task preempts. That also works better for cross CPU wakeups +as the other side can stay in the adaptive spinning loop. + +For RT class preemption there is no change. This simply sets +NEED_RESCHED and forgoes the lazy preemption counter. + + Initial test do not expose any observable latency increasement, but +history shows that I've been proven wrong before :) + +The lazy preemption mode is per default on, but with +CONFIG_SCHED_DEBUG enabled it can be disabled via: + + # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features + +and reenabled via + + # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features + +The test results so far are very machine and workload dependent, but +there is a clear trend that it enhances the non RT workload +performance. 
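The policy above condenses to a small decision table, modelled below in plain C with invented names (a toy illustration, not the scheduler code): an RT-class wakee still sets NEED_RESCHED and preempts immediately, a SCHED_OTHER wakee only sets NEED_RESCHED_LAZY, and the lazy flag is acted upon once the preemptor's preempt_lazy_count, held non-zero across its lock-held regions, drops back to zero.

  #include <stdbool.h>
  #include <stdio.h>

  enum { NEED_RESCHED = 1, NEED_RESCHED_LAZY = 2 };

  struct toy_task {
      int flags;
      int preempt_lazy_count;     /* > 0 while inside a lock-held region */
  };

  /* Wakeup-time decision: RT preemption is unchanged, fair-vs-fair is lazy. */
  static void preempt_on_wakeup(struct toy_task *curr, bool wakee_is_rt)
  {
      if (wakee_is_rt)
          curr->flags |= NEED_RESCHED;
      else
          curr->flags |= NEED_RESCHED_LAZY;
  }

  /* Honour the lazy flag only outside lock-held regions. */
  static bool should_resched(const struct toy_task *curr)
  {
      if (curr->flags & NEED_RESCHED)
          return true;
      return (curr->flags & NEED_RESCHED_LAZY) && !curr->preempt_lazy_count;
  }

  int main(void)
  {
      struct toy_task curr = { .flags = 0, .preempt_lazy_count = 1 };

      preempt_on_wakeup(&curr, false);
      printf("inside lock-held region: resched=%d\n", should_resched(&curr));
      curr.preempt_lazy_count = 0;        /* i.e. preempt_lazy_enable() */
      printf("after leaving region:    resched=%d\n", should_resched(&curr));
      return 0;
  }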
+ +Signed-off-by: Thomas Gleixner +--- + arch/x86/include/asm/preempt.h | 12 ++++++++-- + include/linux/ftrace_event.h | 1 + include/linux/preempt.h | 33 +++++++++++++++++++++++++-- + include/linux/sched.h | 37 ++++++++++++++++++++++++++++++ + include/linux/thread_info.h | 13 ++++++++++ + kernel/Kconfig.preempt | 6 +++++ + kernel/sched/core.c | 49 ++++++++++++++++++++++++++++++++++++++++- + kernel/sched/fair.c | 16 ++++++------- + kernel/sched/features.h | 3 ++ + kernel/sched/sched.h | 9 +++++++ + kernel/trace/trace.c | 41 ++++++++++++++++++++-------------- + kernel/trace/trace.h | 2 + + kernel/trace/trace_output.c | 13 +++++++++- + 13 files changed, 201 insertions(+), 34 deletions(-) + +--- a/arch/x86/include/asm/preempt.h ++++ b/arch/x86/include/asm/preempt.h +@@ -85,17 +85,25 @@ static __always_inline void __preempt_co + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ +-static __always_inline bool __preempt_count_dec_and_test(void) ++static __always_inline bool ____preempt_count_dec_and_test(void) + { + GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); + } + ++static __always_inline bool __preempt_count_dec_and_test(void) ++{ ++ if (____preempt_count_dec_and_test()) ++ return true; ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ + /* + * Returns true when we need to resched and can (barring IRQ state). + */ + static __always_inline bool should_resched(void) + { +- return unlikely(!__this_cpu_read_4(__preempt_count)); ++ return unlikely(!__this_cpu_read_4(__preempt_count) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)); + } + + #ifdef CONFIG_PREEMPT +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h +@@ -59,6 +59,7 @@ struct trace_entry { + int pid; + unsigned short migrate_disable; + unsigned short padding; ++ unsigned char preempt_lazy_count; + }; + + #define FTRACE_MAX_EVENT \ +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -33,6 +33,20 @@ extern void preempt_count_sub(int val); + #define preempt_count_inc() preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif ++ + #ifdef CONFIG_PREEMPT_COUNT + + #define preempt_disable() \ +@@ -41,6 +55,12 @@ do { \ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ +@@ -69,6 +89,13 @@ do { \ + __preempt_schedule(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) ++ + #else + #define preempt_enable() \ + do { \ +@@ -99,7 +126,8 @@ do { \ + #define preempt_enable_notrace() \ + do { \ + barrier(); \ +- if (unlikely(__preempt_count_dec_and_test())) \ ++ if (unlikely(__preempt_count_dec_and_test() || \ ++ 
test_thread_flag(TIF_NEED_RESCHED_LAZY))) \ + __preempt_schedule_context(); \ + } while (0) + #else +@@ -122,7 +150,6 @@ do { \ + #define sched_preempt_enable_no_resched() barrier() + #define preempt_enable_no_resched() barrier() + #define preempt_enable() barrier() +-#define preempt_check_resched() do { } while (0) + + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() +@@ -147,7 +174,7 @@ do { \ + } while (0) + #define preempt_fold_need_resched() \ + do { \ +- if (tif_need_resched()) \ ++ if (tif_need_resched_now()) \ + set_preempt_need_resched(); \ + } while (0) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2650,6 +2650,43 @@ static inline int test_tsk_need_resched( + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} ++ ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#else ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#endif ++ + static inline int restart_syscall(void) + { + set_tsk_thread_flag(current, TIF_SIGPENDING); +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -118,7 +118,18 @@ static inline __deprecated void set_need + */ + } + +-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#ifdef CONFIG_PREEMPT_LAZY ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++ ++#else ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() (0) ++ ++#endif + + #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK + /* +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE + bool + select PREEMPT + ++config HAVE_PREEMPT_LAZY ++ bool ++ ++config PREEMPT_LAZY ++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL ++ + choice + prompt "Preemption Model" + default PREEMPT_NONE +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -539,6 +539,37 @@ void resched_task(struct task_struct *p) + smp_send_reschedule(cpu); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++void resched_task_lazy(struct task_struct *p) ++{ ++ int cpu; ++ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_task(p); ++ return; ++ } ++ ++ lockdep_assert_held(&task_rq(p)->lock); ++ ++ if (test_tsk_need_resched(p)) ++ return; ++ ++ if (test_tsk_need_resched_lazy(p)) ++ return; ++ ++ set_tsk_need_resched_lazy(p); ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED_LAZY must be visible before we test polling */ ++ smp_mb(); 
++ if (!tsk_is_polling(p)) ++ smp_send_reschedule(cpu); ++} ++#endif ++ + void resched_cpu(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -1882,6 +1913,9 @@ int sched_fork(unsigned long clone_flags + p->on_cpu = 0; + #endif + init_task_preempt_count(p); ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(p)->preempt_lazy_count = 0; ++#endif + #ifdef CONFIG_SMP + plist_node_init(&p->pushable_tasks, MAX_PRIO); + RB_CLEAR_NODE(&p->pushable_dl_tasks); +@@ -2664,6 +2698,7 @@ void migrate_disable(void) + } + + preempt_disable(); ++ preempt_lazy_disable(); + pin_current_cpu(); + p->migrate_disable = 1; + preempt_enable(); +@@ -2718,6 +2753,7 @@ void migrate_enable(void) + + unpin_current_cpu(); + preempt_enable(); ++ preempt_lazy_enable(); + } + EXPORT_SYMBOL(migrate_enable); + #else +@@ -2845,6 +2881,7 @@ static void __sched __schedule(void) + put_prev_task(rq, prev); + next = pick_next_task(rq); + clear_tsk_need_resched(prev); ++ clear_tsk_need_resched_lazy(prev); + clear_preempt_need_resched(); + rq->skip_clock_update = 0; + +@@ -2950,6 +2987,14 @@ asmlinkage void __sched notrace preempt_ + if (likely(!preemptible())) + return; + ++#ifdef CONFIG_PREEMPT_LAZY ++ /* ++ * Check for lazy preemption ++ */ ++ if (current_thread_info()->preempt_lazy_count && ++ !test_thread_flag(TIF_NEED_RESCHED)) ++ return; ++#endif + do { + __preempt_count_add(PREEMPT_ACTIVE); + /* +@@ -4694,7 +4739,9 @@ void init_idle(struct task_struct *idle, + + /* Set the preempt count _outside_ the spinlocks! */ + init_idle_preempt_count(idle, cpu); +- ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(idle)->preempt_lazy_count = 0; ++#endif + /* + * The idle tasks have their own, simple scheduling class: + */ +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -2679,7 +2679,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) { +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. +@@ -2703,7 +2703,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq + return; + + if (delta > ideal_runtime) +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + } + + static void +@@ -2824,7 +2824,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc + * validating it and just reschedule. + */ + if (queued) { +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + return; + } + /* +@@ -3013,7 +3013,7 @@ static void __account_cfs_rq_runtime(str + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + } + + static __always_inline +@@ -3612,7 +3612,7 @@ static void hrtick_start_fair(struct rq + + if (delta < 0) { + if (rq->curr == p) +- resched_task(p); ++ resched_task_lazy(p); + return; + } + +@@ -4477,7 +4477,7 @@ static void check_preempt_wakeup(struct + return; + + preempt: +- resched_task(curr); ++ resched_task_lazy(curr); + /* + * Only set the backward buddy when the current task is still + * on the rq. This can happen when a wakeup gets interleaved +@@ -6965,7 +6965,7 @@ static void task_fork_fair(struct task_s + * 'current' within the tree based on its new key value. 
+ */ + swap(curr->vruntime, se->vruntime); +- resched_task(rq->curr); ++ resched_task_lazy(rq->curr); + } + + se->vruntime -= cfs_rq->min_vruntime; +@@ -6990,7 +6990,7 @@ prio_changed_fair(struct rq *rq, struct + */ + if (rq->curr == p) { + if (p->prio > oldprio) +- resched_task(rq->curr); ++ resched_task_lazy(rq->curr); + } else + check_preempt_curr(rq, p, 0); + } +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -58,6 +58,9 @@ SCHED_FEAT(NONTASK_POWER, true) + SCHED_FEAT(TTWU_QUEUE, true) + #else + SCHED_FEAT(TTWU_QUEUE, false) ++# ifdef CONFIG_PREEMPT_LAZY ++SCHED_FEAT(PREEMPT_LAZY, true) ++# endif + #endif + + SCHED_FEAT(FORCE_SD_OVERLAP, false) +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1201,6 +1201,15 @@ extern void init_sched_dl_class(void); + extern void resched_task(struct task_struct *p); + extern void resched_cpu(int cpu); + ++#ifdef CONFIG_PREEMPT_LAZY ++extern void resched_task_lazy(struct task_struct *tsk); ++#else ++static inline void resched_task_lazy(struct task_struct *tsk) ++{ ++ resched_task(tsk); ++} ++#endif ++ + extern struct rt_bandwidth def_rt_bandwidth; + extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); + +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1541,6 +1541,7 @@ tracing_generic_entry_update(struct trac + struct task_struct *tsk = current; + + entry->preempt_count = pc & 0xff; ++ entry->preempt_lazy_count = preempt_lazy_count(); + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +@@ -1550,7 +1551,8 @@ tracing_generic_entry_update(struct trac + #endif + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | +- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); + + entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; +@@ -2459,15 +2461,17 @@ get_total_entries(struct trace_buffer *b + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n"); +- seq_puts(m, "# / _-----=> irqs-off \n"); +- seq_puts(m, "# | / _----=> need-resched \n"); +- seq_puts(m, "# || / _---=> hardirq/softirq \n"); +- seq_puts(m, "# ||| / _--=> preempt-depth \n"); +- seq_puts(m, "# |||| / _--=> migrate-disable\n"); +- seq_puts(m, "# ||||| / delay \n"); +- seq_puts(m, "# cmd pid |||||| time | caller \n"); +- seq_puts(m, "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _--------=> CPU# \n"); ++ seq_puts(m, "# / _-------=> irqs-off \n"); ++ seq_puts(m, "# | / _------=> need-resched \n"); ++ seq_puts(m, "# || / _-----=> need-resched_lazy \n"); ++ seq_puts(m, "# ||| / _----=> hardirq/softirq \n"); ++ seq_puts(m, "# |||| / _---=> preempt-depth \n"); ++ seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n"); ++ seq_puts(m, "# |||||| / _-=> migrate-disable \n"); ++ seq_puts(m, "# ||||||| / delay \n"); ++ seq_puts(m, "# cmd pid |||||||| time | caller \n"); ++ seq_puts(m, "# \\ / |||||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -2491,13 +2495,16 @@ static void print_func_help_header(struc + static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) + { + print_event_info(buf, m); +- seq_puts(m, "# _-----=> irqs-off\n"); +- seq_puts(m, "# / _----=> need-resched\n"); +- seq_puts(m, "# | / _---=> hardirq/softirq\n"); +- seq_puts(m, "# || / _--=> preempt-depth\n"); +- seq_puts(m, "# ||| / delay\n"); +- seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); +- seq_puts(m, "# | | | |||| | |\n"); ++ seq_puts(m, "# _-------=> irqs-off \n"); ++ seq_puts(m, "# / _------=> need-resched \n"); ++ seq_puts(m, "# |/ _-----=> need-resched_lazy \n"); ++ seq_puts(m, "# ||/ _----=> hardirq/softirq \n"); ++ seq_puts(m, "# |||/ _---=> preempt-depth \n"); ++ seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n"); ++ seq_puts(m, "# ||||| / _-=> migrate-disable \n"); ++ seq_puts(m, "# |||||| / delay\n"); ++ seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n"); ++ seq_puts(m, "# | | | |||||| | |\n"); + } + + void +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -118,6 +118,7 @@ struct kretprobe_trace_entry_head { + * NEED_RESCHED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler ++ * NEED_RESCHED_LAZY - lazy reschedule is requested + */ + enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, +@@ -126,6 +127,7 @@ enum trace_flag_type { + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_PREEMPT_RESCHED = 0x20, ++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40, + }; + + #define TRACE_BUF_SIZE 1024 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -606,6 +606,7 @@ int trace_print_lat_fmt(struct trace_seq + { + char hardsoft_irq; + char need_resched; ++ char need_resched_lazy; + char irqs_off; + int hardirq; + int softirq; +@@ -634,6 +635,8 @@ int trace_print_lat_fmt(struct trace_seq + need_resched = '.'; + break; + } ++ need_resched_lazy = ++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; + + hardsoft_irq = + (hardirq && softirq) ? 'H' : +@@ -641,8 +644,9 @@ int trace_print_lat_fmt(struct trace_seq + softirq ? 
's' : + '.'; + +- if (!trace_seq_printf(s, "%c%c%c", +- irqs_off, need_resched, hardsoft_irq)) ++ if (!trace_seq_printf(s, "%c%c%c%c", ++ irqs_off, need_resched, need_resched_lazy, ++ hardsoft_irq)) + return 0; + + if (entry->preempt_count) +@@ -650,6 +654,11 @@ int trace_print_lat_fmt(struct trace_seq + else + ret = trace_seq_putc(s, '.'); + ++ if (entry->preempt_lazy_count) ++ ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count); ++ else ++ ret = trace_seq_putc(s, '.'); ++ + if (entry->migrate_disable) + ret = trace_seq_printf(s, "%x", entry->migrate_disable); + else diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch new file mode 100644 index 000000000..c8fa59cff --- /dev/null +++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch @@ -0,0 +1,48 @@ +From: Thomas Gleixner +Date: Fri, 24 Jul 2009 12:38:56 +0200 +Subject: preempt: Provide preempt_*_(no)rt variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +RT needs a few preempt_disable/enable points which are not necessary +otherwise. Implement variants to avoid #ifdeffery. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/preempt.h | 18 +++++++++++++++++- + 1 file changed, 17 insertions(+), 1 deletion(-) + +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -47,7 +47,11 @@ do { \ + preempt_count_dec(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++#endif + + #ifdef CONFIG_PREEMPT + #define preempt_enable() \ +@@ -144,6 +148,18 @@ do { \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch new file mode 100644 index 000000000..49e8cc8bd --- /dev/null +++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -0,0 +1,36 @@ +Subject: printk: %27force_early_printk%27 boot param to help with debugging +From: Peter Zijlstra +Date: Fri, 02 Sep 2011 14:41:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Subject: printk: 'force_early_printk' boot param to help with debugging +From: Peter Zijlstra +Date: Fri Sep 02 14:29:33 CEST 2011 + +Gives me an option to screw printk and actually see what the machine +says. 
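In practice this is meant to be combined with an early console, e.g. by booting with force_early_printk together with a suitable earlyprintk= option for the platform (the exact earlyprintk= arguments are platform specific and not part of this patch). The sketch below is a standalone toy model of the resulting fallback, with invented names; the real gating is the forced_early_printk() check in vprintk_emit() introduced by the printk-kill patch further down.

  #include <stdarg.h>
  #include <stdbool.h>
  #include <stdio.h>

  static bool printk_killswitch;          /* what the boot parameter sets */

  /* stand-in for early_console->write() */
  static void early_vprintk(const char *fmt, va_list ap)
  {
      vfprintf(stderr, fmt, ap);
  }

  static int toy_printk(const char *fmt, ...)
  {
      va_list ap;
      int diverted = 0;

      va_start(ap, fmt);
      if (printk_killswitch) {            /* mirrors forced_early_printk() */
          early_vprintk(fmt, ap);
          diverted = 1;
      } else {
          vprintf(fmt, ap);               /* normal console path */
      }
      va_end(ap);
      return diverted;
  }

  int main(void)
  {
      printk_killswitch = true;           /* done at boot by the parameter */
      toy_printk("message goes to the early console path\n");
      return 0;
  }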
+ +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins +Signed-off-by: Thomas Gleixner +Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org +--- + kernel/printk/printk.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1513,6 +1513,13 @@ asmlinkage void early_printk(const char + */ + static bool __read_mostly printk_killswitch; + ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = true; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ + void printk_kill(void) + { + printk_killswitch = true; diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch new file mode 100644 index 000000000..97ecd9fa9 --- /dev/null +++ b/debian/patches/features/all/rt/printk-kill.patch @@ -0,0 +1,169 @@ +Subject: printk-kill.patch +From: Ingo Molnar +Date: Fri, 22 Jul 2011 17:58:40 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/printk.h | 3 + + kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++--------------- + kernel/watchdog.c | 15 ++++++++- + 3 files changed, 71 insertions(+), 26 deletions(-) + +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -109,9 +109,11 @@ int no_printk(const char *fmt, ...) + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); + void early_vprintk(const char *fmt, va_list ap); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } + #endif + + #ifdef CONFIG_PRINTK +@@ -145,7 +147,6 @@ extern int __printk_ratelimit(const char + #define printk_ratelimit() __printk_ratelimit(__func__) + extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); +- + extern int printk_delay_msec; + extern int dmesg_restrict; + extern int kptr_restrict; +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1483,6 +1483,55 @@ static size_t cont_print_text(char *text + return textlen; + } + ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++void early_vprintk(const char *fmt, va_list ap) ++{ ++ if (early_console) { ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ ++ early_console->write(early_console, buf, n); ++ } ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. 
++ */ ++static bool __read_mostly printk_killswitch; ++ ++void printk_kill(void) ++{ ++ printk_killswitch = true; ++} ++ ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; ++} ++#endif ++ + asmlinkage int vprintk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args) +@@ -1496,6 +1545,13 @@ asmlinkage int vprintk_emit(int facility + int this_cpu; + int printed_len = 0; + ++ /* ++ * Fall back to early_printk if a debugging subsystem has ++ * killed printk output ++ */ ++ if (unlikely(forced_early_printk(fmt, args))) ++ return 1; ++ + boot_delay_msec(level); + printk_delay(); + +@@ -1721,29 +1777,6 @@ static size_t cont_print_text(char *text + + #endif /* CONFIG_PRINTK */ + +-#ifdef CONFIG_EARLY_PRINTK +-struct console *early_console; +- +-void early_vprintk(const char *fmt, va_list ap) +-{ +- if (early_console) { +- char buf[512]; +- int n = vscnprintf(buf, sizeof(buf), fmt, ap); +- +- early_console->write(early_console, buf, n); +- } +-} +- +-asmlinkage void early_printk(const char *fmt, ...) +-{ +- va_list ap; +- +- va_start(ap, fmt); +- early_vprintk(fmt, ap); +- va_end(ap); +-} +-#endif +- + static int __add_preferred_console(char *name, int idx, char *options, + char *brl_options) + { +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -205,6 +205,8 @@ static int is_softlockup(unsigned long t + + #ifdef CONFIG_HARDLOCKUP_DETECTOR + ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); ++ + static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, +@@ -239,10 +241,19 @@ static void watchdog_overflow_callback(s + if (__this_cpu_read(hard_watchdog_warn) == true) + return; + +- if (hardlockup_panic) ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ if (hardlockup_panic) { + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); +- else ++ } else { ++ raw_spin_lock(&watchdog_output_lock); + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); ++ raw_spin_unlock(&watchdog_output_lock); ++ } + + __this_cpu_write(hard_watchdog_warn, true); + return; diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch new file mode 100644 index 000000000..5e09bbe71 --- /dev/null +++ b/debian/patches/features/all/rt/printk-rt-aware.patch @@ -0,0 +1,102 @@ +Subject: printk-rt-aware.patch +From: Thomas Gleixner +Date: Wed, 19 Sep 2012 14:50:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/printk/printk.c | 33 +++++++++++++++++++++++++++++---- + 1 file changed, 29 insertions(+), 4 deletions(-) + +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1267,6 +1267,7 @@ static void call_console_drivers(int lev + if (!console_drivers) + return; + ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -1279,6 +1280,7 @@ static void call_console_drivers(int lev + continue; + con->write(con, text, len); + } ++ migrate_enable(); + } + + /* +@@ -1338,12 +1340,18 @@ static inline int can_use_console(unsign + * interrupts disabled. 
It should return with 'lockbuf_lock' + * released but interrupts still disabled. + */ +-static int console_trylock_for_printk(unsigned int cpu) ++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags) + __releases(&logbuf_lock) + { + int retval = 0, wake = 0; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) && ++ (preempt_count() <= 1); ++#else ++ int lock = 1; ++#endif + +- if (console_trylock()) { ++ if (lock && console_trylock()) { + retval = 1; + + /* +@@ -1681,8 +1689,15 @@ asmlinkage int vprintk_emit(int facility + * The console_trylock_for_printk() function will release 'logbuf_lock' + * regardless of whether it actually gets the console semaphore or not. + */ +- if (console_trylock_for_printk(this_cpu)) ++ if (console_trylock_for_printk(this_cpu, flags)) { ++#ifndef CONFIG_PREEMPT_RT_FULL ++ console_unlock(); ++#else ++ raw_local_irq_restore(flags); + console_unlock(); ++ raw_local_irq_save(flags); ++#endif ++ } + + lockdep_on(); + out_restore_irqs: +@@ -2024,11 +2039,16 @@ static void console_cont_flush(char *tex + goto out; + + len = cont_print_text(text, size); ++#ifndef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); + call_console_drivers(cont.level, text, len); + start_critical_timings(); + local_irq_restore(flags); ++#else ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(cont.level, text, len); ++#endif + return; + out: + raw_spin_unlock_irqrestore(&logbuf_lock, flags); +@@ -2111,12 +2131,17 @@ void console_unlock(void) + console_idx = log_next(console_idx); + console_seq++; + console_prev = msg->flags; +- raw_spin_unlock(&logbuf_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); /* don't trace print latency */ + call_console_drivers(level, text, len); + start_critical_timings(); + local_irq_restore(flags); ++#else ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(level, text, len); ++#endif + } + console_locked = 0; + mutex_release(&console_lock_dep_map, 1, _RET_IP_); diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch new file mode 100644 index 000000000..fd7a1f5ca --- /dev/null +++ b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -0,0 +1,162 @@ +From 1bd263cbd3951f8f36ee6dcfe9160dafcfdd91fe Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 29 Aug 2013 18:21:04 +0200 +Subject: [PATCH] ptrace: fix ptrace vs tasklist_lock race +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +As explained by Alexander Fyodorov : + +|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel, +|and it can remove __TASK_TRACED from task->state (by moving it to +|task->saved_state). If parent does wait() on child followed by a sys_ptrace +|call, the following race can happen: +| +|- child sets __TASK_TRACED in ptrace_stop() +|- parent does wait() which eventually calls wait_task_stopped() and returns +| child's pid +|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves +| __TASK_TRACED flag to saved_state +|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive() + +The patch is based on his initial patch where an additional check is +added in case the __TASK_TRACED moved to ->saved_state. 
The pi_lock is +taken in case the caller is interrupted between looking into ->state and +->saved_state. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/sched.h | 48 +++++++++++++++++++++++++++++++++++++++++++++--- + kernel/ptrace.c | 7 ++++++- + kernel/sched/core.c | 19 ++++++++++++++++--- + 3 files changed, 67 insertions(+), 7 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -232,10 +232,7 @@ extern char ___assert_task_state[1 - 2*! + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +-#define task_is_stopped_or_traced(task) \ +- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + #define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0) +@@ -2608,6 +2605,51 @@ static inline int signal_pending_state(l + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + ++static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++{ ++ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#endif ++ return false; ++} ++ ++static inline bool task_is_stopped_or_traced(struct task_struct *task) ++{ ++ bool traced_stopped; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ traced_stopped = __task_is_stopped_or_traced(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#else ++ traced_stopped = __task_is_stopped_or_traced(task); ++#endif ++ return traced_stopped; ++} ++ ++static inline bool task_is_traced(struct task_struct *task) ++{ ++ bool traced = false; ++ ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; ++} ++ + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -135,7 +135,12 @@ static bool ptrace_freeze_traced(struct + + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !__fatal_signal_pending(task)) { +- task->state = __TASK_TRACED; ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ task->state = __TASK_TRACED; ++ else ++ task->saved_state = __TASK_TRACED; ++ raw_spin_unlock_irq(&task->pi_lock); + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1122,6 +1122,18 @@ struct migration_arg { + + static int migration_cpu_stop(void *data); + ++static bool check_task_state(struct task_struct *p, long match_state) ++{ ++ bool match = false; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ if (p->state == match_state || p->saved_state == match_state) ++ match = true; ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ return match; ++} ++ + /* + * wait_task_inactive - wait for a thread to unschedule. + * +@@ -1166,7 +1178,7 @@ unsigned long wait_task_inactive(struct + * is actually now running somewhere else! 
+ */ + while (task_running(rq, p)) { +- if (match_state && unlikely(p->state != match_state)) ++ if (match_state && !check_task_state(p, match_state)) + return 0; + cpu_relax(); + } +@@ -1181,7 +1193,8 @@ unsigned long wait_task_inactive(struct + running = task_running(rq, p); + on_rq = p->on_rq; + ncsw = 0; +- if (!match_state || p->state == match_state) ++ if (!match_state || p->state == match_state ++ || p->saved_state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_rq_unlock(rq, p, &flags); + +@@ -1685,7 +1698,7 @@ static void try_to_wake_up_local(struct + */ + int wake_up_process(struct task_struct *p) + { +- WARN_ON(task_is_stopped_or_traced(p)); ++ WARN_ON(__task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); + } + EXPORT_SYMBOL(wake_up_process); diff --git a/debian/patches/features/all/rt/radix-tree-rt-aware.patch b/debian/patches/features/all/rt/radix-tree-rt-aware.patch new file mode 100644 index 000000000..9d619c8dc --- /dev/null +++ b/debian/patches/features/all/rt/radix-tree-rt-aware.patch @@ -0,0 +1,69 @@ +Subject: radix-tree-rt-aware.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:33:18 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/radix-tree.h | 7 ++++++- + lib/radix-tree.c | 5 ++++- + 2 files changed, 10 insertions(+), 2 deletions(-) + +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h +@@ -230,8 +230,13 @@ unsigned long radix_tree_next_hole(struc + unsigned long index, unsigned long max_scan); + unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); ++#ifndef CONFIG_PREEMPT_RT_FULL + int radix_tree_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload(gfp_t gfp_mask); ++#else ++static inline int radix_tree_preload(gfp_t gm) { return 0; } ++static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } ++#endif + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -256,7 +261,7 @@ unsigned long radix_tree_locate_item(str + + static inline void radix_tree_preload_end(void) + { +- preempt_enable(); ++ preempt_enable_nort(); + } + + /** +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -221,12 +221,13 @@ radix_tree_node_alloc(struct radix_tree_ + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = &__get_cpu_var(radix_tree_preloads); ++ rtp = &get_cpu_var(radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes[rtp->nr - 1]; + rtp->nodes[rtp->nr - 1] = NULL; + rtp->nr--; + } ++ put_cpu_var(radix_tree_preloads); + } + if (ret == NULL) + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); +@@ -261,6 +262,7 @@ radix_tree_node_free(struct radix_tree_n + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail. 
On +@@ -326,6 +328,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m + return 0; + } + EXPORT_SYMBOL(radix_tree_maybe_preload); ++#endif + + /* + * Return the maximum key which can be store into a diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch new file mode 100644 index 000000000..f83ba37cd --- /dev/null +++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch @@ -0,0 +1,119 @@ +Subject: random: Make it work on rt +From: Thomas Gleixner +Date: Tue, 21 Aug 2012 20:38:50 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Delegate the random insertion to the forced threaded interrupt +handler. Store the return IP of the hard interrupt handler in the irq +descriptor and feed it into the random generator as a source of +entropy. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + drivers/char/random.c | 11 +++++++---- + include/linux/irqdesc.h | 1 + + include/linux/random.h | 2 +- + kernel/irq/handle.c | 8 +++++++- + kernel/irq/manage.c | 6 ++++++ + 5 files changed, 22 insertions(+), 6 deletions(-) + +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -832,21 +832,20 @@ EXPORT_SYMBOL_GPL(add_input_randomness); + + static DEFINE_PER_CPU(struct fast_pool, irq_randomness); + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 input[4], c_high, j_high; +- __u64 ip; + + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + input[0] = cycles ^ j_high ^ irq; + input[1] = now ^ c_high; +- ip = regs ? instruction_pointer(regs) : _RET_IP_; ++ if (!ip) ++ ip = _RET_IP_; + input[2] = ip; + input[3] = ip >> 32; + +@@ -858,7 +857,11 @@ void add_interrupt_randomness(int irq, i + fast_pool->last = now; + + r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; ++#ifndef CONFIG_PREEMPT_RT_FULL + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); ++#else ++ mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); ++#endif + /* + * If we don't have a valid cycle counter, and we see + * back-to-back timer interrupts, then skip giving credit for +--- a/include/linux/irqdesc.h ++++ b/include/linux/irqdesc.h +@@ -52,6 +52,7 @@ struct irq_desc { + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + #ifdef CONFIG_SMP +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -11,7 +11,7 @@ + extern void add_device_randomness(const void *, unsigned int); + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value); +-extern void add_interrupt_randomness(int irq, int irq_flags); ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); + + extern void get_random_bytes(void *buf, int nbytes); + extern void get_random_bytes_arch(void *buf, int nbytes); +--- a/kernel/irq/handle.c ++++ b/kernel/irq/handle.c +@@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_d + irqreturn_t + handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) + { ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? instruction_pointer(regs) : 0; + irqreturn_t retval = IRQ_NONE; + unsigned int flags = 0, irq = desc->irq_data.irq; + +@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc + action = action->next; + } while (action); + +- add_interrupt_randomness(irq, flags); ++#ifndef CONFIG_PREEMPT_RT_FULL ++ add_interrupt_randomness(irq, flags, ip); ++#else ++ desc->random_ip = ip; ++#endif + + if (!noirqdebug) + note_interrupt(irq, desc, retval); +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -947,6 +947,12 @@ static int irq_thread(void *data) + if (!noirqdebug) + note_interrupt(action->irq, desc, action_ret); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ migrate_disable(); ++ add_interrupt_randomness(action->irq, 0, ++ desc->random_ip ^ (unsigned long) action); ++ migrate_enable(); ++#endif + wake_threads_waitq(desc); + } + diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch new file mode 100644 index 000000000..cd7d3a2be --- /dev/null +++ b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -0,0 +1,409 @@ +From 410f7ce05ee95307fb938e8c8061c50bb451b933 Mon Sep 17 00:00:00 2001 +From: "Paul E. McKenney" +Date: Mon, 4 Nov 2013 13:21:10 -0800 +Subject: [PATCH] rcu: Eliminate softirq processing from rcutree +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Running RCU out of softirq is a problem for some workloads that would +like to manage RCU core processing independently of other softirq work, +for example, setting kthread priority. This commit therefore moves the +RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread +named rcuc. The SCHED_OTHER approach avoids the scalability problems +that appeared with the earlier attempt to move RCU core processing to +from softirq to kthreads. That said, kernels built with RCU_BOOST=y +will run the rcuc kthreads at the RCU-boosting priority. 
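Since the point is to let RCU core processing be managed like any other kthread, for example by giving it an explicit priority, here is a small hedged userspace sketch of doing that with sched_setscheduler(). It assumes you have already looked up the PID of an rcuc/N kthread (for instance with ps), and the SCHED_FIFO priority of 1 is only an arbitrary example, not a recommendation from the patch.

  #include <sched.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/types.h>

  /* Usage (as root): ./rcuc-prio <pid-of-rcuc-kthread> [prio] */
  int main(int argc, char **argv)
  {
      struct sched_param sp = { .sched_priority = 1 };
      pid_t pid;

      if (argc < 2) {
          fprintf(stderr, "usage: %s <pid> [prio]\n", argv[0]);
          return 1;
      }
      pid = (pid_t)atoi(argv[1]);
      if (argc > 2)
          sp.sched_priority = atoi(argv[2]);

      if (sched_setscheduler(pid, SCHED_FIFO, &sp) == -1) {
          perror("sched_setscheduler");
          return 1;
      }
      printf("pid %d is now SCHED_FIFO priority %d\n",
             (int)pid, sp.sched_priority);
      return 0;
  }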
+ +Reported-by: Thomas Gleixner +Tested-by: Mike Galbraith +Signed-off-by: Paul E. McKenney +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcu/tree.c | 113 ++++++++++++++++++++++++++++++++++----- + kernel/rcu/tree.h | 3 - + kernel/rcu/tree_plugin.h | 134 ++++------------------------------------------- + 3 files changed, 113 insertions(+), 137 deletions(-) + +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -56,6 +56,11 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include "../time/tick-internal.h" + + #include "tree.h" + #include +@@ -145,8 +150,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); + */ + static int rcu_scheduler_fully_active __read_mostly; + +-#ifdef CONFIG_RCU_BOOST +- + /* + * Control variables for per-CPU and per-rcu_node kthreads. These + * handle all flavors of RCU. +@@ -156,8 +159,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kth + DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); + DEFINE_PER_CPU(char, rcu_cpu_has_work); + +-#endif /* #ifdef CONFIG_RCU_BOOST */ +- + static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); + static void invoke_rcu_core(void); + static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); +@@ -2378,16 +2379,14 @@ static void + /* + * Do RCU core processing for the current CPU. + */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + struct rcu_state *rsp; + + if (cpu_is_offline(smp_processor_id())) + return; +- trace_rcu_utilization(TPS("Start RCU core")); + for_each_rcu_flavor(rsp) + __rcu_process_callbacks(rsp); +- trace_rcu_utilization(TPS("End RCU core")); + } + + /* +@@ -2401,18 +2400,105 @@ static void invoke_rcu_callbacks(struct + { + if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active))) + return; +- if (likely(!rsp->boost)) { +- rcu_do_batch(rsp, rdp); ++ rcu_do_batch(rsp, rdp); ++} ++ ++static void rcu_wake_cond(struct task_struct *t, int status) ++{ ++ /* ++ * If the thread is yielding, only wake it when this ++ * is invoked from idle ++ */ ++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) ++ wake_up_process(t); ++} ++ ++/* ++ * Wake up this CPU's rcuc kthread to do RCU core processing. ++ */ ++static void invoke_rcu_core(void) ++{ ++ unsigned long flags; ++ struct task_struct *t; ++ ++ if (!cpu_online(smp_processor_id())) + return; ++ local_irq_save(flags); ++ __this_cpu_write(rcu_cpu_has_work, 1); ++ t = __this_cpu_read(rcu_cpu_kthread_task); ++ if (t != NULL && current != t) ++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); ++ local_irq_restore(flags); ++} ++ ++static void rcu_cpu_kthread_park(unsigned int cpu) ++{ ++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; ++} ++ ++static int rcu_cpu_kthread_should_run(unsigned int cpu) ++{ ++ return __this_cpu_read(rcu_cpu_has_work); ++} ++ ++/* ++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the ++ * RCU softirq used in flavors and configurations of RCU that do not ++ * support RCU priority boosting. 
++ */ ++static void rcu_cpu_kthread(unsigned int cpu) ++{ ++ unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); ++ char work, *workp = &__get_cpu_var(rcu_cpu_has_work); ++ int spincnt; ++ ++ for (spincnt = 0; spincnt < 10; spincnt++) { ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); ++ local_bh_disable(); ++ *statusp = RCU_KTHREAD_RUNNING; ++ this_cpu_inc(rcu_cpu_kthread_loops); ++ local_irq_disable(); ++ work = *workp; ++ *workp = 0; ++ local_irq_enable(); ++ if (work) ++ rcu_process_callbacks(); ++ local_bh_enable(); ++ if (*workp == 0) { ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); ++ *statusp = RCU_KTHREAD_WAITING; ++ return; ++ } + } +- invoke_rcu_callbacks_kthread(); ++ *statusp = RCU_KTHREAD_YIELDING; ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); ++ schedule_timeout_interruptible(2); ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); ++ *statusp = RCU_KTHREAD_WAITING; + } + +-static void invoke_rcu_core(void) ++static struct smp_hotplug_thread rcu_cpu_thread_spec = { ++ .store = &rcu_cpu_kthread_task, ++ .thread_should_run = rcu_cpu_kthread_should_run, ++ .thread_fn = rcu_cpu_kthread, ++ .thread_comm = "rcuc/%u", ++ .setup = rcu_cpu_kthread_setup, ++ .park = rcu_cpu_kthread_park, ++}; ++ ++/* ++ * Spawn per-CPU RCU core processing kthreads. ++ */ ++static int __init rcu_spawn_core_kthreads(void) + { +- if (cpu_online(smp_processor_id())) +- raise_softirq(RCU_SOFTIRQ); ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ per_cpu(rcu_cpu_has_work, cpu) = 0; ++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); ++ return 0; + } ++early_initcall(rcu_spawn_core_kthreads); + + /* + * Handle any core-RCU processing required by a call_rcu() invocation. +@@ -3489,7 +3575,6 @@ void __init rcu_init(void) + rcu_init_one(&rcu_bh_state, &rcu_bh_data); + rcu_init_one(&rcu_sched_state, &rcu_sched_data); + __rcu_init_preempt(); +- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); + + /* + * We don't need protection against CPU-hotplug here because +--- a/kernel/rcu/tree.h ++++ b/kernel/rcu/tree.h +@@ -531,10 +531,9 @@ static void rcu_report_exp_rnp(struct rc + static void __init __rcu_init_preempt(void); + static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); + static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); +-static void invoke_rcu_callbacks_kthread(void); + static bool rcu_is_callbacks_kthread(void); ++static void rcu_cpu_kthread_setup(unsigned int cpu); + #ifdef CONFIG_RCU_BOOST +-static void rcu_preempt_do_callbacks(void); + static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp); + #endif /* #ifdef CONFIG_RCU_BOOST */ +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -24,12 +24,6 @@ + * Paul E. McKenney + */ + +-#include +-#include +-#include +-#include +-#include "../time/tick-internal.h" +- + #define RCU_KTHREAD_PRIO 1 + + #ifdef CONFIG_RCU_BOOST +@@ -670,15 +664,6 @@ static void rcu_preempt_check_callbacks( + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; + } + +-#ifdef CONFIG_RCU_BOOST +- +-static void rcu_preempt_do_callbacks(void) +-{ +- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); +-} +- +-#endif /* #ifdef CONFIG_RCU_BOOST */ +- + /* + * Queue a preemptible-RCU callback for invocation after a grace period. + */ +@@ -1146,6 +1131,19 @@ void exit_rcu(void) + + #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ + ++/* ++ * If boosting, set rcuc kthreads to realtime priority. 
++ */ ++static void rcu_cpu_kthread_setup(unsigned int cpu) ++{ ++#ifdef CONFIG_RCU_BOOST ++ struct sched_param sp; ++ ++ sp.sched_priority = RCU_KTHREAD_PRIO; ++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); ++#endif /* #ifdef CONFIG_RCU_BOOST */ ++} ++ + #ifdef CONFIG_RCU_BOOST + + #include "../locking/rtmutex_common.h" +@@ -1177,16 +1175,6 @@ static void rcu_initiate_boost_trace(str + + #endif /* #else #ifdef CONFIG_RCU_TRACE */ + +-static void rcu_wake_cond(struct task_struct *t, int status) +-{ +- /* +- * If the thread is yielding, only wake it when this +- * is invoked from idle +- */ +- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) +- wake_up_process(t); +-} +- + /* + * Carry out RCU priority boosting on the task indicated by ->exp_tasks + * or ->boost_tasks, advancing the pointer to the next task in the +@@ -1331,23 +1319,6 @@ static void rcu_initiate_boost(struct rc + } + + /* +- * Wake up the per-CPU kthread to invoke RCU callbacks. +- */ +-static void invoke_rcu_callbacks_kthread(void) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- __this_cpu_write(rcu_cpu_has_work, 1); +- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && +- current != __this_cpu_read(rcu_cpu_kthread_task)) { +- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), +- __this_cpu_read(rcu_cpu_kthread_status)); +- } +- local_irq_restore(flags); +-} +- +-/* + * Is the current CPU running the RCU-callbacks kthread? + * Caller must have preemption disabled. + */ +@@ -1402,67 +1373,6 @@ static int rcu_spawn_one_boost_kthread(s + return 0; + } + +-static void rcu_kthread_do_work(void) +-{ +- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); +- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); +- rcu_preempt_do_callbacks(); +-} +- +-static void rcu_cpu_kthread_setup(unsigned int cpu) +-{ +- struct sched_param sp; +- +- sp.sched_priority = RCU_KTHREAD_PRIO; +- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +-} +- +-static void rcu_cpu_kthread_park(unsigned int cpu) +-{ +- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +-} +- +-static int rcu_cpu_kthread_should_run(unsigned int cpu) +-{ +- return __this_cpu_read(rcu_cpu_has_work); +-} +- +-/* +- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the +- * RCU softirq used in flavors and configurations of RCU that do not +- * support RCU priority boosting. +- */ +-static void rcu_cpu_kthread(unsigned int cpu) +-{ +- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); +- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); +- int spincnt; +- +- for (spincnt = 0; spincnt < 10; spincnt++) { +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); +- local_bh_disable(); +- *statusp = RCU_KTHREAD_RUNNING; +- this_cpu_inc(rcu_cpu_kthread_loops); +- local_irq_disable(); +- work = *workp; +- *workp = 0; +- local_irq_enable(); +- if (work) +- rcu_kthread_do_work(); +- local_bh_enable(); +- if (*workp == 0) { +- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); +- *statusp = RCU_KTHREAD_WAITING; +- return; +- } +- } +- *statusp = RCU_KTHREAD_YIELDING; +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); +- schedule_timeout_interruptible(2); +- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); +- *statusp = RCU_KTHREAD_WAITING; +-} +- + /* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. 
The CPU hotplug lock is still +@@ -1496,27 +1406,14 @@ static void rcu_boost_kthread_setaffinit + free_cpumask_var(cm); + } + +-static struct smp_hotplug_thread rcu_cpu_thread_spec = { +- .store = &rcu_cpu_kthread_task, +- .thread_should_run = rcu_cpu_kthread_should_run, +- .thread_fn = rcu_cpu_kthread, +- .thread_comm = "rcuc/%u", +- .setup = rcu_cpu_kthread_setup, +- .park = rcu_cpu_kthread_park, +-}; +- + /* + * Spawn all kthreads -- called as soon as the scheduler is running. + */ + static int __init rcu_spawn_kthreads(void) + { + struct rcu_node *rnp; +- int cpu; + + rcu_scheduler_fully_active = 1; +- for_each_possible_cpu(cpu) +- per_cpu(rcu_cpu_has_work, cpu) = 0; +- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + rnp = rcu_get_root(rcu_state); + (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); + if (NUM_RCU_NODES > 1) { +@@ -1544,11 +1441,6 @@ static void rcu_initiate_boost(struct rc + raw_spin_unlock_irqrestore(&rnp->lock, flags); + } + +-static void invoke_rcu_callbacks_kthread(void) +-{ +- WARN_ON_ONCE(1); +-} +- + static bool rcu_is_callbacks_kthread(void) + { + return false; diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch new file mode 100644 index 000000000..a5b82edb6 --- /dev/null +++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch @@ -0,0 +1,25 @@ +Subject: rcu: Disable RCU_FAST_NO_HZ on RT +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 13:26:09 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This uses a timer_list timer from the irq disabled guts of the idle +code. Disable it for now to prevent wreckage. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + init/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -602,7 +602,7 @@ config RCU_FANOUT_EXACT + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP ++ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch new file mode 100644 index 000000000..e330649d2 --- /dev/null +++ b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch @@ -0,0 +1,28 @@ +From d8038d5a87f2a68ab1e18bdf63eef2b6b54f7f7d Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 21 Mar 2014 20:19:05 +0100 +Subject: [PATCH] rcu: make RCU_BOOST default on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Since it is no longer invoked from the softirq people run into OOM more +often if the priority of the RCU thread is too low. Making boosting +default on RT should help in those case and it can be switched off if +someone knows better. 
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + init/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -629,7 +629,7 @@ config TREE_RCU_TRACE + config RCU_BOOST + bool "Enable RCU priority boosting" + depends on RT_MUTEXES && PREEMPT_RCU +- default n ++ default y if PREEMPT_RT_FULL + help + This option boosts the priority of preempted RCU readers that + block the current preemptible RCU grace period for too long. diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch new file mode 100644 index 000000000..6228a1b1c --- /dev/null +++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch @@ -0,0 +1,261 @@ +Subject: rcu: Merge RCU-bh into RCU-preempt +Date: Wed, 5 Oct 2011 11:59:38 -0700 +From: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The Linux kernel has long RCU-bh read-side critical sections that +intolerably increase scheduling latency under mainline's RCU-bh rules, +which include RCU-bh read-side critical sections being non-preemptible. +This patch therefore arranges for RCU-bh to be implemented in terms of +RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. + +This has the downside of defeating the purpose of RCU-bh, namely, +handling the case where the system is subjected to a network-based +denial-of-service attack that keeps at least one CPU doing full-time +softirq processing. This issue will be fixed by a later commit. + +The current commit will need some work to make it appropriate for +mainline use, for example, it needs to be extended to cover Tiny RCU. + +[ paulmck: Added a useful changelog ] + +Signed-off-by: Thomas Gleixner +Signed-off-by: Paul E. McKenney +Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com +Signed-off-by: Thomas Gleixner + +--- + include/linux/rcupdate.h | 25 +++++++++++++++++++++++++ + include/linux/rcutree.h | 18 ++++++++++++++++-- + kernel/rcu/tree.c | 10 ++++++++++ + kernel/rcu/update.c | 2 ++ + 4 files changed, 53 insertions(+), 2 deletions(-) + +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -128,6 +128,9 @@ void call_rcu(struct rcu_head *head, + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -151,6 +154,7 @@ void call_rcu(struct rcu_head *head, + */ + void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); ++#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. +@@ -225,7 +229,13 @@ static inline int rcu_preempt_depth(void + /* Internal to kernel */ + void rcu_init(void); + void rcu_sched_qs(int cpu); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline void rcu_bh_qs(int cpu) { } ++#else + void rcu_bh_qs(int cpu); ++#endif ++ + void rcu_check_callbacks(int cpu, int user); + struct notifier_block; + void rcu_idle_enter(void); +@@ -370,7 +380,14 @@ static inline int rcu_read_lock_held(voi + * rcu_read_lock_bh_held() is defined out of line to avoid #include-file + * hell. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? +@@ -863,10 +880,14 @@ static inline void rcu_read_unlock(void) + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + rcu_lockdep_assert(rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -876,10 +897,14 @@ static inline void rcu_read_lock_bh(void + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + rcu_lockdep_assert(rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +--- a/include/linux/rcutree.h ++++ b/include/linux/rcutree.h +@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context + rcu_note_context_switch(cpu); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + void synchronize_rcu_bh(void); ++#endif + void synchronize_sched_expedited(void); + void synchronize_rcu_expedited(void); + +@@ -72,17 +76,19 @@ static inline void synchronize_rcu_bh_ex + } + + void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + void rcu_barrier_bh(void); ++#endif + void rcu_barrier_sched(void); + + extern unsigned long rcutorture_testseq; + extern unsigned long rcutorture_vernum; + long rcu_batches_completed(void); +-long rcu_batches_completed_bh(void); + long rcu_batches_completed_sched(void); + + void rcu_force_quiescent_state(void); +-void rcu_bh_force_quiescent_state(void); + void rcu_sched_force_quiescent_state(void); + + void exit_rcu(void); +@@ -92,4 +98,12 @@ extern int rcu_scheduler_active __read_m + + bool rcu_is_watching(void); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++void rcu_bh_force_quiescent_state(void); ++long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++#endif ++ + #endif /* __LINUX_RCUTREE_H */ +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -199,6 +199,7 @@ void rcu_sched_qs(int cpu) + rdp->passed_quiesce = 1; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void rcu_bh_qs(int cpu) + { + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); +@@ -207,6 +208,7 @@ void rcu_bh_qs(int cpu) + trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs")); + rdp->passed_quiesce = 1; + } ++#endif + + /* + * Note a context switch. This is a quiescent state for RCU-sched, +@@ -263,6 +265,7 @@ long rcu_batches_completed_sched(void) + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU BH batches processed thus far for debug & stats. + */ +@@ -280,6 +283,7 @@ void rcu_bh_force_quiescent_state(void) + force_quiescent_state(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); ++#endif + + /* + * Record the number of times rcutorture tests have been initiated and +@@ -2527,6 +2531,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue an RCU callback for invocation after a quicker grace period. 
+ */ +@@ -2535,6 +2540,7 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_state, -1, 0); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /* + * Because a context switch is a grace period for RCU-sched and RCU-bh, +@@ -2612,6 +2618,7 @@ void synchronize_sched(void) + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -2638,6 +2645,7 @@ void synchronize_rcu_bh(void) + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + static int synchronize_sched_expedited_cpu_stop(void *data) + { +@@ -3058,6 +3066,7 @@ static void _rcu_barrier(struct rcu_stat + mutex_unlock(&rsp->barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -3066,6 +3075,7 @@ void rcu_barrier_bh(void) + _rcu_barrier(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. +--- a/kernel/rcu/update.c ++++ b/kernel/rcu/update.c +@@ -140,6 +140,7 @@ int notrace debug_lockdep_rcu_enabled(vo + } + EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? + * +@@ -166,6 +167,7 @@ int rcu_read_lock_bh_held(void) + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + diff --git a/debian/patches/features/all/rt/rcu-more-swait-conversions.patch b/debian/patches/features/all/rt/rcu-more-swait-conversions.patch new file mode 100644 index 000000000..442c241df --- /dev/null +++ b/debian/patches/features/all/rt/rcu-more-swait-conversions.patch @@ -0,0 +1,117 @@ +From eddcd14571497d3d5d6ce7df0ee1bf2ecec72292 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Wed, 31 Jul 2013 19:00:35 +0200 +Subject: [PATCH] rcu-more-swait-conversions.patch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner + +Merged Steven's + + static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { +- swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + } + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcu/tree.h | 5 +++-- + kernel/rcu/tree_plugin.h | 16 ++++++++-------- + 2 files changed, 11 insertions(+), 10 deletions(-) + +--- a/kernel/rcu/tree.h ++++ b/kernel/rcu/tree.h +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + /* + * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and +@@ -200,7 +201,7 @@ struct rcu_node { + /* This can happen due to race conditions. */ + #endif /* #ifdef CONFIG_RCU_BOOST */ + #ifdef CONFIG_RCU_NOCB_CPU +- wait_queue_head_t nocb_gp_wq[2]; ++ struct swait_head nocb_gp_wq[2]; + /* Place for rcu_nocb_kthread() to wait GP. */ + #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ + int need_future_gp[2]; +@@ -334,7 +335,7 @@ struct rcu_data { + atomic_long_t nocb_q_count_lazy; /* (approximate). */ + int nocb_p_count; /* # CBs being invoked by kthread */ + int nocb_p_count_lazy; /* (approximate). */ +- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ ++ struct swait_head nocb_wq; /* For nocb kthreads to sleep on. 
*/ + struct task_struct *nocb_kthread; + bool nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ + #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -1977,7 +1977,7 @@ static int rcu_nocb_needs_gp(struct rcu_ + */ + static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) + { +- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + } + + /* +@@ -1995,8 +1995,8 @@ static void rcu_nocb_gp_set(struct rcu_n + + static void rcu_init_one_nocb(struct rcu_node *rnp) + { +- init_waitqueue_head(&rnp->nocb_gp_wq[0]); +- init_waitqueue_head(&rnp->nocb_gp_wq[1]); ++ init_swait_head(&rnp->nocb_gp_wq[0]); ++ init_swait_head(&rnp->nocb_gp_wq[1]); + } + + /* Is the specified CPU a no-CPUs CPU? */ +@@ -2041,7 +2041,7 @@ static void __call_rcu_nocb_enqueue(stru + len = atomic_long_read(&rdp->nocb_q_count); + if (old_rhpp == &rdp->nocb_head) { + if (!irqs_disabled_flags(flags)) { +- wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */ ++ swait_wake(&rdp->nocb_wq); /* ... if queue was empty ... */ + trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + TPS("WakeEmpty")); + } else { +@@ -2145,7 +2145,7 @@ static void rcu_nocb_wait_gp(struct rcu_ + */ + trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); + for (;;) { +- wait_event_interruptible( ++ swait_event_interruptible( + rnp->nocb_gp_wq[c & 0x1], + (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); + if (likely(d)) +@@ -2176,7 +2176,7 @@ static int rcu_nocb_kthread(void *arg) + if (!rcu_nocb_poll) { + trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + TPS("Sleep")); +- wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); ++ swait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); + /* Memory barrier provide by xchg() below. */ + } else if (firsttime) { + firsttime = 0; +@@ -2250,7 +2250,7 @@ static void do_nocb_deferred_wakeup(stru + if (!rcu_nocb_need_deferred_wakeup(rdp)) + return; + ACCESS_ONCE(rdp->nocb_defer_wakeup) = false; +- wake_up(&rdp->nocb_wq); ++ swait_wake(&rdp->nocb_wq); + trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty")); + } + +@@ -2258,7 +2258,7 @@ static void do_nocb_deferred_wakeup(stru + static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) + { + rdp->nocb_tail = &rdp->nocb_head; +- init_waitqueue_head(&rdp->nocb_wq); ++ init_swait_head(&rdp->nocb_wq); + } + + /* Create a kthread for each RCU flavor for each no-CBs CPU. */ diff --git a/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch new file mode 100644 index 000000000..fda2f1ccf --- /dev/null +++ b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch @@ -0,0 +1,28 @@ +Subject: rcu-more-fallout.patch +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 10:57:54 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/rcu/tiny.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/rcu/tiny.c ++++ b/kernel/rcu/tiny.c +@@ -372,6 +372,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Post an RCU bottom-half callback to be invoked after any subsequent + * quiescent state. 
+@@ -381,6 +382,7 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_ctrlblk); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + void rcu_init(void) + { diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch new file mode 100644 index 000000000..a1bf634bc --- /dev/null +++ b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch @@ -0,0 +1,50 @@ +From 9918ea31955628691dbfabf5dd9307ab40348958 Mon Sep 17 00:00:00 2001 +From: Tiejun Chen +Date: Wed, 18 Dec 2013 17:51:49 +0800 +Subject: [PATCH] rcutree/rcu_bh_qs: disable irq while calling rcu_preempt_qs() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Any callers to the function rcu_preempt_qs() must disable irqs in +order to protect the assignment to ->rcu_read_unlock_special. In +RT case, rcu_bh_qs() as the wrapper of rcu_preempt_qs() is called +in some scenarios where irq is enabled, like this path, + +do_single_softirq() + | + + local_irq_enable(); + + handle_softirq() + | | + | + rcu_bh_qs() + | | + | + rcu_preempt_qs() + | + + local_irq_disable() + +So here we'd better disable irq directly inside of rcu_bh_qs() to +fix this, otherwise the kernel may be freezable sometimes as +observed. And especially this way is also kind and safe for the +potential rcu_bh_qs() usage elsewhere in the future. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Tiejun Chen +Signed-off-by: Bin Jiang +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcu/tree.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -204,7 +204,12 @@ static void rcu_preempt_qs(int cpu); + + void rcu_bh_qs(int cpu) + { ++ unsigned long flags; ++ ++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ ++ local_irq_save(flags); + rcu_preempt_qs(cpu); ++ local_irq_restore(flags); + } + #else + void rcu_bh_qs(int cpu) diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch new file mode 100644 index 000000000..98156ee24 --- /dev/null +++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch @@ -0,0 +1,35 @@ +From: Yong Zhang +Date: Thu, 28 Jul 2011 11:16:00 +0800 +Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When retry happens, it's likely that the task has been migrated to +another cpu (except unplug failed), but it still derefernces the +original hotplug_pcp per cpu data. + +Update the pointer to hotplug_pcp in the retry path, so it points to +the current cpu. 
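The stale-pointer pattern being fixed is generic enough to be worth a stand-alone sketch (all names here are hypothetical; only the shape matches pin_current_cpu(), the real fix is the hunk below):

  #include <linux/percpu.h>

  struct demo_pcp {
          int     refcount;
  };
  static DEFINE_PER_CPU(struct demo_pcp, demo_pcp);

  static bool demo_fast_path(struct demo_pcp *hp)   /* stub for illustration */
  {
          return ++hp->refcount > 0;
  }

  static void demo_wait_for_unplug(void)            /* stub; may sleep and migrate */
  {
  }

  static void demo_pin_current_cpu(void)
  {
          struct demo_pcp *hp;

  retry:
          hp = &__get_cpu_var(demo_pcp);  /* must be (re)read on every retry ... */

          if (demo_fast_path(hp))
                  return;

          demo_wait_for_unplug();         /* ... because this may move the task */
          goto retry;
  }

Hoisting the assignment above the retry label reintroduces exactly the stale dereference this patch removes.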
+ +Signed-off-by: Yong Zhang +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -81,9 +81,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp + */ + void pin_current_cpu(void) + { +- struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ struct hotplug_pcp *hp; + + retry: ++ hp = &__get_cpu_var(hotplug_pcp); ++ + if (!hp->unplug || hp->refcount || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch new file mode 100644 index 000000000..aac4b2595 --- /dev/null +++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch @@ -0,0 +1,69 @@ +Subject: ARM: Initialize ptl->lock for vector page +From: Frank Rowand +Date: Sat, 1 Oct 2011 18:58:13 -0700 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if +PREEMPT_RT_FULL=y because vectors_user_mapping() creates a +VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no +ptl->lock has been allocated for the page. An attempt to coredump +that page will result in a kernel NULL pointer dereference when +follow_page() attempts to lock the page. + +The call tree to the NULL pointer dereference is: + + do_notify_resume() + get_signal_to_deliver() + do_coredump() + elf_core_dump() + get_dump_page() + __get_user_pages() + follow_page() + pte_offset_map_lock() <----- a #define + ... + rt_spin_lock() + +The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. + +Signed-off-by: Frank Rowand +Cc: Frank +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com +Signed-off-by: Thomas Gleixner +--- + arch/arm/kernel/process.c | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -432,6 +432,30 @@ unsigned long arch_randomize_brk(struct + } + + #ifdef CONFIG_MMU ++/* ++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not ++ * initialized by pgtable_page_ctor() then a coredump of the vector page will ++ * fail. 
++ */ ++static int __init vectors_user_mapping_init_page(void) ++{ ++ struct page *page; ++ unsigned long addr = 0xffff0000; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ page = pmd_page(*(pmd)); ++ ++ pgtable_page_ctor(page); ++ ++ return 0; ++} ++late_initcall(vectors_user_mapping_init_page); ++ + #ifdef CONFIG_KUSER_HELPERS + /* + * The vectors page is always readable from user space for the diff --git a/debian/patches/features/all/rt/read_lock-migrate_disable-pushdown-to-rt_read_lock.patch b/debian/patches/features/all/rt/read_lock-migrate_disable-pushdown-to-rt_read_lock.patch new file mode 100644 index 000000000..b8505dfac --- /dev/null +++ b/debian/patches/features/all/rt/read_lock-migrate_disable-pushdown-to-rt_read_lock.patch @@ -0,0 +1,174 @@ +From 64da626bb8995d097614a41b9d77235cacbb0740 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Thu, 2 Jan 2014 10:19:15 +0100 +Subject: [PATCH 5/7] read_lock migrate_disable pushdown to rt_read_lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +pushdown of migrate_disable/enable from read_*lock* to the rt_read_*lock* +api level + +general mapping to mutexes: + +read_*lock* + `-> rt_read_*lock* + `-> __spin_lock (the sleeping spin locks) + `-> rt_mutex + +The real read_lock* mapping: + + read_lock_irqsave -. +read_lock_irq `-> rt_read_lock_irqsave() + `->read_lock ---------. \ + read_lock_bh ------+ \ + `--> rt_read_lock() + if (rt_mutex_owner(lock) != current){ + `-> __rt_spin_lock() + rt_spin_lock_fastlock() + `->rt_mutex_cmpxchg() + migrate_disable() + } + rwlock->read_depth++; +read_trylock mapping: + +read_trylock + `-> rt_read_trylock + if (rt_mutex_owner(lock) != current){ + `-> rt_mutex_trylock() + rt_mutex_fasttrylock() + rt_mutex_cmpxchg() + migrate_disable() + } + rwlock->read_depth++; + +read_unlock* mapping: + +read_unlock_bh --------+ +read_unlock_irq -------+ +read_unlock_irqrestore + +read_unlock -----------+ + `-> rt_read_unlock() + if(--rwlock->read_depth==0){ + `-> __rt_spin_unlock() + rt_spin_lock_fastunlock() + `-> rt_mutex_cmpxchg() + migrate_disable() + } + +So calls to migrate_disable/enable() are better placed at the rt_read_* +level of lock/trylock/unlock as all of the read_*lock* API has this as a +common path. In the rt_read* API of lock/trylock/unlock the nesting level +is already being recorded in rwlock->read_depth, so we can push down the +migrate disable/enable to that level and condition it on the read_depth +going from 0 to 1 -> migrate_disable and 1 to 0 -> migrate_enable. This +eliminates the recursive calls that were needed when migrate_disable/enable +was done at the read_*lock* level. 
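Condensed from the hunks below (lockdep annotations and comments stripped), the resulting lock/unlock pair shows the 0 -> 1 / 1 -> 0 conditioning in one place:

  void rt_read_lock(rwlock_t *rwlock)             /* condensed view, not the full patch */
  {
          struct rt_mutex *lock = &rwlock->lock;

          if (rt_mutex_owner(lock) != current) {
                  __rt_spin_lock(lock);
                  migrate_disable();              /* only on the 0 -> 1 transition */
          }
          rwlock->read_depth++;
  }

  void rt_read_unlock(rwlock_t *rwlock)
  {
          if (--rwlock->read_depth == 0) {        /* only on the 1 -> 0 transition */
                  __rt_spin_unlock(&rwlock->lock);
                  migrate_enable();
          }
  }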
+ +The approach to read_*_bh also eliminates the concerns raised with the +regards to api inbalances (read_lock_bh -> read_unlock+local_bh_enable) + +Tested-by: Carsten Emde +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/rwlock_rt.h | 6 ------ + kernel/locking/rt.c | 20 +++++++++++++------- + 2 files changed, 13 insertions(+), 13 deletions(-) + +--- a/include/linux/rwlock_rt.h ++++ b/include/linux/rwlock_rt.h +@@ -33,7 +33,6 @@ extern void __rt_rwlock_init(rwlock_t *r + #define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ +- migrate_disable(); \ + flags = rt_read_lock_irqsave(lock); \ + } while (0) + +@@ -45,14 +44,12 @@ extern void __rt_rwlock_init(rwlock_t *r + + #define read_lock(lock) \ + do { \ +- migrate_disable(); \ + rt_read_lock(lock); \ + } while (0) + + #define read_lock_bh(lock) \ + do { \ + local_bh_disable(); \ +- migrate_disable(); \ + rt_read_lock(lock); \ + } while (0) + +@@ -74,13 +71,11 @@ extern void __rt_rwlock_init(rwlock_t *r + #define read_unlock(lock) \ + do { \ + rt_read_unlock(lock); \ +- migrate_enable(); \ + } while (0) + + #define read_unlock_bh(lock) \ + do { \ + rt_read_unlock(lock); \ +- migrate_enable(); \ + local_bh_enable(); \ + } while (0) + +@@ -104,7 +99,6 @@ extern void __rt_rwlock_init(rwlock_t *r + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_read_unlock(lock); \ +- migrate_enable(); \ + } while (0) + + #define write_unlock_irqrestore(lock, flags) \ +--- a/kernel/locking/rt.c ++++ b/kernel/locking/rt.c +@@ -211,17 +211,19 @@ int __lockfunc rt_read_trylock(rwlock_t + * but not when read_depth == 0 which means that the lock is + * write locked. + */ +- migrate_disable(); +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { + ret = rt_mutex_trylock(lock); +- else if (!rwlock->read_depth) ++ if (ret) ++ migrate_disable(); ++ ++ } else if (!rwlock->read_depth) { + ret = 0; ++ } + + if (ret) { + rwlock->read_depth++; + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); +- } else +- migrate_enable(); ++ } + + return ret; + } +@@ -244,8 +246,10 @@ void __lockfunc rt_read_lock(rwlock_t *r + /* + * recursive read locks succeed when current owns the lock + */ +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { + __rt_spin_lock(lock); ++ migrate_disable(); ++ } + rwlock->read_depth++; + } + +@@ -265,8 +269,10 @@ void __lockfunc rt_read_unlock(rwlock_t + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + + /* Release the lock only when read_depth is down to 0 */ +- if (--rwlock->read_depth == 0) ++ if (--rwlock->read_depth == 0) { + __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); ++ } + } + EXPORT_SYMBOL(rt_read_unlock); + diff --git a/debian/patches/features/all/rt/relay-fix-timer-madness.patch b/debian/patches/features/all/rt/relay-fix-timer-madness.patch new file mode 100644 index 000000000..6db2d767c --- /dev/null +++ b/debian/patches/features/all/rt/relay-fix-timer-madness.patch @@ -0,0 +1,53 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:07 -0500 +Subject: relay: fix timer madness +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +remove timer calls (!!!) from deep within the tracing infrastructure. +This was totally bogus code that can cause lockups and worse. Poll +the buffer every 2 jiffies for now. 
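The polling itself is the usual self-rearming timer; a minimal sketch of that pattern (names made up, relay uses buf->timer and wakeup_readers() as in the hunk below):

  #include <linux/timer.h>
  #include <linux/jiffies.h>

  static struct timer_list demo_timer;

  static void demo_poll(unsigned long data)
  {
          /* inspect the buffer and wake any readers here */
          mod_timer(&demo_timer, jiffies + 1);    /* re-arm for the next tick */
  }

  static void demo_poll_start(void)
  {
          setup_timer(&demo_timer, demo_poll, 0);
          mod_timer(&demo_timer, jiffies + 1);
  }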
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/relay.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -339,6 +339,10 @@ static void wakeup_readers(unsigned long + { + struct rchan_buf *buf = (struct rchan_buf *)data; + wake_up_interruptible(&buf->read_wait); ++ /* ++ * Stupid polling for now: ++ */ ++ mod_timer(&buf->timer, jiffies + 1); + } + + /** +@@ -356,6 +360,7 @@ static void __relay_reset(struct rchan_b + init_waitqueue_head(&buf->read_wait); + kref_init(&buf->kref); + setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); ++ mod_timer(&buf->timer, jiffies + 1); + } else + del_timer_sync(&buf->timer); + +@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_ + else + buf->early_bytes += buf->chan->subbuf_size - + buf->padding[old_subbuf]; +- smp_mb(); +- if (waitqueue_active(&buf->read_wait)) +- /* +- * Calling wake_up_interruptible() from here +- * will deadlock if we happen to be logging +- * from the scheduler (trying to re-grab +- * rq->lock), so defer it. +- */ +- mod_timer(&buf->timer, jiffies + 1); + } + + old = buf->data; diff --git a/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch new file mode 100644 index 000000000..a435ac8e4 --- /dev/null +++ b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch @@ -0,0 +1,87 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:33 -0500 +Subject: core: Do not disable interrupts on RT in res_counter.c +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Frederic Weisbecker reported this warning: + +[ 45.228562] BUG: sleeping function called from invalid context at kernel/rtmutex.c:683 +[ 45.228571] in_atomic(): 0, irqs_disabled(): 1, pid: 4290, name: ntpdate +[ 45.228576] INFO: lockdep is turned off. +[ 45.228580] irq event stamp: 0 +[ 45.228583] hardirqs last enabled at (0): [<(null)>] (null) +[ 45.228589] hardirqs last disabled at (0): [] copy_process+0x68d/0x1500 +[ 45.228602] softirqs last enabled at (0): [] copy_process+0x68d/0x1500 +[ 45.228609] softirqs last disabled at (0): [<(null)>] (null) +[ 45.228617] Pid: 4290, comm: ntpdate Tainted: G W 2.6.29-rc4-rt1-tip #1 +[ 45.228622] Call Trace: +[ 45.228632] [] ? print_irqtrace_events+0xd0/0xe0 +[ 45.228639] [] __might_sleep+0x113/0x130 +[ 45.228646] [] rt_spin_lock+0xa1/0xb0 +[ 45.228653] [] res_counter_charge+0x5d/0x130 +[ 45.228660] [] __mem_cgroup_try_charge+0x7f/0x180 +[ 45.228667] [] mem_cgroup_charge_common+0x57/0x90 +[ 45.228674] [] ? ftrace_call+0x5/0x2b +[ 45.228680] [] mem_cgroup_newpage_charge+0x5d/0x60 +[ 45.228688] [] __do_fault+0x29e/0x4c0 +[ 45.228694] [] ? rt_spin_unlock+0x23/0x80 +[ 45.228700] [] handle_mm_fault+0x205/0x890 +[ 45.228707] [] ? ftrace_call+0x5/0x2b +[ 45.228714] [] do_page_fault+0x11e/0x2a0 +[ 45.228720] [] page_fault+0x25/0x30 +[ 45.228727] [] ? __clear_user+0x3d/0x70 +[ 45.228733] [] ? __clear_user+0x21/0x70 + +The reason is the raw IRQ flag use of kernel/res_counter.c. + +The irq flags tricks there seem a bit pointless: it cannot protect the +c->parent linkage because local_irq_save() is only per CPU. + +So replace it with _nort(). This code needs a second look. 
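The _nort() helpers are introduced by a separate patch in this series: on PREEMPT_RT_FULL they keep interrupts enabled, otherwise they map to the plain local_irq_* operations. A sketch of the idea (the RT-side bodies here are an approximation, not copied from the series):

  #ifdef CONFIG_PREEMPT_RT_FULL
  # define local_irq_save_nort(flags)     local_save_flags(flags)   /* approximation */
  # define local_irq_restore_nort(flags)  (void)(flags)             /* approximation */
  #else
  # define local_irq_save_nort(flags)     local_irq_save(flags)
  # define local_irq_restore_nort(flags)  local_irq_restore(flags)
  #endif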
+ +Reported-by: Frederic Weisbecker +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/res_counter.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/kernel/res_counter.c ++++ b/kernel/res_counter.c +@@ -49,7 +49,7 @@ static int __res_counter_charge(struct r + + r = ret = 0; + *limit_fail_at = NULL; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != NULL; c = c->parent) { + spin_lock(&c->lock); + r = res_counter_charge_locked(c, val, force); +@@ -69,7 +69,7 @@ static int __res_counter_charge(struct r + spin_unlock(&u->lock); + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return ret; + } +@@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct re + struct res_counter *c; + u64 ret = 0; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != top; c = c->parent) { + u64 r; + spin_lock(&c->lock); +@@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct re + ret = r; + spin_unlock(&c->lock); + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + diff --git a/debian/patches/features/all/rt/rt-Cleanup-of-unnecessary-do-while-0-in-read-write-_.patch b/debian/patches/features/all/rt/rt-Cleanup-of-unnecessary-do-while-0-in-read-write-_.patch new file mode 100644 index 000000000..2c534c026 --- /dev/null +++ b/debian/patches/features/all/rt/rt-Cleanup-of-unnecessary-do-while-0-in-read-write-_.patch @@ -0,0 +1,68 @@ +From d72e9cb0b85791306e5af8ff2a5ac410ae00efe7 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Sat, 8 Feb 2014 12:39:20 +0100 +Subject: [PATCH] rt: Cleanup of unnecessary do while 0 in read/write _lock() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +With the migration pushdonw a few of the do{ }while(0) +loops became obsolete but got left over - this patch +only removes this fallout. 
+ +Patch applies on top of 3.12.9-rt13 + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/rwlock_rt.h | 20 ++++---------------- + 1 file changed, 4 insertions(+), 16 deletions(-) + +--- a/include/linux/rwlock_rt.h ++++ b/include/linux/rwlock_rt.h +@@ -42,10 +42,7 @@ extern void __rt_rwlock_init(rwlock_t *r + flags = rt_write_lock_irqsave(lock); \ + } while (0) + +-#define read_lock(lock) \ +- do { \ +- rt_read_lock(lock); \ +- } while (0) ++#define read_lock(lock) rt_read_lock(lock) + + #define read_lock_bh(lock) \ + do { \ +@@ -55,10 +52,7 @@ extern void __rt_rwlock_init(rwlock_t *r + + #define read_lock_irq(lock) read_lock(lock) + +-#define write_lock(lock) \ +- do { \ +- rt_write_lock(lock); \ +- } while (0) ++#define write_lock(lock) rt_write_lock(lock) + + #define write_lock_bh(lock) \ + do { \ +@@ -68,10 +62,7 @@ extern void __rt_rwlock_init(rwlock_t *r + + #define write_lock_irq(lock) write_lock(lock) + +-#define read_unlock(lock) \ +- do { \ +- rt_read_unlock(lock); \ +- } while (0) ++#define read_unlock(lock) rt_read_unlock(lock) + + #define read_unlock_bh(lock) \ + do { \ +@@ -81,10 +72,7 @@ extern void __rt_rwlock_init(rwlock_t *r + + #define read_unlock_irq(lock) read_unlock(lock) + +-#define write_unlock(lock) \ +- do { \ +- rt_write_unlock(lock); \ +- } while (0) ++#define write_unlock(lock) rt_write_unlock(lock) + + #define write_unlock_bh(lock) \ + do { \ diff --git a/debian/patches/features/all/rt/rt-Make-cpu_chill-use-hrtimer-instead-of-msleep.patch b/debian/patches/features/all/rt/rt-Make-cpu_chill-use-hrtimer-instead-of-msleep.patch new file mode 100644 index 000000000..90c4a11e7 --- /dev/null +++ b/debian/patches/features/all/rt/rt-Make-cpu_chill-use-hrtimer-instead-of-msleep.patch @@ -0,0 +1,90 @@ +From 22ba430bbf9fc42367921a40e2e8a6327a84ff33 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Wed, 5 Feb 2014 11:51:25 -0500 +Subject: [PATCH] rt: Make cpu_chill() use hrtimer instead of msleep() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Ulrich Obergfell pointed out that cpu_chill() calls msleep() which is woken +up by the ksoftirqd running the TIMER softirq. But as the cpu_chill() is +called from softirq context, it may block the ksoftirqd() from running, in +which case, it may never wake up the msleep() causing the deadlock. + +I checked the vmcore, and irq/74-qla2xxx is stuck in the msleep() call, +running on CPU 8. The one ksoftirqd that is stuck, happens to be the one that +runs on CPU 8, and it is blocked on a lock held by irq/74-qla2xxx. As that +ksoftirqd is the one that will wake up irq/74-qla2xxx, and it happens to be +blocked on a lock that irq/74-qla2xxx holds, we have our deadlock. + +The solution is not to convert the cpu_chill() back to a cpu_relax() as that +will re-create a possible live lock that the cpu_chill() fixed earlier, and may +also leave this bug open on other softirqs. The fix is to remove the +dependency on ksoftirqd from cpu_chill(). That is, instead of calling +msleep() that requires ksoftirqd to wake it up, use the +hrtimer_nanosleep() code that does the wakeup from hard irq context. + +|Looks to be the lock of the block softirq. I don't have the core dump +|anymore, but from what I could tell the ksoftirqd was blocked on the +|block softirq lock, where the block softirq handler did a msleep +|(called by the qla2xxx interrupt handler). 
+| +|Looking at trigger_softirq() in block/blk-softirq.c, it can do a +|smp_callfunction() to another cpu to run the block softirq. If that +|happens to be the cpu where the qla2xx irq handler is doing the block +|softirq and is in a middle of a msleep(), I believe the ksoftirqd will +|try to run the softirq. If it does that, then BOOM, it's deadlocked +|because the ksoftirqd will never run the timer softirq either. + +|I should have also stated that it was only one lock that was involved. +|But the lock owner was doing a msleep() that requires a wakeup by +|ksoftirqd to continue. If ksoftirqd happens to be blocked on a lock +|held by the msleep() caller, then you have your deadlock. +| +|It's best not to have any softirqs going to sleep requiring another +|softirq to wake it up. Note, if we ever require a timer softirq to do a +|cpu_chill() it will most definitely hit this deadlock. + +Cc: stable-rt@vger.kernel.org +Found-by: Ulrich Obergfell +Signed-off-by: Steven Rostedt +[bigeasy: add the 4 | chapters from email] +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/delay.h | 2 +- + kernel/hrtimer.c | 15 +++++++++++++++ + 2 files changed, 16 insertions(+), 1 deletion(-) + +--- a/include/linux/delay.h ++++ b/include/linux/delay.h +@@ -53,7 +53,7 @@ static inline void ssleep(unsigned int s + } + + #ifdef CONFIG_PREEMPT_RT_FULL +-# define cpu_chill() msleep(1) ++extern void cpu_chill(void); + #else + # define cpu_chill() cpu_relax() + #endif +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1891,6 +1891,21 @@ SYSCALL_DEFINE2(nanosleep, struct timesp + return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Sleep for 1 ms in hope whoever holds what we want will let it go. ++ */ ++void cpu_chill(void) ++{ ++ struct timespec tu = { ++ .tv_nsec = NSEC_PER_MSEC, ++ }; ++ ++ hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); ++} ++EXPORT_SYMBOL(cpu_chill); ++#endif ++ + /* + * Functions related to boot-time initialization: + */ diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch new file mode 100644 index 000000000..97db24940 --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch @@ -0,0 +1,904 @@ +From: Thomas Gleixner +Date: Sun, 26 Jul 2009 19:39:56 +0200 +Subject: rt: Add the preempt-rt lock replacement APIs +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex +based locking functions for preempt-rt. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/locallock.h | 6 + include/linux/rwlock_rt.h | 123 ++++++++++ + include/linux/spinlock.h | 12 - + include/linux/spinlock_api_smp.h | 4 + include/linux/spinlock_rt.h | 155 +++++++++++++ + kernel/locking/Makefile | 7 + kernel/locking/rt.c | 442 +++++++++++++++++++++++++++++++++++++++ + kernel/locking/spinlock.c | 7 + kernel/locking/spinlock_debug.c | 5 + 9 files changed, 758 insertions(+), 3 deletions(-) + +--- a/include/linux/locallock.h ++++ b/include/linux/locallock.h +@@ -42,9 +42,15 @@ struct local_irq_lock { + * already takes care of the migrate_disable/enable + * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define spin_lock_local(lock) rt_spin_lock(lock) ++# define spin_trylock_local(lock) rt_spin_trylock(lock) ++# define spin_unlock_local(lock) rt_spin_unlock(lock) ++#else + # define spin_lock_local(lock) spin_lock(lock) + # define spin_trylock_local(lock) spin_trylock(lock) + # define spin_unlock_local(lock) spin_unlock(lock) ++#endif + + static inline void __local_lock(struct local_irq_lock *lv) + { +--- /dev/null ++++ b/include/linux/rwlock_rt.h +@@ -0,0 +1,123 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) ++ ++#define read_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ 
++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#endif +--- a/include/linux/spinlock.h ++++ b/include/linux/spinlock.h +@@ -272,7 +272,11 @@ static inline void do_raw_spin_unlock(ra + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + + /* Include rwlock functions */ +-#include ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++# include ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: +@@ -283,6 +287,10 @@ static inline void do_raw_spin_unlock(ra + # include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -412,4 +420,6 @@ extern int _atomic_dec_and_lock(atomic_t + #define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_SPINLOCK_H */ +--- a/include/linux/spinlock_api_smp.h ++++ b/include/linux/spinlock_api_smp.h +@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh( + return 0; + } + +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +--- /dev/null ++++ b/include/linux/spinlock_rt.h +@@ -0,0 +1,155 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++#include ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. 
++ */ ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++ ++#define spin_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) ++ ++#endif +--- a/kernel/locking/Makefile ++++ b/kernel/locking/Makefile +@@ -1,5 +1,5 @@ + +-obj-y += mutex.o semaphore.o rwsem.o lglock.o ++obj-y += semaphore.o lglock.o + + ifdef CONFIG_FUNCTION_TRACER + CFLAGS_REMOVE_lockdep.o = -pg +@@ -8,7 +8,11 @@ CFLAGS_REMOVE_mutex-debug.o = -pg + CFLAGS_REMOVE_rtmutex-debug.o = -pg + endif + ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++obj-y += rwsem.o ++endif + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -25,3 +29,4 @@ 
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += + obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o + endif + obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o ++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o +--- /dev/null ++++ b/kernel/locking/rt.c +@@ -0,0 +1,442 @@ ++/* ++ * kernel/rt.c ++ * ++ * Real-Time Preemption Support ++ * ++ * started by Ingo Molnar: ++ * ++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * ++ * historic credit for proving that Linux spinlocks can be implemented via ++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow ++ * and others) who prototyped it on 2.4 and did lots of comparative ++ * research and analysis; TimeSys, for proving that you can implement a ++ * fully preemptible kernel via the use of IRQ threading and mutexes; ++ * Bill Huey for persuasively arguing on lkml that the mutex model is the ++ * right one; and to MontaVista, who ported pmutexes to 2.6. ++ * ++ * This code is a from-scratch implementation and is not based on pmutexes, ++ * but the idea of converting spinlocks to mutexes is used here too. ++ * ++ * lock debugging, locking tree, deadlock detection: ++ * ++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey ++ * Released under the General Public License (GPL). ++ * ++ * Includes portions of the generic R/W semaphore implementation from: ++ * ++ * Copyright (c) 2001 David Howells (dhowells@redhat.com). ++ * - Derived partially from idea by Andrea Arcangeli ++ * - Derived also from comments by Linus ++ * ++ * Pending ownership of locks and ownership stealing: ++ * ++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt ++ * ++ * (also by Steven Rostedt) ++ * - Converted single pi_lock to individual task locks. ++ * ++ * By Esben Nielsen: ++ * Doing priority inheritance with help of the scheduler. ++ * ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * - major rework based on Esben Nielsens initial patch ++ * - replaced thread_info references by task_struct refs ++ * - removed task->pending_owner dependency ++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks ++ * in the scheduler return path as discussed with Steven Rostedt ++ * ++ * Copyright (C) 2006, Kihon Technologies Inc. ++ * Steven Rostedt ++ * - debugged and patched Thomas Gleixner's rework. ++ * - added back the cmpxchg to the rework. ++ * - turned atomic require back on for SMP. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int __lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/* ++ * rwlock_t functions ++ */ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret = rt_mutex_trylock(&rwlock->lock); ++ ++ migrate_disable(); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_write_trylock(rwlock); ++ if (!ret) ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock_irqsave); ++ ++int 
__lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the lock, ++ * but not when read_depth == 0 which means that the lock is ++ * write locked. ++ */ ++ migrate_disable(); ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(lock); ++ else if (!rwlock->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwlock->read_depth++; ++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); ++ } else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ ++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); ++ ++ /* ++ * recursive read locks succeed when current owns the lock ++ */ ++ if (rt_mutex_owner(lock) != current) ++ __rt_spin_lock(lock); ++ rwlock->read_depth++; ++} ++ ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ ++ /* Release the lock only when read_depth is down to 0 */ ++ if (--rwlock->read_depth == 0) ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_write_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_write_lock_irqsave); ++ ++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_read_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_read_lock_irqsave); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); ++ lockdep_init_map(&rwlock->dep_map, name, key, 0); ++#endif ++ rwlock->lock.save_state = 1; ++ rwlock->read_depth = 0; ++} ++EXPORT_SYMBOL(__rt_rwlock_init); ++ ++/* ++ * rw_semaphores ++ */ ++ ++void rt_up_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_write); ++ ++void rt_up_read(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ if (--rwsem->read_depth == 0) ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_read); ++ ++/* ++ * downgrade a write lock into a read lock ++ * - just wake up any readers at the front of the queue ++ */ ++void rt_downgrade_write(struct rw_semaphore *rwsem) ++{ ++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); ++ rwsem->read_depth = 1; ++} ++EXPORT_SYMBOL(rt_downgrade_write); ++ ++int rt_down_write_trylock(struct rw_semaphore *rwsem) ++{ ++ int ret = rt_mutex_trylock(&rwsem->lock); ++ ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_write_trylock); ++ ++void rt_down_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write); ++ ++void rt_down_write_nested(struct rw_semaphore *rwsem, 
int subclass) ++{ ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write_nested); ++ ++int rt_down_read_trylock(struct rw_semaphore *rwsem) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the rwsem, ++ * but not when read_depth == 0 which means that the rwsem is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(&rwsem->lock); ++ else if (!rwsem->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwsem->read_depth++; ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_read_trylock); ++ ++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ ++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ ++ if (rt_mutex_owner(lock) != current) ++ rt_mutex_lock(&rwsem->lock); ++ rwsem->read_depth++; ++} ++ ++void rt_down_read(struct rw_semaphore *rwsem) ++{ ++ __rt_down_read(rwsem, 0); ++} ++EXPORT_SYMBOL(rt_down_read); ++ ++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) ++{ ++ __rt_down_read(rwsem, subclass); ++} ++EXPORT_SYMBOL(rt_down_read_nested); ++ ++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); ++ lockdep_init_map(&rwsem->dep_map, name, key, 0); ++#endif ++ rwsem->read_depth = 0; ++ rwsem->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__rt_rwsem_init); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +--- a/kernel/locking/spinlock.c ++++ b/kernel/locking/spinlock.c +@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_ + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwl + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +--- a/kernel/locking/spinlock_debug.c ++++ b/kernel/locking/spinlock_debug.c +@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 
@@ void __rwlock_init(rwlock_t *lock, const + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t * + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock) + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch new file mode 100644 index 000000000..4107654f2 --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch @@ -0,0 +1,121 @@ +Subject: rt-add-rt-spinlocks.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 19:43:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types_rt.h | 33 ++++++++++++++++++++++++ + include/linux/spinlock_types.h | 11 +++++--- + include/linux/spinlock_types_rt.h | 51 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 92 insertions(+), 3 deletions(-) + +--- /dev/null ++++ b/include/linux/rwlock_types_rt.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) ++ ++#endif +--- a/include/linux/spinlock_types.h ++++ b/include/linux/spinlock_types.h +@@ -11,8 +11,13 @@ + + #include + +-#include +- +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++# include ++#else ++# include ++# include ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_TYPES_H */ +--- /dev/null ++++ b/include/linux/spinlock_types_rt.h +@@ -0,0 +1,51 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++#include ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define __DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch new file mode 100644 index 000000000..633ae9878 --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch @@ -0,0 +1,141 @@ +Subject: rt-add-rt-to-mutex-headers.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 20:56:22 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/mutex.h | 20 +++++++---- + include/linux/mutex_rt.h | 84 +++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 97 insertions(+), 7 deletions(-) + +--- a/include/linux/mutex.h ++++ b/include/linux/mutex.h +@@ -18,6 +18,17 @@ + #include + #include + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -99,13 +110,6 @@ do { \ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killa + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); + ++#endif /* !PREEMPT_RT_FULL */ ++ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #ifndef arch_mutex_cpu_relax +--- /dev/null ++++ b/include/linux/mutex_rt.h +@@ -0,0 +1,84 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include ++ ++/* FIXME: Just for __lockfunc */ ++#include ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = 
__MUTEX_INITIALIZER(mutexname) ++ ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); ++ ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) ++ ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) ++ ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++#endif ++ ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) ++ ++# define __mutex_init(mutex, name, key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch new file mode 100644 index 000000000..22e1d97e2 --- /dev/null +++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch @@ -0,0 +1,29 @@ +Subject: rt: Introduce cpu_chill() +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 20:51:03 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() +defaults to cpu_relax() for non RT. On RT it puts the looping task to +sleep for a tick so the preempted task can make progress. 
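+
+As a minimal sketch of the retry-loop pattern this helper targets (the
+lock names and the function below are illustrative placeholders, not a
+real call site):
+
+    #include <linux/delay.h>     /* cpu_chill() */
+    #include <linux/spinlock.h>
+
+    static void lock_both(spinlock_t *a, spinlock_t *b)
+    {
+    again:
+            spin_lock(a);
+            if (!spin_trylock(b)) {
+                    spin_unlock(a);
+                    /*
+                     * cpu_relax() here can livelock on RT when the
+                     * holder of 'b' was preempted; cpu_chill() sleeps
+                     * for a tick so the holder can make progress.
+                     */
+                    cpu_chill();
+                    goto again;
+            }
+            /* both locks held */
+            spin_unlock(b);
+            spin_unlock(a);
+    }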
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/delay.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/include/linux/delay.h ++++ b/include/linux/delay.h +@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define cpu_chill() msleep(1) ++#else ++# define cpu_chill() cpu_relax() ++#endif ++ + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch new file mode 100644 index 000000000..6082b6420 --- /dev/null +++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch @@ -0,0 +1,267 @@ +Subject: rt-local-irq-lock.patch +From: Thomas Gleixner +Date: Mon, 20 Jun 2011 09:03:47 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/locallock.h | 254 ++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 254 insertions(+) + +--- /dev/null ++++ b/include/linux/locallock.h +@@ -0,0 +1,254 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ 
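++ /*
++  * Unlike __local_unlock_irqrestore() this is unconditional:
++  * ownership and the full nesting count are dropped, not just
++  * one level.
++  */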
++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __get_cpu_var(lvar).flags; \ ++ } while (0) ++ ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) ++ ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ &__get_cpu_var(var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar) ++ ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ 
spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch new file mode 100644 index 000000000..1669159d1 --- /dev/null +++ b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch @@ -0,0 +1,599 @@ +Subject: rt-mutex-add-sleeping-spinlocks-support.patch +From: Thomas Gleixner +Date: Fri, 10 Jun 2011 11:21:25 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 32 ++- + kernel/futex.c | 5 + kernel/locking/rtmutex.c | 375 +++++++++++++++++++++++++++++++++++++--- + kernel/locking/rtmutex_common.h | 11 + + 4 files changed, 394 insertions(+), 29 deletions(-) + +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h +@@ -18,6 +18,10 @@ + + extern int max_lock_depth; /* for sysctl */ + ++#ifdef CONFIG_DEBUG_MUTEXES ++#include ++#endif ++ + /** + * The rt_mutex structure + * +@@ -31,8 +35,8 @@ struct rt_mutex { + struct rb_root waiters; + struct rb_node *waiters_leftmost; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; ++#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *name, *file; + int line; + void *magic; +@@ -58,19 +62,35 @@ struct hrtimer_sleeper; + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2469,10 +2469,7 @@ static int futex_wait_requeue_pi(u32 __u + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. 
+ */ +- debug_rt_mutex_init_waiter(&rt_waiter); +- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); +- RB_CLEAR_NODE(&rt_waiter.tree_entry); +- rt_waiter.task = NULL; ++ rt_mutex_init_waiter(&rt_waiter, false); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); + if (unlikely(ret != 0)) +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -8,6 +8,12 @@ + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen + * ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt ++ * + * See Documentation/rt-mutex-design.txt for details. + */ + #include +@@ -261,6 +267,14 @@ static void rt_mutex_adjust_prio(struct + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -385,13 +399,15 @@ static int rt_mutex_adjust_prio_chain(st + /* Release the task */ + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue above changed the top waiter, then we need + * to wake the new top waiter up to try to get the lock. + */ +- +- if (top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); + raw_spin_unlock(&lock->wait_lock); + goto out_put_task; + } +@@ -434,6 +450,25 @@ static int rt_mutex_adjust_prio_chain(st + return ret; + } + ++ ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++ ++/* ++ * Note that RT tasks are excluded from lateral-steals to prevent the ++ * introduction of an unbounded latency ++ */ ++static inline int lock_is_stealable(struct task_struct *task, ++ struct task_struct *pendowner, int mode) ++{ ++ if (mode == STEAL_NORMAL || rt_task(task)) { ++ if (task->prio >= pendowner->prio) ++ return 0; ++ } else if (task->prio > pendowner->prio) ++ return 0; ++ return 1; ++} ++ + /* + * Try to take an rt-mutex + * +@@ -443,8 +478,9 @@ static int rt_mutex_adjust_prio_chain(st + * @task: the task which wants to acquire the lock + * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int ++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { + /* + * We have to be careful here if the atomic speedups are +@@ -477,12 +513,14 @@ static int try_to_take_rt_mutex(struct r + * 3) it is top waiter + */ + if (rt_mutex_has_waiters(lock)) { +- if (task->prio >= rt_mutex_top_waiter(lock)->prio) { +- if (!waiter || waiter != rt_mutex_top_waiter(lock)) +- return 0; +- } ++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; ++ ++ if (task != pown && !lock_is_stealable(task, pown, mode)) ++ return 0; + } + ++ /* We got the lock. 
*/ ++ + if (waiter || rt_mutex_has_waiters(lock)) { + unsigned long flags; + struct rt_mutex_waiter *top; +@@ -506,7 +544,6 @@ static int try_to_take_rt_mutex(struct r + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + +- /* We got the lock. */ + debug_rt_mutex_lock(lock); + + rt_mutex_set_owner(lock, task); +@@ -516,6 +553,13 @@ static int try_to_take_rt_mutex(struct r + return 1; + } + ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ + /* + * Task blocks on lock. + * +@@ -629,7 +673,7 @@ static void wakeup_next_waiter(struct rt + + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); + +- wake_up_process(waiter->task); ++ rt_mutex_wake_waiter(waiter); + } + + /* +@@ -706,18 +750,314 @@ void rt_mutex_adjust_pi(struct task_stru + return; + } + +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); +- + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * preemptible spin_lock functions: ++ */ ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ might_sleep(); ++ ++ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) ++ rt_mutex_deadlock_account_lock(lock, current); ++ else ++ slowfn(lock); ++} ++ ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) ++ rt_mutex_deadlock_account_unlock(current); ++ else ++ slowfn(lock); ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; ++ ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. ++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif ++ ++# define pi_lock(lock) raw_spin_lock_irq(lock) ++# define pi_unlock(lock) raw_spin_unlock_irq(lock) ++ ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. ++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter waiter, *top_waiter; ++ int ret; ++ ++ rt_mutex_init_waiter(&waiter, true); ++ ++ raw_spin_lock(&lock->wait_lock); ++ ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ BUG_ON(rt_mutex_owner(lock) == self); ++ ++ /* ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). 
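++ *
++ * A non-rtmutex wakeup that arrives while we block here is not
++ * lost: try_to_wake_up() records it in saved_state and we copy it
++ * back to the task state once we own the lock (see below).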
++ */ ++ pi_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ ++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); ++ BUG_ON(ret); ++ ++ for (;;) { ++ /* Try to acquire the lock again. */ ++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) ++ break; ++ ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_print_deadlock(&waiter); ++ ++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) ++ schedule_rt_mutex(lock); ++ ++ raw_spin_lock(&lock->wait_lock); ++ ++ pi_lock(&self->pi_lock); ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ } ++ ++ /* ++ * Restore the task state to current->saved_state. We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ pi_lock(&self->pi_lock); ++ __set_current_state(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ pi_unlock(&self->pi_lock); ++ ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. We might have to fix that up: ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++/* ++ * Slow path to release a rt_mutex spin_lock style ++ */ ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ raw_spin_lock(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ rt_mutex_deadlock_account_unlock(current); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ wakeup_next_waiter(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ /* Undo pi boosting.when necessary */ ++ rt_mutex_adjust_prio(current); ++} ++ ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); ++} ++EXPORT_SYMBOL(__rt_spin_lock); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif ++ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int 
__lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) ++{ ++ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ ++ if (atomic_add_unless(atomic, -1, 1)) ++ return 0; ++ migrate_disable(); ++ rt_spin_lock(lock); ++ if (atomic_dec_and_test(atomic)) ++ return 1; ++ rt_spin_unlock(lock); ++ migrate_enable(); ++ return 0; ++} ++EXPORT_SYMBOL(atomic_dec_and_spin_lock); ++ ++void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take + * @state: the state the task should block in (TASK_INTERRUPTIBLE +- * or TASK_UNINTERRUPTIBLE) ++ * or TASK_UNINTERRUPTIBLE) + * @timeout: the pre-initialized and started timer, or NULL for none + * @waiter: the pre-initialized rt_mutex_waiter + * +@@ -773,9 +1113,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, + struct rt_mutex_waiter waiter; + int ret = 0; + +- debug_rt_mutex_init_waiter(&waiter); +- RB_CLEAR_NODE(&waiter.pi_tree_entry); +- RB_CLEAR_NODE(&waiter.tree_entry); ++ rt_mutex_init_waiter(&waiter, false); + + raw_spin_lock(&lock->wait_lock); + +@@ -1062,7 +1400,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); + void __rt_mutex_init(struct rt_mutex *lock, const char *name) + { + lock->owner = NULL; +- raw_spin_lock_init(&lock->wait_lock); + lock->waiters = RB_ROOT; + lock->waiters_leftmost = NULL; + +@@ -1083,7 +1420,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); + void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { +- __rt_mutex_init(lock, NULL); ++ rt_mutex_init(lock); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); + rt_mutex_deadlock_account_lock(lock, proxy_owner); +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -49,6 +49,7 @@ struct rt_mutex_waiter { + struct rb_node pi_tree_entry; + struct task_struct *task; + struct rt_mutex *lock; ++ bool savestate; + #ifdef CONFIG_DEBUG_RT_MUTEXES + unsigned long ip; + struct pid *deadlock_task_pid; +@@ -127,4 +128,14 @@ extern int rt_mutex_finish_proxy_lock(st + # include "rtmutex.h" + #endif + ++static inline void ++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ waiter->task = NULL; ++ 
waiter->savestate = savestate; ++ RB_CLEAR_NODE(&waiter->pi_tree_entry); ++ RB_CLEAR_NODE(&waiter->tree_entry); ++} ++ + #endif diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch new file mode 100644 index 000000000..e59d5f671 --- /dev/null +++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch @@ -0,0 +1,50 @@ +Subject: rt-preempt-base-config.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 12:39:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/Kconfig.preempt | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -1,3 +1,10 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT + + choice + prompt "Preemption Model" +@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY + + Select this if you are building a kernel for a desktop system. + +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" +- select PREEMPT_COUNT ++ select PREEMPT + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + help + This option reduces the latency of the kernel by making +@@ -52,6 +59,14 @@ config PREEMPT + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptible ++ RT kernel. ++ + endchoice + + config PREEMPT_COUNT diff --git a/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch new file mode 100644 index 000000000..1e5c262f5 --- /dev/null +++ b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch @@ -0,0 +1,127 @@ +Subject: rt: rwsem/rwlock: lockdep annotations +From: Thomas Gleixner +Date: Fri, 28 Sep 2012 10:49:42 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +rwlocks and rwsems on RT do not allow multiple readers. Annotate the +lockdep acquire functions accordingly. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/locking/rt.c | 44 +++++++++++++++++++++++++------------------- + 1 file changed, 25 insertions(+), 19 deletions(-) + +--- a/kernel/locking/rt.c ++++ b/kernel/locking/rt.c +@@ -213,17 +213,16 @@ int __lockfunc rt_read_trylock(rwlock_t + */ + if (rt_mutex_owner(lock) != current) { + ret = rt_mutex_trylock(lock); +- if (ret) ++ if (ret) { ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + migrate_disable(); +- ++ } + } else if (!rwlock->read_depth) { + ret = 0; + } + +- if (ret) { ++ if (ret) + rwlock->read_depth++; +- rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); +- } + + return ret; + } +@@ -241,12 +240,11 @@ void __lockfunc rt_read_lock(rwlock_t *r + { + struct rt_mutex *lock = &rwlock->lock; + +- rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); +- + /* + * recursive read locks succeed when current owns the lock + */ + if (rt_mutex_owner(lock) != current) { ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(lock); + migrate_disable(); + } +@@ -266,10 +264,9 @@ EXPORT_SYMBOL(rt_write_unlock); + + void __lockfunc rt_read_unlock(rwlock_t *rwlock) + { +- rwlock_release(&rwlock->dep_map, 1, _RET_IP_); +- + /* Release the lock only when read_depth is down to 0 */ + if (--rwlock->read_depth == 0) { ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); + migrate_enable(); + } +@@ -319,9 +316,10 @@ EXPORT_SYMBOL(rt_up_write); + + void rt_up_read(struct rw_semaphore *rwsem) + { +- rwsem_release(&rwsem->dep_map, 1, _RET_IP_); +- if (--rwsem->read_depth == 0) ++ if (--rwsem->read_depth == 0) { ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&rwsem->lock); ++ } + } + EXPORT_SYMBOL(rt_up_read); + +@@ -360,6 +358,13 @@ void rt_down_write_nested(struct rw_sem + } + EXPORT_SYMBOL(rt_down_write_nested); + ++void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest) ++{ ++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++ + int rt_down_read_trylock(struct rw_semaphore *rwsem) + { + struct rt_mutex *lock = &rwsem->lock; +@@ -370,15 +375,16 @@ int rt_down_read_trylock(struct rw_sema + * but not when read_depth == 0 which means that the rwsem is + * write locked. 
+ */ +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { + ret = rt_mutex_trylock(&rwsem->lock); +- else if (!rwsem->read_depth) ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ } else if (!rwsem->read_depth) { + ret = 0; ++ } + +- if (ret) { ++ if (ret) + rwsem->read_depth++; +- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); +- } + return ret; + } + EXPORT_SYMBOL(rt_down_read_trylock); +@@ -387,10 +393,10 @@ static void __rt_down_read(struct rw_sem + { + struct rt_mutex *lock = &rwsem->lock; + +- rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); +- +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); ++ } + rwsem->read_depth++; + } + diff --git a/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch new file mode 100644 index 000000000..740bf5d13 --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch @@ -0,0 +1,39 @@ +Subject: sched: Do not compare cpu masks in scheduler +Date: Tue, 27 Sep 2011 08:40:24 -0400 +From: Peter Zijlstra +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Peter Zijlstra +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124423.128129033@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2668,16 +2668,12 @@ static inline void update_migrate_disabl + */ + mask = tsk_cpus_allowed(p); + +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); + +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- +- /* Let migrate_enable know to fix things back up */ +- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; +- } ++ /* Let migrate_enable know to fix things back up */ ++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; + } + + void migrate_disable(void) diff --git a/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch new file mode 100644 index 000000000..48ec23244 --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch @@ -0,0 +1,69 @@ +Subject: sched: Have migrate_disable ignore bounded threads +Date: Tue, 27 Sep 2011 08:40:25 -0400 +From: Peter Zijlstra +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Peter Zijlstra +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 23 +++++++++-------------- + 1 file changed, 9 insertions(+), 14 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2680,7 +2680,7 @@ void migrate_disable(void) + { + struct task_struct *p = current; + +- if (in_atomic()) { ++ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + #ifdef 
CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; + #endif +@@ -2710,7 +2710,7 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + +- if (in_atomic()) { ++ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; + #endif +@@ -2730,26 +2730,21 @@ void migrate_enable(void) + preempt_disable(); + if (unlikely(migrate_disabled_updated(p))) { + /* +- * See comment in update_migrate_disable() about locking. ++ * Undo whatever update_migrate_disable() did, also see there ++ * about locking. + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); +- mask = tsk_cpus_allowed(p); ++ + /* + * Clearing migrate_disable causes tsk_cpus_allowed to + * show the tasks original cpu affinity. + */ + p->migrate_disable = 0; +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { +- /* Get the mask now that migration is enabled */ +- mask = tsk_cpus_allowed(p); +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } ++ mask = tsk_cpus_allowed(p); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } else + p->migrate_disable = 0; diff --git a/debian/patches/features/all/rt/rt-sched-numa-Move-task_numa_free-to-__put_task_stru.patch b/debian/patches/features/all/rt/rt-sched-numa-Move-task_numa_free-to-__put_task_stru.patch new file mode 100644 index 000000000..0162e2738 --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-numa-Move-task_numa_free-to-__put_task_stru.patch @@ -0,0 +1,48 @@ +From 66c416e976841ccc4d94048a5cadfc624b203042 Mon Sep 17 00:00:00 2001 +From: Mike Galbraith +Date: Fri, 28 Feb 2014 07:23:11 +0100 +Subject: [PATCH] rt,sched,numa: Move task_numa_free() to __put_task_struct(), + which -rt offloads +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Bad idea: +[ 908.026136] [] rt_spin_lock_slowlock+0xaa/0x2c0 +[ 908.026145] [] task_numa_free+0x31/0x130 +[ 908.026151] [] finish_task_switch+0xce/0x100 +[ 908.026156] [] thread_return+0x48/0x4ae +[ 908.026160] [] schedule+0x25/0xa0 +[ 908.026163] [] rt_spin_lock_slowlock+0xd5/0x2c0 +[ 908.026170] [] get_signal_to_deliver+0xaf/0x680 +[ 908.026175] [] do_signal+0x3d/0x5b0 +[ 908.026179] [] do_notify_resume+0x90/0xe0 +[ 908.026186] [] int_signal+0x12/0x17 +[ 908.026193] [<00007ff2a388b1d0>] 0x7ff2a388b1cf + +Signed-off-by: Mike Galbraith +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/fork.c | 1 + + kernel/sched/core.c | 2 -- + 2 files changed, 1 insertion(+), 2 deletions(-) + +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -237,6 +237,7 @@ void __put_task_struct(struct task_struc + WARN_ON(atomic_read(&tsk->usage)); + WARN_ON(tsk == current); + ++ task_numa_free(tsk); + security_task_free(tsk); + exit_creds(tsk); + delayacct_tsk_free(tsk); +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2149,8 +2149,6 @@ static void finish_task_switch(struct rq + if (mm) + mmdrop(mm); + if (unlikely(prev_state == TASK_DEAD)) { +- task_numa_free(prev); +- + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); + diff --git a/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch new file mode 100644 
index 000000000..3e9cd1ca2 --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch @@ -0,0 +1,306 @@ +Subject: sched: Postpone actual migration disalbe to schedule +From: Steven Rostedt +Date: Tue, 27 Sep 2011 08:40:23 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The migrate_disable() can cause a bit of a overhead to the RT kernel, +as changing the affinity is expensive to do at every lock encountered. +As a running task can not migrate, the actual disabling of migration +does not need to occur until the task is about to schedule out. + +In most cases, a task that disables migration will enable it before +it schedules making this change improve performance tremendously. + +[ Frank Rowand: UP compile fix ] + +Signed-off-by: Steven Rostedt +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124422.779693167@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 251 +++++++++++++++++++++++++++------------------------- + 1 file changed, 132 insertions(+), 119 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2640,6 +2640,135 @@ static inline void schedule_debug(struct + schedstat_inc(this_rq(), sched_count); + } + ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) ++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */ ++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN) ++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN) ++ ++static inline void update_migrate_disable(struct task_struct *p) ++{ ++ const struct cpumask *mask; ++ ++ if (likely(!p->migrate_disable)) ++ return; ++ ++ /* Did we already update affinity? */ ++ if (unlikely(migrate_disabled_updated(p))) ++ return; ++ ++ /* ++ * Since this is always current we can get away with only locking ++ * rq->lock, the ->cpus_allowed value can normally only be changed ++ * while holding both p->pi_lock and rq->lock, but seeing that this ++ * is current, we cannot actually be waking up, so all code that ++ * relies on serialization against p->pi_lock is out of scope. ++ * ++ * Having rq->lock serializes us against things like ++ * set_cpus_allowed_ptr() that can still happen concurrently. 
++ */ ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ ++ /* Let migrate_enable know to fix things back up */ ++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; ++ } ++} ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ ++ preempt_disable(); ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ preempt_enable(); ++ return; ++ } ++ ++ pin_current_cpu(); ++ p->migrate_disable = 1; ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ preempt_disable(); ++ if (migrate_disable_count(p) > 1) { ++ p->migrate_disable--; ++ preempt_enable(); ++ return; ++ } ++ ++ if (unlikely(migrate_disabled_updated(p))) { ++ /* ++ * See comment in update_migrate_disable() about locking. ++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ mask = tsk_cpus_allowed(p); ++ /* ++ * Clearing migrate_disable causes tsk_cpus_allowed to ++ * show the tasks original cpu affinity. ++ */ ++ p->migrate_disable = 0; ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { ++ /* Get the mask now that migration is enabled */ ++ mask = tsk_cpus_allowed(p); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } else ++ p->migrate_disable = 0; ++ ++ unpin_current_cpu(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++#else ++static inline void update_migrate_disable(struct task_struct *p) { } ++#define migrate_disabled_updated(p) 0 ++#endif ++ + static void put_prev_task(struct rq *rq, struct task_struct *prev) + { + if (prev->on_rq || rq->skip_clock_update < 0) +@@ -2739,6 +2868,8 @@ static void __sched __schedule(void) + smp_mb__before_spinlock(); + raw_spin_lock_irq(&rq->lock); + ++ update_migrate_disable(prev); ++ + switch_count = &prev->nivcsw; + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if (unlikely(signal_pending_state(prev->state, prev))) { +@@ -4622,7 +4753,7 @@ void init_idle(struct task_struct *idle, + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (!__migrate_disabled(p)) { ++ if (!migrate_disabled_updated(p)) { + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); +@@ -4692,124 +4823,6 @@ int set_cpus_allowed_ptr(struct task_str + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + +-#ifdef CONFIG_PREEMPT_RT_FULL +-void migrate_disable(void) +-{ +- struct task_struct *p = current; +- const struct cpumask *mask; +- unsigned long flags; +- struct rq *rq; +- +- if (in_atomic()) { +-#ifdef 
CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic++; +-#endif +- return; +- } +- +-#ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); +-#endif +- +- preempt_disable(); +- if (p->migrate_disable) { +- p->migrate_disable++; +- preempt_enable(); +- return; +- } +- +- pin_current_cpu(); +- if (unlikely(!scheduler_running)) { +- p->migrate_disable = 1; +- preempt_enable(); +- return; +- } +- +- /* +- * Since this is always current we can get away with only locking +- * rq->lock, the ->cpus_allowed value can normally only be changed +- * while holding both p->pi_lock and rq->lock, but seeing that this +- * it current, we cannot actually be waking up, so all code that +- * relies on serialization against p->pi_lock is out of scope. +- * +- * Taking rq->lock serializes us against things like +- * set_cpus_allowed_ptr() that can still happen concurrently. +- */ +- rq = this_rq(); +- raw_spin_lock_irqsave(&rq->lock, flags); +- p->migrate_disable = 1; +- mask = tsk_cpus_allowed(p); +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } +- raw_spin_unlock_irqrestore(&rq->lock, flags); +- preempt_enable(); +-} +-EXPORT_SYMBOL(migrate_disable); +- +-void migrate_enable(void) +-{ +- struct task_struct *p = current; +- const struct cpumask *mask; +- unsigned long flags; +- struct rq *rq; +- +- if (in_atomic()) { +-#ifdef CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic--; +-#endif +- return; +- } +- +-#ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); +-#endif +- WARN_ON_ONCE(p->migrate_disable <= 0); +- +- preempt_disable(); +- if (p->migrate_disable > 1) { +- p->migrate_disable--; +- preempt_enable(); +- return; +- } +- +- if (unlikely(!scheduler_running)) { +- p->migrate_disable = 0; +- unpin_current_cpu(); +- preempt_enable(); +- return; +- } +- +- /* +- * See comment in migrate_disable(). +- */ +- rq = this_rq(); +- raw_spin_lock_irqsave(&rq->lock, flags); +- mask = tsk_cpus_allowed(p); +- p->migrate_disable = 0; +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- /* Get the mask now that migration is enabled */ +- mask = tsk_cpus_allowed(p); +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } +- +- raw_spin_unlock_irqrestore(&rq->lock, flags); +- unpin_current_cpu(); +- preempt_enable(); +-} +-EXPORT_SYMBOL(migrate_enable); +-#endif /* CONFIG_PREEMPT_RT_FULL */ +- + /* + * Move (not current) task off this cpu, onto dest cpu. We're doing + * this because either it can't run here any more (set_cpus_allowed() diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch new file mode 100644 index 000000000..3a34d71c5 --- /dev/null +++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch @@ -0,0 +1,38 @@ +Subject: rt: Improve the serial console PASS_LIMIT +From: Ingo Molnar +Date: Wed Dec 14 13:05:54 CET 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Beyond the warning: + + drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable] + +the solution of just looping infinitely was ugly - up it to 1 million to +give it a chance to continue in some really ugly situation. 
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/8250/8250_core.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* f + #define DEBUG_INTR(fmt...) do { } while (0) + #endif + +-#define PASS_LIMIT 512 ++/* ++ * On -rt we can have a more delays, and legitimately ++ * so - so don't drop work spuriously and spam the ++ * syslog: ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define PASS_LIMIT 1000000 ++#else ++# define PASS_LIMIT 512 ++#endif + + #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + diff --git a/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch new file mode 100644 index 000000000..455000da3 --- /dev/null +++ b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch @@ -0,0 +1,46 @@ +Subject: tracing: Show padding as unsigned short +From: Steven Rostedt +Date: Wed, 16 Nov 2011 13:19:35 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +RT added two bytes to trace migrate disable counting to the trace events +and used two bytes of the padding to make the change. The structures and +all were updated correctly, but the display in the event formats was +not: + +cat /debug/tracing/events/sched/sched_switch/format + +name: sched_switch +ID: 51 +format: + field:unsigned short common_type; offset:0; size:2; signed:0; + field:unsigned char common_flags; offset:2; size:1; signed:0; + field:unsigned char common_preempt_count; offset:3; size:1; signed:0; + field:int common_pid; offset:4; size:4; signed:1; + field:unsigned short common_migrate_disable; offset:8; size:2; signed:0; + field:int common_padding; offset:10; size:2; signed:0; + + +The field for common_padding has the correct size and offset, but the +use of "int" might confuse some parsers (and people that are reading +it). This needs to be changed to "unsigned short". 
+ +Signed-off-by: Steven Rostedt +Link: http://lkml.kernel.org/r/1321467575.4181.36.camel@frodo +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner + +--- + kernel/trace/trace_events.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -161,6 +161,7 @@ static int trace_define_common_fields(vo + __common_field(unsigned char, preempt_count); + __common_field(int, pid); + __common_field(unsigned short, migrate_disable); ++ __common_field(unsigned short, padding); + + return ret; + } diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch new file mode 100644 index 000000000..f124e5210 --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch @@ -0,0 +1,391 @@ +From fbebd5600c1b72bb707ada50d040ad719656a301 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 28 Oct 2013 09:36:37 +0100 +Subject: [PATCH] rtmutex: add a first shot of ww_mutex +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +lockdep says: +| -------------------------------------------------------------------------- +| | Wound/wait tests | +| --------------------- +| ww api failures: ok | ok | ok | +| ww contexts mixing: ok | ok | +| finishing ww context: ok | ok | ok | ok | +| locking mismatches: ok | ok | ok | +| EDEADLK handling: ok | ok | ok | ok | ok | ok | ok | ok | ok | ok | +| spinlock nest unlocked: ok | +| ----------------------------------------------------- +| |block | try |context| +| ----------------------------------------------------- +| context: ok | ok | ok | +| try: ok | ok | ok | +| block: ok | ok | ok | +| spinlock: ok | ok | ok | + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rtmutex.c | 235 ++++++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 222 insertions(+), 13 deletions(-) + +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #include "rtmutex_common.h" + +@@ -1052,6 +1053,39 @@ EXPORT_SYMBOL(__rt_spin_lock_init); + + #endif /* PREEMPT_RT_FULL */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); ++ ++ if (!hold_ctx) ++ return 0; ++ ++ if (unlikely(ctx == hold_ctx)) ++ return -EALREADY; ++ ++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && ++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); ++ ctx->contending_lock = ww; ++#endif ++ return -EDEADLK; ++ } ++ ++ return 0; ++} ++#else ++static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ BUG(); ++} ++ ++#endif ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take +@@ -1065,7 +1099,8 @@ EXPORT_SYMBOL(__rt_spin_lock_init); + static int __sched + __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- struct rt_mutex_waiter *waiter) ++ struct rt_mutex_waiter *waiter, ++ struct ww_acquire_ctx *ww_ctx) + { + int ret = 0; + +@@ -1088,6 +1123,12 @@ static int __sched + break; + } + ++ if (ww_ctx && ww_ctx->acquired > 0) { ++ ret = 
__mutex_lock_check_stamp(lock, ww_ctx); ++ if (ret) ++ break; ++ } ++ + raw_spin_unlock(&lock->wait_lock); + + debug_rt_mutex_print_deadlock(waiter); +@@ -1101,13 +1142,89 @@ static int __sched + return ret; + } + ++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++#ifdef CONFIG_DEBUG_MUTEXES ++ /* ++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, ++ * but released with a normal mutex_unlock in this call. ++ * ++ * This should never happen, always use ww_mutex_unlock. ++ */ ++ DEBUG_LOCKS_WARN_ON(ww->ctx); ++ ++ /* ++ * Not quite done after calling ww_acquire_done() ? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); ++ ++ if (ww_ctx->contending_lock) { ++ /* ++ * After -EDEADLK you tried to ++ * acquire a different ww_mutex? Bad! ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); ++ ++ /* ++ * You called ww_mutex_lock after receiving -EDEADLK, ++ * but 'forgot' to unlock everything else first? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); ++ ww_ctx->contending_lock = NULL; ++ } ++ ++ /* ++ * Naughty, using a different class will lead to undefined behavior! ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); ++#endif ++ ww_ctx->acquired++; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct rt_mutex_waiter *waiter, *n; ++ ++ /* ++ * This branch gets optimized out for the common case, ++ * and is only important for ww_mutex_lock. ++ */ ++ ww_mutex_lock_acquired(ww, ww_ctx); ++ ww->ctx = ww_ctx; ++ ++ /* ++ * Give any possible sleeping processes the chance to wake up, ++ * so they can recheck if they have to back off. 
++ */ ++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, ++ tree_entry) { ++ /* XXX debug rt mutex waiter wakeup */ ++ ++ BUG_ON(waiter->lock != lock); ++ rt_mutex_wake_waiter(waiter); ++ } ++} ++ ++#else ++ ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ BUG(); ++} ++#endif ++ + /* + * Slow path lock function: + */ + static int __sched + rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- int detect_deadlock) ++ int detect_deadlock, struct ww_acquire_ctx *ww_ctx) + { + struct rt_mutex_waiter waiter; + int ret = 0; +@@ -1118,6 +1235,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { ++ if (ww_ctx) ++ ww_mutex_account_lock(lock, ww_ctx); + raw_spin_unlock(&lock->wait_lock); + return 0; + } +@@ -1134,12 +1253,14 @@ rt_mutex_slowlock(struct rt_mutex *lock, + ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock); + + if (likely(!ret)) +- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); ++ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx); + + set_current_state(TASK_RUNNING); + + if (unlikely(ret)) + remove_waiter(lock, &waiter); ++ else if (ww_ctx) ++ ww_mutex_account_lock(lock, ww_ctx); + + /* + * try_to_take_rt_mutex() sets the waiter bit +@@ -1217,30 +1338,33 @@ rt_mutex_slowunlock(struct rt_mutex *loc + */ + static inline int + rt_mutex_fastlock(struct rt_mutex *lock, int state, +- int detect_deadlock, ++ int detect_deadlock, struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- int detect_deadlock)) ++ int detect_deadlock, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); + return 0; + } else +- return slowfn(lock, state, NULL, detect_deadlock); ++ return slowfn(lock, state, NULL, detect_deadlock, ww_ctx); + } + + static inline int + rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, int detect_deadlock, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- int detect_deadlock)) ++ int detect_deadlock, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); + return 0; + } else +- return slowfn(lock, state, timeout, detect_deadlock); ++ return slowfn(lock, state, timeout, detect_deadlock, ww_ctx); + } + + static inline int +@@ -1273,7 +1397,7 @@ void __sched rt_mutex_lock(struct rt_mut + { + might_sleep(); + +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock); ++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock); + +@@ -1294,7 +1418,7 @@ int __sched rt_mutex_lock_interruptible( + might_sleep(); + + return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, +- detect_deadlock, rt_mutex_slowlock); ++ detect_deadlock, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + +@@ -1315,7 +1439,7 @@ int __sched rt_mutex_lock_killable(struc + might_sleep(); + + return rt_mutex_fastlock(lock, TASK_KILLABLE, +- detect_deadlock, rt_mutex_slowlock); ++ detect_deadlock, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); + +@@ -1341,7 +1465,7 @@ 
rt_mutex_timed_lock(struct rt_mutex *loc + might_sleep(); + + return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, +- detect_deadlock, rt_mutex_slowlock); ++ detect_deadlock, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); + +@@ -1566,7 +1690,7 @@ int rt_mutex_finish_proxy_lock(struct rt + + set_current_state(TASK_INTERRUPTIBLE); + +- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); ++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); + + set_current_state(TASK_RUNNING); + +@@ -1583,3 +1707,88 @@ int rt_mutex_finish_proxy_lock(struct rt + + return ret; + } ++ ++static inline int ++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH ++ unsigned tmp; ++ ++ if (ctx->deadlock_inject_countdown-- == 0) { ++ tmp = ctx->deadlock_inject_interval; ++ if (tmp > UINT_MAX/4) ++ tmp = UINT_MAX; ++ else ++ tmp = tmp*2 + tmp + tmp/2; ++ ++ ctx->deadlock_inject_interval = tmp; ++ ctx->deadlock_inject_countdown = tmp; ++ ctx->contending_lock = lock; ++ ++ ww_mutex_unlock(lock); ++ ++ return -EDEADLK; ++ } ++#endif ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++int __sched ++__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire(&lock->base.dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ww_ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ww_ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); ++ ++int __sched ++__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, ++ _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ww_ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ww_ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(__ww_mutex_lock); ++ ++void __sched ww_mutex_unlock(struct ww_mutex *lock) ++{ ++ /* ++ * The unlocking fastpath is the 0->1 transition from 'locked' ++ * into 'unlocked' state: ++ */ ++ if (lock->ctx) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); ++#endif ++ if (lock->ctx->acquired > 0) ++ lock->ctx->acquired--; ++ lock->ctx = NULL; ++ } ++ ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->base.lock); ++} ++EXPORT_SYMBOL(ww_mutex_unlock); ++#endif diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch new file mode 100644 index 000000000..09e3ad2c0 --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch @@ -0,0 +1,21 @@ +Subject: rtmutex-avoid-include-hell.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 20:06:39 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h +@@ -14,7 +14,7 @@ + + #include + #include +-#include ++#include + + extern int max_lock_depth; /* for sysctl */ + diff --git 
a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch new file mode 100644 index 000000000..67b09ef2e --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch @@ -0,0 +1,215 @@ +Subject: rtmutex-futex-prepare-rt.patch +From: Thomas Gleixner +Date: Fri, 10 Jun 2011 11:04:15 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/futex.c | 77 ++++++++++++++++++++++++++++++++-------- + kernel/locking/rtmutex.c | 30 +++++++++++++-- + kernel/locking/rtmutex_common.h | 2 + + 3 files changed, 90 insertions(+), 19 deletions(-) + +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1579,6 +1579,16 @@ static int futex_requeue(u32 __user *uad + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ free_pi_state(pi_state); ++ continue; + } else if (ret) { + /* -EDEADLK */ + this->pi_state = NULL; +@@ -2432,7 +2442,7 @@ static int futex_wait_requeue_pi(u32 __u + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; + struct rt_mutex *pi_mutex = NULL; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2481,20 +2491,55 @@ static int futex_wait_requeue_pi(u32 __u + /* Queue the futex_q, drop the hb lock, wait for wakeup. */ + futex_wait_queue_me(hb, &q, to); + +- spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. ++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. ++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. 
If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -2503,9 +2548,10 @@ static int futex_wait_requeue_pi(u32 __u + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- spin_unlock(q.lock_ptr); ++ spin_unlock(&hb2->lock); + } + } else { + /* +@@ -2518,7 +2564,8 @@ static int futex_wait_requeue_pi(u32 __u + ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); + debug_rt_mutex_free_waiter(&rt_waiter); + +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -69,6 +69,11 @@ static void fixup_rt_mutex_waiters(struc + clear_rt_mutex_waiters(lock); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if the architecture + * supports cmpxchg and if there's no debugging state to be set up +@@ -328,7 +333,7 @@ static int rt_mutex_adjust_prio_chain(st + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -528,6 +533,23 @@ static int task_blocks_on_rt_mutex(struc + int chain_walk = 0, res; + + raw_spin_lock_irqsave(&task->pi_lock, flags); ++ ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. 
++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + __rt_mutex_adjust_prio(task); + waiter->task = task; + waiter->lock = lock; +@@ -551,7 +573,7 @@ static int task_blocks_on_rt_mutex(struc + rt_mutex_enqueue_pi(owner, waiter); + + __rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); + } +@@ -645,7 +667,7 @@ static void remove_waiter(struct rt_mute + } + __rt_mutex_adjust_prio(owner); + +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); +@@ -677,7 +699,7 @@ void rt_mutex_adjust_pi(struct task_stru + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || (waiter->prio == task->prio && ++ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio && + !dl_prio(task->prio))) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -104,6 +104,8 @@ static inline struct task_struct *rt_mut + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch new file mode 100644 index 000000000..8f7671e71 --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch @@ -0,0 +1,81 @@ +Subject: rtmutex-lock-killable.patch +From: Thomas Gleixner +Date: Thu, 09 Jun 2011 11:43:52 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 1 + + kernel/locking/rtmutex.c | 33 +++++++++++++++++++++++++++------ + 2 files changed, 28 insertions(+), 6 deletions(-) + +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h +@@ -92,6 +92,7 @@ extern void rt_mutex_destroy(struct rt_m + extern void rt_mutex_lock(struct rt_mutex *lock); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, + int detect_deadlock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout, + int detect_deadlock); +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -943,12 +943,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); + /** + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ + int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, +@@ -962,17 +962,38 @@ int __sched rt_mutex_lock_interruptible( + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + + /** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the 
rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal ++ * -EDEADLK when the lock would deadlock (when deadlock detection is on) ++ */ ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock, ++ int detect_deadlock) ++{ ++ might_sleep(); ++ ++ return rt_mutex_fastlock(lock, TASK_KILLABLE, ++ detect_deadlock, rt_mutex_slowlock); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); ++ ++/** + * rt_mutex_timed_lock - lock a rt_mutex interruptible + * the timeout structure is provided + * by the caller + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @timeout: timeout structure or NULL (no timeout) + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -ETIMEDOUT when the timeout expired + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ diff --git a/debian/patches/features/all/rt/rtmutex-use-a-trylock-for-waiter-lock-in-trylock.patch b/debian/patches/features/all/rt/rtmutex-use-a-trylock-for-waiter-lock-in-trylock.patch new file mode 100644 index 000000000..5c38422fc --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-use-a-trylock-for-waiter-lock-in-trylock.patch @@ -0,0 +1,125 @@ +From c290567d076d3b59d30c6778bc3f2e401d8289fe Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 15 Nov 2013 15:46:50 +0100 +Subject: [PATCH] rtmutex: use a trylock for waiter lock in trylock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Mike Galbraith captered the following: +| >#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596 +| >#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be +| >#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42 +| >#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd +| >#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2 +| >#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d +| >#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd +| >--- --- +| >#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd +| > [exception RIP: task_blocks_on_rt_mutex+51] +| >#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c +| >#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf +| >#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce +| >#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb +| >#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5 +| >#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c + +lock_timer_base() does a try_lock() which deadlocks on the waiter lock +not the lock itself. +This patch takes the waiter_lock with trylock so it should work from interrupt +context as well. If the fastpath doesn't work and the waiter_lock itself is +taken then it seems that the lock itself taken. +This patch also adds "rt_spin_unlock_after_trylock_in_irq" to keep lockdep +happy. If we managed to take the wait_lock in the first place we should also +be able to take it in the unlock path. 
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Mike Galbraith +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/spinlock_rt.h | 1 + + kernel/locking/rtmutex.c | 31 +++++++++++++++++++++++++++---- + kernel/timer.c | 2 +- + 3 files changed, 29 insertions(+), 5 deletions(-) + +--- a/include/linux/spinlock_rt.h ++++ b/include/linux/spinlock_rt.h +@@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spin + extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); + extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); + extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock); + extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); + extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); + extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -909,10 +909,8 @@ static void noinline __sched rt_spin_lo + /* + * Slow path to release a rt_mutex spin_lock style + */ +-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock) + { +- raw_spin_lock(&lock->wait_lock); +- + debug_rt_mutex_unlock(lock); + + rt_mutex_deadlock_account_unlock(current); +@@ -931,6 +929,23 @@ static void noinline __sched rt_spin_lo + rt_mutex_adjust_prio(current); + } + ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ raw_spin_lock(&lock->wait_lock); ++ __rt_spin_lock_slowunlock(lock); ++} ++ ++static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock) ++{ ++ int ret; ++ ++ do { ++ ret = raw_spin_trylock(&lock->wait_lock); ++ } while (!ret); ++ ++ __rt_spin_lock_slowunlock(lock); ++} ++ + void __lockfunc rt_spin_lock(spinlock_t *lock) + { + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); +@@ -961,6 +976,13 @@ void __lockfunc rt_spin_unlock(spinlock_ + } + EXPORT_SYMBOL(rt_spin_unlock); + ++void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq); ++} ++ + void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) + { + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +@@ -1283,7 +1305,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lo + { + int ret = 0; + +- raw_spin_lock(&lock->wait_lock); ++ if (!raw_spin_trylock(&lock->wait_lock)) ++ return ret; + + if (likely(rt_mutex_owner(lock) != current)) { + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1400,7 +1400,7 @@ unsigned long get_next_timer_interrupt(u + expires = base->next_timer; + } + #ifdef CONFIG_PREEMPT_RT_FULL +- rt_spin_unlock(&base->lock); ++ rt_spin_unlock_after_trylock_in_irq(&base->lock); + #else + spin_unlock(&base->lock); + #endif diff --git a/debian/patches/features/all/rt/rtmutex-ww-bad-return-value-in-__mutex_lock_check_stamp.patch b/debian/patches/features/all/rt/rtmutex-ww-bad-return-value-in-__mutex_lock_check_stamp.patch new file mode 100644 index 000000000..e1b89ac9c --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-ww-bad-return-value-in-__mutex_lock_check_stamp.patch @@ -0,0 +1,28 @@ +From 9fe76c849612387dd1991acfcc7dc9527b390caf Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Sun, 15 Dec 2013 15:40:47 +0100 +Subject: [PATCH] bad return value 
in __mutex_lock_check_stamp +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Bad return value in _mutex_lock_check_stamp - this problem only would show +up with 3.12.1 rt4 applied but CONFIG_PREEMPT_RT_FULL not enabled +currently it would be returning what ever vprintk_emit ended up with +(atleast on x86), which probably is not the intended behavior. Added a +return 0; as in the case with CONFIG_PREEMPT_RT_FULL enabled. + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/locking/rtmutex.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1082,6 +1082,7 @@ static inline int __sched + __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) + { + BUG(); ++ return 0; + } + + #endif diff --git a/debian/patches/features/all/rt/rwsem-add-rt-variant.patch b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch new file mode 100644 index 000000000..1ef7aa959 --- /dev/null +++ b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch @@ -0,0 +1,180 @@ +Subject: rwsem-add-rt-variant.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 21:02:53 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwsem.h | 6 ++ + include/linux/rwsem_rt.h | 134 +++++++++++++++++++++++++++++++++++++++++++++++ + kernel/locking/Makefile | 2 + 3 files changed, 142 insertions(+) + +--- a/include/linux/rwsem.h ++++ b/include/linux/rwsem.h +@@ -16,6 +16,10 @@ + + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -160,4 +164,6 @@ extern void up_read_non_owner(struct rw_ + # define up_read_non_owner(sem) up_read(sem) + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* _LINUX_RWSEM_H */ +--- /dev/null ++++ b/include/linux/rwsem_rt.h +@@ -0,0 +1,134 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++/* ++ * RW-semaphores are a spinlock plus a reader-depth count. ++ * ++ * Note that the semantics are different from the usual ++ * Linux rw-sems, in PREEMPT_RT mode we do not allow ++ * multiple readers to hold the lock at once, we only allow ++ * a read-lock owner to read-lock recursively. This is ++ * better for latency, makes the implementation inherently ++ * fair and makes it simpler as well. 
++ */ ++ ++#include ++ ++struct rw_semaphore { ++ struct rt_mutex lock; ++ int read_depth; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); ++ ++#define __rt_init_rwsem(sem, name, key) \ ++ do { \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), (name), (key));\ ++ } while (0) ++ ++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) ++ ++# define rt_init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rt_init_rwsem((sem), #sem, &__key); \ ++} while (0) ++ ++extern void rt_down_write(struct rw_semaphore *rwsem); ++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest); ++extern void rt_down_read(struct rw_semaphore *rwsem); ++extern int rt_down_write_trylock(struct rw_semaphore *rwsem); ++extern int rt_down_read_trylock(struct rw_semaphore *rwsem); ++extern void rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_write(struct rw_semaphore *rwsem); ++extern void rt_downgrade_write(struct rw_semaphore *rwsem); ++ ++#define init_rwsem(sem) rt_init_rwsem(sem) ++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) ++ ++static inline int rwsem_is_contended(struct rw_semaphore *sem) ++{ ++ /* rt_mutex_has_waiters() */ ++ return !RB_EMPTY_ROOT(&sem->lock.waiters); ++} ++ ++static inline void down_read(struct rw_semaphore *sem) ++{ ++ rt_down_read(sem); ++} ++ ++static inline int down_read_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_read_trylock(sem); ++} ++ ++static inline void down_write(struct rw_semaphore *sem) ++{ ++ rt_down_write(sem); ++} ++ ++static inline int down_write_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_write_trylock(sem); ++} ++ ++static inline void up_read(struct rw_semaphore *sem) ++{ ++ rt_up_read(sem); ++} ++ ++static inline void up_write(struct rw_semaphore *sem) ++{ ++ rt_up_write(sem); ++} ++ ++static inline void downgrade_write(struct rw_semaphore *sem) ++{ ++ rt_downgrade_write(sem); ++} ++ ++static inline void down_read_nested(struct rw_semaphore *sem, int subclass) ++{ ++ return rt_down_read_nested(sem, subclass); ++} ++ ++static inline void down_write_nested(struct rw_semaphore *sem, int subclass) ++{ ++ rt_down_write_nested(sem, subclass); ++} ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, &nest_lock->dep_map); ++} ++ ++#else ++ ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, NULL); ++} ++#endif ++#endif +--- a/kernel/locking/Makefile ++++ b/kernel/locking/Makefile +@@ -20,6 +20,8 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmute + obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += 
rwsem-xadd.o ++endif + obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o diff --git a/debian/patches/features/all/rt/sched-Add-better-debug-output-for-might_sleep.patch b/debian/patches/features/all/rt/sched-Add-better-debug-output-for-might_sleep.patch new file mode 100644 index 000000000..507be726e --- /dev/null +++ b/debian/patches/features/all/rt/sched-Add-better-debug-output-for-might_sleep.patch @@ -0,0 +1,77 @@ +From 8f47b1871b8aac98f1a9d93bc3467fb97b65199a Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:39 +0100 +Subject: [PATCH 3/6] sched: Add better debug output for might_sleep() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +might_sleep() can tell us where interrupts have been disabled, but we +have no idea what disabled preemption. Add some debug infrastructure. + +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-4-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + include/linux/sched.h | 3 +++ + kernel/sched/core.c | 23 +++++++++++++++++++++-- + 2 files changed, 24 insertions(+), 2 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1460,6 +1460,9 @@ struct task_struct { + struct mutex perf_event_mutex; + struct list_head perf_event_list; + #endif ++#ifdef CONFIG_DEBUG_PREEMPT ++ unsigned long preempt_disable_ip; ++#endif + #ifdef CONFIG_NUMA + struct mempolicy *mempolicy; /* Protected by alloc_lock */ + short il_next; +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2508,8 +2508,13 @@ void __kprobes preempt_count_add(int val + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= + PREEMPT_MASK - 10); + #endif +- if (preempt_count() == val) +- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ if (preempt_count() == val) { ++ unsigned long ip = get_parent_ip(CALLER_ADDR1); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } + } + EXPORT_SYMBOL(preempt_count_add); + +@@ -2552,6 +2557,13 @@ static noinline void __schedule_bug(stru + print_modules(); + if (irqs_disabled()) + print_irqtrace_events(prev); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif + dump_stack(); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + } +@@ -6965,6 +6977,13 @@ void __might_sleep(const char *file, int + debug_show_held_locks(current); + if (irqs_disabled()) + print_irqtrace_events(current); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (!preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif + dump_stack(); + } + EXPORT_SYMBOL(__might_sleep); diff --git a/debian/patches/features/all/rt/sched-Adjust-p-sched_reset_on_fork-when-nothing-else.patch b/debian/patches/features/all/rt/sched-Adjust-p-sched_reset_on_fork-when-nothing-else.patch new file mode 100644 index 000000000..0433ccf0d --- /dev/null +++ b/debian/patches/features/all/rt/sched-Adjust-p-sched_reset_on_fork-when-nothing-else.patch @@ -0,0 +1,30 @@ +From d6b1e9119787fd2e31dcf0f0ce90b71197604206 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:40 +0100 +Subject: [PATCH 4/6] sched: Adjust p->sched_reset_on_fork when nothing else + changes +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If the policy and priority remain unchanged a possible modification of +p->sched_reset_on_fork gets lost in the early exit path. + +Signed-off-by: Thomas Gleixner +[ Rebase ontop of v3.14-rc1. ] +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-5-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + kernel/sched/core.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3409,6 +3409,7 @@ static int __sched_setscheduler(struct t + if (dl_policy(policy)) + goto change; + ++ p->sched_reset_on_fork = reset_on_fork; + task_rq_unlock(rq, p, &flags); + return 0; + } diff --git a/debian/patches/features/all/rt/sched-Check-for-idle-task-in-might_sleep.patch b/debian/patches/features/all/rt/sched-Check-for-idle-task-in-might_sleep.patch new file mode 100644 index 000000000..55b26a1c4 --- /dev/null +++ b/debian/patches/features/all/rt/sched-Check-for-idle-task-in-might_sleep.patch @@ -0,0 +1,29 @@ +From db273be2a7d42f92b3471e0f717982928214a650 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:38 +0100 +Subject: [PATCH 2/6] sched: Check for idle task in might_sleep() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Idle is not allowed to call sleeping functions ever! + +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-3-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + kernel/sched/core.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -6946,7 +6946,8 @@ void __might_sleep(const char *file, int + static unsigned long prev_jiffy; /* ratelimiting */ + + rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ +- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || + system_state != SYSTEM_RUNNING || oops_in_progress) + return; + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) diff --git a/debian/patches/features/all/rt/sched-Consider-pi-boosting-in-setscheduler.patch b/debian/patches/features/all/rt/sched-Consider-pi-boosting-in-setscheduler.patch new file mode 100644 index 000000000..3e69001b6 --- /dev/null +++ b/debian/patches/features/all/rt/sched-Consider-pi-boosting-in-setscheduler.patch @@ -0,0 +1,168 @@ +From c365c292d05908c6ea6f32708f331e21033fe71d Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:42 +0100 +Subject: [PATCH 6/6] sched: Consider pi boosting in setscheduler() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If a PI boosted task policy/priority is modified by a setscheduler() +call we unconditionally dequeue and requeue the task if it is on the +runqueue even if the new priority is lower than the current effective +boosted priority. This can result in undesired reordering of the +priority bucket list. + +If the new priority is less or equal than the current effective we +just store the new parameters in the task struct and leave the +scheduler class and the runqueue untouched. This is handled when the +task deboosts itself. 
Only if the new priority is higher than the +effective boosted priority we apply the change immediately. + +Signed-off-by: Thomas Gleixner +[ Rebase ontop of v3.14-rc1. ] +Signed-off-by: Sebastian Andrzej Siewior +Cc: Dario Faggioli +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-7-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + include/linux/sched/rt.h | 7 +++++++ + kernel/locking/rtmutex.c | 12 ++++++++++++ + kernel/sched/core.c | 41 ++++++++++++++++++++++++++++++----------- + 3 files changed, 49 insertions(+), 11 deletions(-) + +--- a/include/linux/sched/rt.h ++++ b/include/linux/sched/rt.h +@@ -35,6 +35,7 @@ static inline int rt_task(struct task_st + #ifdef CONFIG_RT_MUTEXES + extern int rt_mutex_getprio(struct task_struct *p); + extern void rt_mutex_setprio(struct task_struct *p, int prio); ++extern int rt_mutex_check_prio(struct task_struct *task, int newprio); + extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); + extern void rt_mutex_adjust_pi(struct task_struct *p); + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +@@ -46,6 +47,12 @@ static inline int rt_mutex_getprio(struc + { + return p->normal_prio; + } ++ ++static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ return 0; ++} ++ + static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) + { + return NULL; +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -213,6 +213,18 @@ struct task_struct *rt_mutex_get_top_tas + } + + /* ++ * Called by sched_setscheduler() to check whether the priority change ++ * is overruled by a possible priority boosting. ++ */ ++int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ if (!task_has_pi_waiters(task)) ++ return 0; ++ ++ return task_top_pi_waiter(task)->task->prio <= newprio; ++} ++ ++/* + * Adjust the priority of a task, after its pi_waiters got modified. + * + * This can be both boosting and unboosting. task->pi_lock must be held. +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2918,7 +2918,8 @@ EXPORT_SYMBOL(sleep_on_timeout); + * This function changes the 'effective' priority of a task. It does + * not touch ->normal_prio like __setscheduler(). + * +- * Used by the rt_mutex code to implement priority inheritance logic. ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. + */ + void rt_mutex_setprio(struct task_struct *p, int prio) + { +@@ -3199,9 +3200,8 @@ static void + dl_se->dl_new = 1; + } + +-/* Actually do priority change: must hold pi & rq lock. */ +-static void __setscheduler(struct rq *rq, struct task_struct *p, +- const struct sched_attr *attr) ++static void __setscheduler_params(struct task_struct *p, ++ const struct sched_attr *attr) + { + int policy = attr->sched_policy; + +@@ -3221,9 +3221,14 @@ static void __setscheduler(struct rq *rq + * getparam()/getattr() don't report silly values for !rt tasks. + */ + p->rt_priority = attr->sched_priority; ++ set_load_weight(p); ++} + +- p->normal_prio = normal_prio(p); +- p->prio = rt_mutex_getprio(p); ++/* Actually do priority change: must hold pi & rq lock. 
*/ ++static void __setscheduler(struct rq *rq, struct task_struct *p, ++ const struct sched_attr *attr) ++{ ++ __setscheduler_params(p, attr); + + if (dl_prio(p->prio)) + p->sched_class = &dl_sched_class; +@@ -3231,8 +3236,6 @@ static void __setscheduler(struct rq *rq + p->sched_class = &rt_sched_class; + else + p->sched_class = &fair_sched_class; +- +- set_load_weight(p); + } + + static void +@@ -3285,6 +3288,7 @@ static int __sched_setscheduler(struct t + const struct sched_attr *attr, + bool user) + { ++ int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; + int retval, oldprio, oldpolicy = -1, on_rq, running; + int policy = attr->sched_policy; + unsigned long flags; +@@ -3463,6 +3467,24 @@ static int __sched_setscheduler(struct t + return -EBUSY; + } + ++ p->sched_reset_on_fork = reset_on_fork; ++ oldprio = p->prio; ++ ++ /* ++ * Special case for priority boosted tasks. ++ * ++ * If the new priority is lower or equal (user space view) ++ * than the current (boosted) priority, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ if (rt_mutex_check_prio(p, newprio)) { ++ __setscheduler_params(p, attr); ++ task_rq_unlock(rq, p, &flags); ++ return 0; ++ } ++ + on_rq = p->on_rq; + running = task_current(rq, p); + if (on_rq) +@@ -3470,9 +3492,6 @@ static int __sched_setscheduler(struct t + if (running) + p->sched_class->put_prev_task(rq, p); + +- p->sched_reset_on_fork = reset_on_fork; +- +- oldprio = p->prio; + prev_class = p->sched_class; + __setscheduler(rq, p, attr); + diff --git a/debian/patches/features/all/rt/sched-Fix-broken-setscheduler.patch b/debian/patches/features/all/rt/sched-Fix-broken-setscheduler.patch new file mode 100644 index 000000000..5754063da --- /dev/null +++ b/debian/patches/features/all/rt/sched-Fix-broken-setscheduler.patch @@ -0,0 +1,79 @@ +From 383afd0971538b3d77532a56404b24cfe967b5dd Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Tue, 11 Mar 2014 19:24:20 -0400 +Subject: [PATCH] sched: Fix broken setscheduler() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +I decided to run my tests on linux-next, and my wakeup_rt tracer was +broken. After running a bisect, I found that the problem commit was: + + linux-next commit c365c292d059 + "sched: Consider pi boosting in setscheduler()" + +And the reason the wake_rt tracer test was failing, was because it had +no RT task to trace. I first noticed this when running with +sched_switch event and saw that my RT task still had normal SCHED_OTHER +priority. Looking at the problem commit, I found: + + - p->normal_prio = normal_prio(p); + - p->prio = rt_mutex_getprio(p); + +With no + + + p->normal_prio = normal_prio(p); + + p->prio = rt_mutex_getprio(p); + +Reading what the commit is suppose to do, I realize that the p->prio +can't be set if the task is boosted with a higher prio, but the +p->normal_prio still needs to be set regardless, otherwise, when the +task is deboosted, it wont get the new priority. + +The p->prio has to be set before "check_class_changed()" is called, +otherwise the class wont be changed. + +Also added fix to newprio to include a check for deadline policy that +was missing. This change was suggested by Juri Lelli. 
+ +Signed-off-by: Steven Rostedt +Cc: SebastianAndrzej Siewior +Cc: Juri Lelli +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/20140306120438.638bfe94@gandalf.local.home +Signed-off-by: Ingo Molnar +--- + kernel/sched/core.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3221,6 +3221,7 @@ static void __setscheduler_params(struct + * getparam()/getattr() don't report silly values for !rt tasks. + */ + p->rt_priority = attr->sched_priority; ++ p->normal_prio = normal_prio(p); + set_load_weight(p); + } + +@@ -3230,6 +3231,12 @@ static void __setscheduler(struct rq *rq + { + __setscheduler_params(p, attr); + ++ /* ++ * If we get here, there was no pi waiters boosting the ++ * task. It is safe to use the normal prio. ++ */ ++ p->prio = normal_prio(p); ++ + if (dl_prio(p->prio)) + p->sched_class = &dl_sched_class; + else if (rt_prio(p->prio)) +@@ -3288,7 +3295,8 @@ static int __sched_setscheduler(struct t + const struct sched_attr *attr, + bool user) + { +- int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; ++ int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : ++ MAX_RT_PRIO - 1 - attr->sched_priority; + int retval, oldprio, oldpolicy = -1, on_rq, running; + int policy = attr->sched_policy; + unsigned long flags; diff --git a/debian/patches/features/all/rt/sched-Init-idle-on_rq-in-init_idle.patch b/debian/patches/features/all/rt/sched-Init-idle-on_rq-in-init_idle.patch new file mode 100644 index 000000000..0b3970460 --- /dev/null +++ b/debian/patches/features/all/rt/sched-Init-idle-on_rq-in-init_idle.patch @@ -0,0 +1,40 @@ +From 77177856e3bf39d435b3ae4bfd164ca3c8cd4577 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:37 +0100 +Subject: [PATCH 1/6] sched: Init idle->on_rq in init_idle() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We stumbled in RT over a SMP bringup issue on ARM where the +idle->on_rq == 0 was causing try_to_wakeup() on the other cpu to run +into nada land. + +After adding that idle->on_rq = 1; I was able to find the root cause +of the lockup: the idle task on the newly woken up cpu was fiddling +with a sleeping spinlock, which is a nono. + +I kept the init of idle->on_rq to keep the state consistent and to +avoid another long lasting debug session. + +As a side note, the whole debug mess could have been avoided if +might_sleep() would have yelled when called from the idle task. 
That's +fixed with patch 2/6 - and that one actually has a changelog :) + +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-2-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + kernel/sched/core.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4481,6 +4481,7 @@ void init_idle(struct task_struct *idle, + rcu_read_unlock(); + + rq->curr = rq->idle = idle; ++ idle->on_rq = 1; + #if defined(CONFIG_SMP) + idle->on_cpu = 1; + #endif diff --git a/debian/patches/features/all/rt/sched-Queue-RT-tasks-to-head-when-prio-drops.patch b/debian/patches/features/all/rt/sched-Queue-RT-tasks-to-head-when-prio-drops.patch new file mode 100644 index 000000000..63403d56f --- /dev/null +++ b/debian/patches/features/all/rt/sched-Queue-RT-tasks-to-head-when-prio-drops.patch @@ -0,0 +1,73 @@ +From 81a44c5441d7f7d2c3dc9105f4d65ad0d5818617 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 7 Feb 2014 20:58:41 +0100 +Subject: [PATCH 5/6] sched: Queue RT tasks to head when prio drops +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The following scenario does not work correctly: + +Runqueue of CPUx contains two runnable and pinned tasks: + + T1: SCHED_FIFO, prio 80 + T2: SCHED_FIFO, prio 80 + +T1 is on the cpu and executes the following syscalls (classic priority +ceiling scenario): + + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90); + ... + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80); + ... + +Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back +to sleep the scheduler picks T2. Surprise! + +The same happens w/o actual preemption when T1 is forced into the +scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes +pick_next_task() which returns T2. So T1 gets preempted and scheduled +out. + +This happens because sched_setscheduler() dequeues T1 from the prio 90 +list and then enqueues it on the tail of the prio 80 list behind T2. +This violates the POSIX spec and surprises user space which relies on +the guarantee that SCHED_FIFO tasks are not scheduled out unless they +give the CPU up voluntarily or are preempted by a higher priority +task. In the latter case the preempted task must get back on the CPU +after the preempting task schedules out again. + +We fixed a similar issue already in commit 60db48c (sched: Queue a +deboosted task to the head of the RT prio queue). The same treatment +is necessary for sched_setscheduler(). So enqueue to head of the prio +bucket list if the priority of the task is lowered. + +It might be possible that existing user space relies on the current +behaviour, but it can be considered highly unlikely due to the corner +case nature of the application scenario. + +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1391803122-4425-6-git-send-email-bigeasy@linutronix.de +Signed-off-by: Ingo Molnar +--- + kernel/sched/core.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3478,8 +3478,13 @@ static int __sched_setscheduler(struct t + + if (running) + p->sched_class->set_curr_task(rq); +- if (on_rq) +- enqueue_task(rq, p, 0); ++ if (on_rq) { ++ /* ++ * We enqueue to tail when the priority of a task is ++ * increased (user space view). 
++ */ ++ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); ++ } + + check_class_changed(rq, p, prev_class, oldprio); + task_rq_unlock(rq, p, &flags); diff --git a/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch new file mode 100644 index 000000000..40193ad75 --- /dev/null +++ b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch @@ -0,0 +1,25 @@ +Subject: sched-clear-pf-thread-bound-on-fallback-rq.patch +From: Thomas Gleixner +Date: Fri, 04 Nov 2011 20:48:36 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1345,6 +1345,12 @@ static int select_fallback_rq(int cpu, s + } + } + ++ /* ++ * Clear PF_NO_SETAFFINITY, otherwise we wreckage ++ * migrate_disable/enable. See optimization for ++ * PF_NO_SETAFFINITY tasks there. ++ */ ++ p->flags &= ~PF_NO_SETAFFINITY; + return dest_cpu; + } + diff --git a/debian/patches/features/all/rt/sched-cond-resched.patch b/debian/patches/features/all/rt/sched-cond-resched.patch new file mode 100644 index 000000000..6f3b6a32b --- /dev/null +++ b/debian/patches/features/all/rt/sched-cond-resched.patch @@ -0,0 +1,32 @@ +Subject: sched-cond-resched.patch +From: Thomas Gleixner +Date: Tue, 07 Jun 2011 11:25:03 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4177,9 +4177,16 @@ SYSCALL_DEFINE0(sched_yield) + + static void __cond_resched(void) + { +- __preempt_count_add(PREEMPT_ACTIVE); +- __schedule(); +- __preempt_count_sub(PREEMPT_ACTIVE); ++ do { ++ __preempt_count_add(PREEMPT_ACTIVE); ++ __schedule(); ++ __preempt_count_sub(PREEMPT_ACTIVE); ++ /* ++ * Check again in case we missed a preemption ++ * opportunity between schedule and now. ++ */ ++ barrier(); ++ } while (need_resched()); + } + + int __sched _cond_resched(void) diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch new file mode 100644 index 000000000..e85701a3a --- /dev/null +++ b/debian/patches/features/all/rt/sched-delay-put-task.patch @@ -0,0 +1,79 @@ +Subject: sched-delay-put-task.patch +From: Thomas Gleixner +Date: Tue, 31 May 2011 16:59:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 13 +++++++++++++ + kernel/fork.c | 15 ++++++++++++++- + 2 files changed, 27 insertions(+), 1 deletion(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1600,6 +1600,9 @@ struct task_struct { + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++#endif + }; + + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +@@ -1780,6 +1783,15 @@ extern struct pid *cad_pid; + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -1787,6 +1799,7 @@ static inline void put_task_struct(struc + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + extern void task_cputime(struct task_struct *t, +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -230,7 +230,9 @@ static inline void put_signal_struct(str + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); +@@ -246,7 +248,18 @@ void __put_task_struct(struct task_struc + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + void __init __weak arch_task_cache_init(void) { } + diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch new file mode 100644 index 000000000..731f35f91 --- /dev/null +++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch @@ -0,0 +1,29 @@ +Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:03:52 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Carsten reported problems when running: + + taskset 01 chrt -f 1 sleep 1 + +from within rc.local on a F15 machine. The task stays running and +never gets on the run queue because some of the run queues have +rt_throttled=1 which does not go away. Works nice from a ssh login +shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well. 
+ +Signed-off-by: Thomas Gleixner +--- + init/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1035,6 +1035,7 @@ config CFS_BANDWIDTH + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch new file mode 100644 index 000000000..b1082f934 --- /dev/null +++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch @@ -0,0 +1,28 @@ +Subject: sched-disable-ttwu-queue.patch +From: Thomas Gleixner +Date: Tue, 13 Sep 2011 16:42:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/features.h | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -50,11 +50,15 @@ SCHED_FEAT(LB_BIAS, true) + */ + SCHED_FEAT(NONTASK_POWER, true) + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. + */ + SCHED_FEAT(TTWU_QUEUE, true) ++#else ++SCHED_FEAT(TTWU_QUEUE, false) ++#endif + + SCHED_FEAT(FORCE_SD_OVERLAP, false) + SCHED_FEAT(RT_RUNTIME_SHARE, true) diff --git a/debian/patches/features/all/rt/sched-dont-calculate-hweight-in-update_migrate_disab.patch b/debian/patches/features/all/rt/sched-dont-calculate-hweight-in-update_migrate_disab.patch new file mode 100644 index 000000000..0c6342a54 --- /dev/null +++ b/debian/patches/features/all/rt/sched-dont-calculate-hweight-in-update_migrate_disab.patch @@ -0,0 +1,51 @@ +From f87f3c003eb4ad513e6732dfaa76f442b8aec9ba Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Mon, 24 Mar 2014 13:18:48 +0100 +Subject: [PATCH] sched: dont calculate hweight in update_migrate_disable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Proposal for a minor optimization in update_migrate_disable - its only a few +instructions saved but those are in the hot path of locks so it might be worth +it + +When being scheduled out while migrate_disable > 0 and migrate_disabled_updated +is not yet set we end up here (kernel/sched/core.c): + +static inline void update_migrate_disable(struct task_struct *p) +{ + ... + + mask = tsk_cpus_allowed(p); + + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + +as we only can get here if migrate_disable > 0 there is no need to calculate +the cpumask_weight(mask) as tsk_cpus_allowed in that case will return +cpumask_of(task_cpu(p)) which only can have a hamming weight of 1 anyway. +So we can simply do: + + p->nr_cpus_allowed = 1; + +without changing the behavior. 
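For reference, the reasoning above relies on the RT variant of tsk_cpus_allowed() added by sched-migrate-disable.patch later in this series; with migrate_disable set it returns a single-CPU mask, so its weight can only ever be 1 (reproduced here purely as context, not as a new change):

    static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
    {
            if (p->migrate_disable)
                    return cpumask_of(task_cpu(p));

            return &p->cpus_allowed;
    }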
+ +Reviewed-by: Steven Rostedt +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/sched/core.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2670,7 +2670,8 @@ static inline void update_migrate_disabl + + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); ++ /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */ ++ p->nr_cpus_allowed = 1; + + /* Let migrate_enable know to fix things back up */ + p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch new file mode 100644 index 000000000..9377b8e6b --- /dev/null +++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch @@ -0,0 +1,24 @@ +Subject: sched-limit-nr-migrate.patch +From: Thomas Gleixner +Date: Mon, 06 Jun 2011 12:12:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -272,7 +272,11 @@ late_initcall(sched_init_debug); + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#else ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#endif + + /* + * period over which we average the RT time consumption, measured diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch new file mode 100644 index 000000000..1137518fa --- /dev/null +++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch @@ -0,0 +1,46 @@ +Subject: sched-might-sleep-do-not-account-rcu-depth.patch +From: Thomas Gleixner +Date: Tue, 07 Jun 2011 09:19:06 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rcupdate.h | 7 +++++++ + kernel/sched/core.c | 3 ++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -190,6 +190,11 @@ void synchronize_rcu(void); + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
+ */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -213,6 +218,8 @@ static inline int rcu_preempt_depth(void + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7052,7 +7052,8 @@ void __init sched_init(void) + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); ++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + ++ sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } diff --git a/debian/patches/features/all/rt/sched-migrate-disable.patch b/debian/patches/features/all/rt/sched-migrate-disable.patch new file mode 100644 index 000000000..96921fe3a --- /dev/null +++ b/debian/patches/features/all/rt/sched-migrate-disable.patch @@ -0,0 +1,193 @@ +Subject: sched-migrate-disable.patch +From: Thomas Gleixner +Date: Thu, 16 Jun 2011 13:26:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/preempt.h | 8 ++++ + include/linux/sched.h | 13 +++++-- + kernel/sched/core.c | 88 +++++++++++++++++++++++++++++++++++++++++++++--- + lib/smp_processor_id.c | 6 +-- + 4 files changed, 104 insertions(+), 11 deletions(-) + +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -148,6 +148,14 @@ do { \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_SMP ++extern void migrate_disable(void); ++extern void migrate_enable(void); ++#else ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++#endif ++ + #ifdef CONFIG_PREEMPT_RT_FULL + # define preempt_disable_rt() preempt_disable() + # define preempt_enable_rt() preempt_enable() +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1198,6 +1198,7 @@ struct task_struct { + #endif + + unsigned int policy; ++ int migrate_disable; + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -1606,9 +1607,6 @@ struct task_struct { + #endif + }; + +-/* Future-safe accessor for struct task_struct's cpus_allowed. */ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +- + #define TNF_MIGRATED 0x01 + #define TNF_NO_GROUP 0x02 + #define TNF_SHARED 0x04 +@@ -2953,6 +2951,15 @@ static inline void set_task_cpu(struct t + + #endif /* CONFIG_SMP */ + ++/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) ++{ ++ if (p->migrate_disable) ++ return cpumask_of(task_cpu(p)); ++ ++ return &p->cpus_allowed; ++} ++ + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4622,11 +4622,12 @@ void init_idle(struct task_struct *idle, + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (p->sched_class && p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, new_mask); +- ++ if (!p->migrate_disable) { ++ if (p->sched_class && p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++ } + cpumask_copy(&p->cpus_allowed, new_mask); +- p->nr_cpus_allowed = cpumask_weight(new_mask); + } + + /* +@@ -4672,7 +4673,7 @@ int set_cpus_allowed_ptr(struct task_str + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -4691,6 +4692,83 @@ int set_cpus_allowed_ptr(struct task_str + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ preempt_disable(); ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ preempt_enable(); ++ return; ++ } ++ ++ pin_current_cpu(); ++ if (unlikely(!scheduler_running)) { ++ p->migrate_disable = 1; ++ preempt_enable(); ++ return; ++ } ++ rq = task_rq_lock(p, &flags); ++ p->migrate_disable = 1; ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ task_rq_unlock(rq, p, &flags); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ preempt_disable(); ++ if (p->migrate_disable > 1) { ++ p->migrate_disable--; ++ preempt_enable(); ++ return; ++ } ++ ++ if (unlikely(!scheduler_running)) { ++ p->migrate_disable = 0; ++ unpin_current_cpu(); ++ preempt_enable(); ++ return; ++ } ++ ++ rq = task_rq_lock(p, &flags); ++ p->migrate_disable = 0; ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ ++ task_rq_unlock(rq, p, &flags); ++ unpin_current_cpu(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++ + /* + * Move (not current) task off this cpu, onto dest cpu. 
We're doing + * this because either it can't run here any more (set_cpus_allowed() +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -38,9 +38,9 @@ notrace unsigned int debug_smp_processor + if (!printk_ratelimit()) + goto out_enable; + +- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " +- "code: %s/%d\n", +- preempt_count() - 1, current->comm, current->pid); ++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " ++ "code: %s/%d\n", preempt_count() - 1, ++ current->migrate_disable, current->comm, current->pid); + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); + diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch new file mode 100644 index 000000000..5be1a1786 --- /dev/null +++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch @@ -0,0 +1,135 @@ +Subject: sched-mmdrop-delayed.patch +From: Thomas Gleixner +Date: Mon, 06 Jun 2011 12:20:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Needs thread context (pgd_lock) -> ifdeffed. workqueues wont work with +RT + +Signed-off-by: Thomas Gleixner +--- + include/linux/mm_types.h | 4 ++++ + include/linux/sched.h | 12 ++++++++++++ + kernel/fork.c | 13 +++++++++++++ + kernel/sched/core.c | 19 +++++++++++++++++-- + 4 files changed, 46 insertions(+), 2 deletions(-) + +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -453,6 +454,9 @@ struct mm_struct { + bool tlb_flush_pending; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + }; + + static inline void mm_init_cpumask(struct mm_struct *mm) +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2320,12 +2320,24 @@ extern struct mm_struct * mm_alloc(void) + + /* mmdrop drops the mm and the page tables */ + extern void __mmdrop(struct mm_struct *); ++ + static inline void mmdrop(struct mm_struct * mm) + { + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + /* mmput gets rid of the mappings and all user-space */ + extern void mmput(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -612,6 +612,19 @@ void __mmdrop(struct mm_struct *mm) + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. ++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + /* + * Decrement the use count and release all resources for an mm. + */ +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2164,8 +2164,12 @@ static void finish_task_switch(struct rq + finish_arch_post_lock_switch(); + + fire_sched_in_preempt_notifiers(current); ++ /* ++ * We use mmdrop_delayed() here so we don't have to do the ++ * full __mmdrop() when we are the last user. 
++ */ + if (mm) +- mmdrop(mm); ++ mmdrop_delayed(mm); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); +@@ -4763,6 +4767,8 @@ static int migration_cpu_stop(void *data + + #ifdef CONFIG_HOTPLUG_CPU + ++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); ++ + /* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. +@@ -4775,7 +4781,12 @@ void idle_task_exit(void) + + if (mm != &init_mm) + switch_mm(mm, &init_mm, current); +- mmdrop(mm); ++ ++ /* ++ * Defer the cleanup to an alive cpu. On RT we can neither ++ * call mmdrop() nor mmdrop_delayed() from here. ++ */ ++ per_cpu(idle_last_mm, smp_processor_id()) = mm; + } + + /* +@@ -5099,6 +5110,10 @@ migration_call(struct notifier_block *nf + + case CPU_DEAD: + calc_load_migrate(rq); ++ if (per_cpu(idle_last_mm, cpu)) { ++ mmdrop(per_cpu(idle_last_mm, cpu)); ++ per_cpu(idle_last_mm, cpu) = NULL; ++ } + break; + #endif + } diff --git a/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch new file mode 100644 index 000000000..bf1c7fde6 --- /dev/null +++ b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch @@ -0,0 +1,64 @@ +Subject: sched, rt: Fix migrate_enable() thinko +From: Mike Galbraith +Date: Tue, 23 Aug 2011 16:12:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Assigning mask = tsk_cpus_allowed(p) after p->migrate_disable = 0 ensures +that we won't see a mask change.. no push/pull, we stack tasks on one CPU. + +Also add a couple fields to sched_debug for the next guy. + +[ Build fix from Stratos Psomadakis ] + +Signed-off-by: Mike Galbraith +Cc: Paul E. 
McKenney +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314108763.6689.4.camel@marge.simson.net +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 4 +++- + kernel/sched/debug.c | 7 +++++++ + 2 files changed, 10 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4769,12 +4769,14 @@ void migrate_enable(void) + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); +- p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); ++ p->migrate_disable = 0; + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ /* Get the mask now that migration is enabled */ ++ mask = tsk_cpus_allowed(p); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -260,6 +260,9 @@ void print_rt_rq(struct seq_file *m, int + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++#ifdef CONFIG_SMP ++ P(rt_nr_migratory); ++#endif + + #undef PN + #undef P +@@ -639,6 +642,10 @@ void proc_sched_show_task(struct task_st + #endif + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); ++#endif ++ P(nr_cpus_allowed); + #undef PN + #undef __PN + #undef P diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch new file mode 100644 index 000000000..c864a09e6 --- /dev/null +++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch @@ -0,0 +1,88 @@ +Subject: sched-rt-mutex-wakeup.patch +From: Thomas Gleixner +Date: Sat, 25 Jun 2011 09:21:04 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 2 ++ + kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++- + kernel/sched/sched.h | 1 + + 3 files changed, 33 insertions(+), 1 deletion(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1161,6 +1161,7 @@ enum perf_event_task_context { + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -2206,6 +2207,7 @@ extern void xtime_update(unsigned long t + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1610,8 +1610,25 @@ try_to_wake_up(struct task_struct *p, un + */ + smp_mb__before_spinlock(); + raw_spin_lock_irqsave(&p->pi_lock, flags); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * The task might be running due to a spinlock sleeper ++ * wakeup. Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) ++ p->saved_state = TASK_RUNNING; ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". 
++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + success = 1; /* we're going to change ->state */ + cpu = task_cpu(p); +@@ -1708,6 +1725,18 @@ int wake_up_process(struct task_struct * + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. ++ */ ++int wake_up_lock_sleeper(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++} ++ + int wake_up_state(struct task_struct *p, unsigned int state) + { + return try_to_wake_up(p, state, 0); +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1047,6 +1047,7 @@ static inline void finish_lock_switch(st + #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ + #define WF_FORK 0x02 /* child wakeup after fork */ + #define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + /* + * To aid in avoiding the subversion of "niceness" due to uneven distribution diff --git a/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch new file mode 100644 index 000000000..82252a9d6 --- /dev/null +++ b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch @@ -0,0 +1,89 @@ +Subject: sched: Teach migrate_disable about atomic contexts +From: Peter Zijlstra +Date: Fri, 02 Sep 2011 14:41:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Subject: sched: teach migrate_disable about atomic contexts +From: Peter Zijlstra +Date: Fri Sep 02 14:29:27 CEST 2011 + + [] spin_bug+0x94/0xa8 + [] do_raw_spin_lock+0x43/0xea + [] _raw_spin_lock_irqsave+0x6b/0x85 + [] ? migrate_disable+0x75/0x12d + [] ? pin_current_cpu+0x36/0xb0 + [] migrate_disable+0x75/0x12d + [] pagefault_disable+0xe/0x1f + [] copy_from_user_nmi+0x74/0xe6 + [] perf_callchain_user+0xf3/0x135 + +Now clearly we can't go around taking locks from NMI context, cure +this by short-circuiting migrate_disable() when we're in an atomic +context already. 
+ +Add some extra debugging to avoid things like: + + preempt_disable() + migrate_disable(); + + preempt_enable(); + migrate_enable(); + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins +Signed-off-by: Thomas Gleixner +Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org +--- + include/linux/sched.h | 3 +++ + kernel/sched/core.c | 21 +++++++++++++++++++++ + 2 files changed, 24 insertions(+) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1200,6 +1200,9 @@ struct task_struct { + unsigned int policy; + #ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++# endif + #endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4700,6 +4700,17 @@ void migrate_disable(void) + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ + preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; +@@ -4748,6 +4759,16 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif + WARN_ON_ONCE(p->migrate_disable <= 0); + + preempt_disable(); diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch new file mode 100644 index 000000000..8a2542057 --- /dev/null +++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch @@ -0,0 +1,35 @@ +Subject: sched: ttwu: Return success when only changing the saved_state value +From: Thomas Gleixner +Date: Tue, 13 Dec 2011 21:42:19 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When a task blocks on a rt lock, it saves the current state in +p->saved_state, so a lock related wake up will not destroy the +original state. + +When a real wakeup happens, while the task is running due to a lock +wakeup already, we update p->saved_state to TASK_RUNNING, but we do +not return success, which might cause another wakeup in the waitqueue +code and the task remains in the waitqueue list. Return success in +that case as well. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1617,8 +1617,10 @@ try_to_wake_up(struct task_struct *p, un + * if the wakeup condition is true. 
+ */ + if (!(wake_flags & WF_LOCK_SLEEPER)) { +- if (p->saved_state & state) ++ if (p->saved_state & state) { + p->saved_state = TASK_RUNNING; ++ success = 1; ++ } + } + goto out; + } diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch new file mode 100644 index 000000000..467947629 --- /dev/null +++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -0,0 +1,40 @@ +From b24ee416f22bd2a2325b8f6afa5a4065dd3560e9 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Mon, 18 Mar 2013 15:12:49 -0400 +Subject: [PATCH] sched/workqueue: Only wake up idle workers if not blocked on + sleeping spin lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +In -rt, most spin_locks() turn into mutexes. One of these spin_lock +conversions is performed on the workqueue gcwq->lock. When the idle +worker is worken, the first thing it will do is grab that same lock and +it too will block, possibly jumping into the same code, but because +nr_running would already be decremented it prevents an infinite loop. + +But this is still a waste of CPU cycles, and it doesn't follow the method +of mainline, as new workers should only be woken when a worker thread is +truly going to sleep, and not just blocked on a spin_lock(). + +Check the saved_state too before waking up new workers. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/sched/core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2751,8 +2751,10 @@ static void __sched __schedule(void) + * If a worker went to sleep, notify and ask workqueue + * whether it wants to wake up a task to maintain + * concurrency. ++ * Only call wake up if prev isn't blocked on a sleeping ++ * spin lock. 
+ */ +- if (prev->flags & PF_WQ_WORKER) { ++ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { + struct task_struct *to_wakeup; + + to_wakeup = wq_worker_sleeping(prev, cpu); diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch new file mode 100644 index 000000000..6a4e4e970 --- /dev/null +++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch @@ -0,0 +1,112 @@ +Subject: scsi-fcoe-rt-aware.patch +From: Thomas Gleixner +Date: Sat, 12 Nov 2011 14:00:48 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/scsi/fcoe/fcoe.c | 18 +++++++++--------- + drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++-- + drivers/scsi/libfc/fc_exch.c | 4 ++-- + 3 files changed, 13 insertions(+), 13 deletions(-) + +--- a/drivers/scsi/fcoe/fcoe.c ++++ b/drivers/scsi/fcoe/fcoe.c +@@ -1286,7 +1286,7 @@ static void fcoe_percpu_thread_destroy(u + struct sk_buff *skb; + #ifdef CONFIG_SMP + struct fcoe_percpu_s *p0; +- unsigned targ_cpu = get_cpu(); ++ unsigned targ_cpu = get_cpu_light(); + #endif /* CONFIG_SMP */ + + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); +@@ -1342,7 +1342,7 @@ static void fcoe_percpu_thread_destroy(u + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + } +- put_cpu(); ++ put_cpu_light(); + #else + /* + * This a non-SMP scenario where the singular Rx thread is +@@ -1566,11 +1566,11 @@ static int fcoe_rcv(struct sk_buff *skb, + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1768,11 +1768,11 @@ static inline int fcoe_filter_frames(str + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1848,13 +1848,13 @@ static void fcoe_recv_frame(struct sk_bu + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -831,7 +831,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -867,7 +867,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -816,10 +816,10 @@ static struct fc_exch *fc_exch_em_alloc( + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { diff --git 
a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch new file mode 100644 index 000000000..76fe11330 --- /dev/null +++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch @@ -0,0 +1,48 @@ +Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll +From: John Kacur +Date: Fri, 27 Apr 2012 12:48:46 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +RT triggers the following: + +[ 11.307652] [] __might_sleep+0xe7/0x110 +[ 11.307663] [] rt_spin_lock+0x24/0x60 +[ 11.307670] [] ? rt_spin_lock_slowunlock+0x78/0x90 +[ 11.307703] [] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx] +[ 11.307736] [] qla2x00_poll+0x67/0x90 [qla2xxx] + +Function qla2x00_poll does local_irq_save() before calling qla24xx_intr_handler +which has a spinlock. Since spinlocks are sleepable on rt, it is not allowed +to call them with interrupts disabled. Therefore we use local_irq_save_nort() +instead which saves flags without disabling interrupts. + +This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt + +Suggested-by: Thomas Gleixner +Signed-off-by: John Kacur +Cc: Steven Rostedt +Cc: David Sommerseth +Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + drivers/scsi/qla2xxx/qla_inline.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h +@@ -58,12 +58,12 @@ qla2x00_poll(struct rsp_que *rsp) + { + unsigned long flags; + struct qla_hw_data *ha = rsp->hw; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (IS_P3P_TYPE(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static inline uint8_t * diff --git a/debian/patches/features/all/rt/seqlock-consolidate-spin_lock-unlock-waiting-with-sp.patch b/debian/patches/features/all/rt/seqlock-consolidate-spin_lock-unlock-waiting-with-sp.patch new file mode 100644 index 000000000..3955d58b2 --- /dev/null +++ b/debian/patches/features/all/rt/seqlock-consolidate-spin_lock-unlock-waiting-with-sp.patch @@ -0,0 +1,71 @@ +From a6fd9edd1abb27cccce4016ec60cc643c8dba760 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Sun, 1 Dec 2013 23:03:52 -0500 +Subject: [PATCH] seqlock: consolidate spin_lock/unlock waiting with + spin_unlock_wait +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +since c2f21ce ("locking: Implement new raw_spinlock") +include/linux/spinlock.h includes spin_unlock_wait() to wait for a concurren +holder of a lock. this patch just moves over to that API. spin_unlock_wait +covers both raw_spinlock_t and spinlock_t so it should be safe here as well. +the added rt-variant of read_seqbegin in include/linux/seqlock.h that is being +modified, was introduced by patch: + seqlock-prevent-rt-starvation.patch + +behavior should be unchanged. 
+ +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/seqlock.h | 11 +++++------ + 1 file changed, 5 insertions(+), 6 deletions(-) + +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -310,8 +310,7 @@ static inline unsigned read_seqbegin(seq + * Take the lock and let the writer proceed (i.e. evtl + * boost it), otherwise we could loop here forever. + */ +- spin_lock(&sl->lock); +- spin_unlock(&sl->lock); ++ spin_unlock_wait(&sl->lock); + goto repeat; + } + return ret; +@@ -331,7 +330,7 @@ static inline unsigned read_seqretry(con + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- __write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock(seqlock_t *sl) +@@ -343,7 +342,7 @@ static inline void write_sequnlock(seqlo + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- __write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) +@@ -355,7 +354,7 @@ static inline void write_sequnlock_bh(se + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- __write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) +@@ -369,7 +368,7 @@ static inline unsigned long __write_seql + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- __write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + return flags; + } + diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch new file mode 100644 index 000000000..f3670cc7a --- /dev/null +++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch @@ -0,0 +1,188 @@ +Subject: seqlock: Prevent rt starvation +From: Thomas Gleixner +Date: Wed, 22 Feb 2012 12:03:30 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If a low prio writer gets preempted while holding the seqlock write +locked, a high prio reader spins forever on RT. + +To prevent this let the reader grab the spinlock, so it blocks and +eventually boosts the writer. This way the writer can proceed and +endless spinning is prevented. + +For seqcount writers we disable preemption over the update code +path. Thaanks to Al Viro for distangling some VFS code to make that +possible. 
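At the usage level the change is transparent to seqlock callers. A minimal sketch (foo_lock, foo_update() and foo_read() are made-up names for illustration, not part of the patch) shows where the boost now happens on RT: when the reader observes an odd sequence count, read_seqbegin() briefly takes sl->lock, so it blocks on and priority-boosts a preempted writer instead of spinning:

    static DEFINE_SEQLOCK(foo_lock);

    /* writer side: unchanged for callers */
    write_seqlock(&foo_lock);
    foo_update();
    write_sequnlock(&foo_lock);

    /* reader side: unchanged for callers, but on PREEMPT_RT_FULL
     * read_seqbegin() may now sleep on foo_lock.lock instead of
     * busy-looping against an in-progress write */
    unsigned seq;
    do {
            seq = read_seqbegin(&foo_lock);
            foo_read();
    } while (read_seqretry(&foo_lock, seq));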
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org + +--- + include/linux/seqlock.h | 57 +++++++++++++++++++++++++++++++++++++----------- + include/net/dst.h | 2 - + include/net/neighbour.h | 4 +-- + 3 files changed, 48 insertions(+), 15 deletions(-) + +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -205,20 +205,30 @@ static inline int read_seqcount_retry(co + return __read_seqcount_retry(s, start); + } + +- +- +-static inline void raw_write_seqcount_begin(seqcount_t *s) ++static inline void __raw_write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void raw_write_seqcount_end(seqcount_t *s) ++static inline void raw_write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __raw_write_seqcount_begin(s); ++} ++ ++static inline void __raw_write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void raw_write_seqcount_end(seqcount_t *s) ++{ ++ __raw_write_seqcount_end(s); ++ preempt_enable_rt(); ++} ++ + /* + * Sequence counter only version assumes that callers are using their + * own mutexing. +@@ -280,10 +290,33 @@ typedef struct { + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; ++ ++repeat: ++ ret = ACCESS_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. evtl ++ * boost it), otherwise we could loop here forever. ++ */ ++ spin_lock(&sl->lock); ++ spin_unlock(&sl->lock); ++ goto repeat; ++ } ++ return ret; ++} ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -298,36 +331,36 @@ static inline unsigned read_seqretry(con + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -336,7 +369,7 @@ static inline unsigned long __write_seql + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -346,7 +379,7 @@ static inline unsigned long __write_seql + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +--- a/include/net/dst.h 
++++ b/include/net/dst.h +@@ -393,7 +393,7 @@ static inline void dst_confirm(struct ds + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -388,7 +388,7 @@ static inline int neigh_hh_bridge(struct + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -443,7 +443,7 @@ struct neighbour_cb { + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch new file mode 100644 index 000000000..e2028ec15 --- /dev/null +++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch @@ -0,0 +1,36 @@ +Subject: signal-fix-up-rcu-wreckage.patch +From: Thomas Gleixner +Date: Fri, 22 Jul 2011 08:07:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/signal.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1394,12 +1394,12 @@ struct sighand_struct *__lock_task_sigha + struct sighand_struct *sighand; + + for (;;) { +- local_irq_save(*flags); ++ local_irq_save_nort(*flags); + rcu_read_lock(); + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) { + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + break; + } + +@@ -1410,7 +1410,7 @@ struct sighand_struct *__lock_task_sigha + } + spin_unlock(&sighand->siglock); + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + } + + return sighand; diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch new file mode 100644 index 000000000..987ba4c86 --- /dev/null +++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch @@ -0,0 +1,28 @@ +Subject: signal-revert-ptrace-preempt-magic.patch +From: Thomas Gleixner +Date: Wed, 21 Sep 2011 19:57:12 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/signal.c | 8 -------- + 1 file changed, 8 deletions(-) + +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1908,15 +1908,7 @@ static void ptrace_stop(int exit_code, i + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). 
+- */ +- preempt_disable(); + read_unlock(&tasklist_lock); +- preempt_enable_no_resched(); + freezable_schedule(); + } else { + /* diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch new file mode 100644 index 000000000..6098c4bc4 --- /dev/null +++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -0,0 +1,214 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:56 -0500 +Subject: signals: Allow rt tasks to cache one sigqueue struct +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +To avoid allocation allow rt tasks to cache one sigqueue struct in +task struct. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 1 + include/linux/signal.h | 1 + kernel/exit.c | 2 - + kernel/fork.c | 1 + kernel/signal.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++--- + 5 files changed, 84 insertions(+), 5 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1340,6 +1340,7 @@ struct task_struct { + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ +--- a/include/linux/signal.h ++++ b/include/linux/signal.h +@@ -226,6 +226,7 @@ static inline void init_sigpending(struc + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -146,7 +146,7 @@ static void __exit_signal(struct task_st + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
+ */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1236,6 +1236,7 @@ static struct task_struct *copy_process( + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = p->stime = p->gtime = 0; + p->utimescaled = p->stimescaled = 0; +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -349,13 +350,45 @@ static bool task_participate_group_stop( + return false; + } + ++#ifdef __HAVE_ARCH_CMPXCHG ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ ++#else ++ ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ return NULL; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ return 1; ++} ++ ++#endif ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -372,7 +405,10 @@ static struct sigqueue * + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +- q = kmem_cache_alloc(sigqueue_cachep, flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -389,6 +425,13 @@ static struct sigqueue * + return q; + } + ++static struct sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -398,6 +441,21 @@ static void __sigqueue_free(struct sigqu + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ atomic_dec(&up->sigpending); ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -411,6 +469,21 @@ void flush_sigqueue(struct sigpending *q + } + + /* ++ * Called from __exit_signal. Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ ++/* + * Flush all pending signals for a task. 
+ */ + void __flush_signals(struct task_struct *t) +@@ -562,7 +635,7 @@ static void collect_signal(int sig, stru + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -608,6 +681,8 @@ int dequeue_signal(struct task_struct *t + { + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -1547,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid); + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; diff --git a/debian/patches/features/all/rt/simple-wait-rename-and-export-the-equivalent-of-wait.patch b/debian/patches/features/all/rt/simple-wait-rename-and-export-the-equivalent-of-wait.patch new file mode 100644 index 000000000..4d224ddde --- /dev/null +++ b/debian/patches/features/all/rt/simple-wait-rename-and-export-the-equivalent-of-wait.patch @@ -0,0 +1,64 @@ +From 069b715a6b4f86a4a09a0be1d7156c7b388eaf2d Mon Sep 17 00:00:00 2001 +From: Paul Gortmaker +Date: Tue, 27 Aug 2013 14:20:26 -0400 +Subject: [PATCH] simple-wait: rename and export the equivalent of + waitqueue_active() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The function "swait_head_has_waiters()" was internalized into +wait-simple.c but it parallels the waitqueue_active of normal +waitqueue support. Given that there are over 150 waitqueue_active +users in drivers/ fs/ kernel/ and the like, lets make it globally +visible, and rename it to parallel the waitqueue_active accordingly. +We'll need to do this if we expect to expand its usage beyond RT. 
+ +Signed-off-by: Paul Gortmaker +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/wait-simple.h | 8 ++++++++ + kernel/sched/wait-simple.c | 10 +--------- + 2 files changed, 9 insertions(+), 9 deletions(-) + +--- a/include/linux/wait-simple.h ++++ b/include/linux/wait-simple.h +@@ -47,6 +47,14 @@ extern void swait_prepare(struct swait_h + extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); + extern void swait_finish(struct swait_head *head, struct swaiter *w); + ++/* Check whether a head has waiters enqueued */ ++static inline bool swaitqueue_active(struct swait_head *h) ++{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); ++ return !list_empty(&h->list); ++} ++ + /* + * Wakeup functions + */ +--- a/kernel/sched/wait-simple.c ++++ b/kernel/sched/wait-simple.c +@@ -26,14 +26,6 @@ static inline void __swait_dequeue(struc + list_del_init(&w->node); + } + +-/* Check whether a head has waiters enqueued */ +-static inline bool swait_head_has_waiters(struct swait_head *h) +-{ +- /* Make sure the condition is visible before checking list_empty() */ +- smp_mb(); +- return !list_empty(&h->list); +-} +- + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) + { + raw_spin_lock_init(&head->lock); +@@ -112,7 +104,7 @@ unsigned int + unsigned long flags; + int woken; + +- if (!swait_head_has_waiters(head)) ++ if (!swaitqueue_active(head)) + return 0; + + raw_spin_lock_irqsave(&head->lock, flags); diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch new file mode 100644 index 000000000..04506f7e3 --- /dev/null +++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch @@ -0,0 +1,128 @@ +From: Thomas Gleixner +Date: Tue, 12 Jul 2011 15:38:34 +0200 +Subject: use skbufhead with raw lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/netdevice.h | 1 + + include/linux/skbuff.h | 7 +++++++ + net/core/dev.c | 26 ++++++++++++++++++++------ + 3 files changed, 28 insertions(+), 6 deletions(-) + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2024,6 +2024,7 @@ struct softnet_data { + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + + #ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -152,6 +152,7 @@ struct sk_buff_head { + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1150,6 +1151,12 @@ static inline void skb_queue_head_init(s + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -199,14 +199,14 @@ static inline struct hlist_head *dev_ind + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -3747,7 +3747,7 
@@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -3756,10 +3756,13 @@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } ++ ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + } + + static int napi_gro_complete(struct sk_buff *skb) +@@ -4336,10 +4339,17 @@ static void net_rx_action(struct softirq + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; + int budget = netdev_budget; ++ struct sk_buff *skb; + void *have; + + local_irq_disable(); + ++ while ((skb = __skb_dequeue(&sd->tofree_queue))) { ++ local_irq_enable(); ++ kfree_skb(skb); ++ local_irq_disable(); ++ } ++ + while (!list_empty(&sd->poll_list)) { + struct napi_struct *n; + int work, weight; +@@ -6698,6 +6708,9 @@ static int dev_cpu_callback(struct notif + netif_rx_internal(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; + } +@@ -7008,8 +7021,9 @@ static int __init net_dev_init(void) + for_each_possible_cpu(i) { + struct softnet_data *sd = &per_cpu(softnet_data, i); + +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue_tailp = &sd->output_queue; + #ifdef CONFIG_RPS diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch new file mode 100644 index 000000000..3fde2fd2a --- /dev/null +++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch @@ -0,0 +1,47 @@ +Subject: slub: Enable irqs for __GFP_WAIT +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:08:15 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +SYSTEM_RUNNING might be too late for enabling interrupts. Allocations +with GFP_WAIT can happen before that. So use this as an indicator. 
+ +Signed-off-by: Thomas Gleixner +--- + mm/slub.c | 13 +++++-------- + 1 file changed, 5 insertions(+), 8 deletions(-) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1337,14 +1337,15 @@ static struct page *allocate_slab(struct + struct page *page; + struct kmem_cache_order_objects oo = s->oo; + gfp_t alloc_gfp; ++ bool enableirqs; + + flags &= gfp_allowed_mask; + ++ enableirqs = (flags & __GFP_WAIT) != 0; + #ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) ++ enableirqs |= system_state == SYSTEM_RUNNING; + #endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1384,11 +1385,7 @@ static struct page *allocate_slab(struct + kmemcheck_mark_unallocated_pages(page, pages); + } + +-#ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) +-#endif ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; diff --git a/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch new file mode 100644 index 000000000..9cfeafacf --- /dev/null +++ b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch @@ -0,0 +1,36 @@ +From: Sebastian Andrzej Siewior +Subject: slub: delay ctor until the object is requested +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +It seems that allocation of plenty objects causes latency on ARM since that +code can not be preempted + +Signed-off-by: Sebastian Andrzej Siewior +--- + mm/slub.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1403,8 +1403,10 @@ static void setup_object(struct kmem_cac + void *object) + { + setup_object_debug(s, page, object); ++#ifndef CONFIG_PREEMPT_RT_FULL + if (unlikely(s->ctor)) + s->ctor(object); ++#endif + } + + static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) +@@ -2519,6 +2521,10 @@ static __always_inline void *slab_alloc_ + + if (unlikely(gfpflags & __GFP_ZERO) && object) + memset(object, 0, s->object_size); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (unlikely(s->ctor) && object) ++ s->ctor(object); ++#endif + + slab_post_alloc_hook(s, gfpflags, object); + diff --git a/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch new file mode 100644 index 000000000..bfce0e59b --- /dev/null +++ b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch @@ -0,0 +1,175 @@ +Subject: softirq: Adapt NOHZ softirq pending check to new RT scheme +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 13:46:16 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We can't rely on ksoftirqd anymore and we need to check the tasks +which run a particular softirq and if such a task is pi blocked ignore +the other pending bits of that task as well. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 83 ++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 58 insertions(+), 25 deletions(-) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -65,46 +65,71 @@ const char * const softirq_to_name[NR_SO + + #ifdef CONFIG_NO_HZ_COMMON + # ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct softirq_runner { ++ struct task_struct *runner[NR_SOFTIRQS]; ++}; ++ ++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); ++ ++static inline void softirq_set_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ ++ sr->runner[sirq] = current; ++} ++ ++static inline void softirq_clr_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ ++ sr->runner[sirq] = NULL; ++} ++ + /* +- * On preempt-rt a softirq might be blocked on a lock. There might be +- * no other runnable task on this CPU because the lock owner runs on +- * some other CPU. So we have to go into idle with the pending bit +- * set. Therefor we need to check this otherwise we warn about false +- * positives which confuses users and defeats the whole purpose of +- * this test. ++ * On preempt-rt a softirq running context might be blocked on a ++ * lock. There might be no other runnable task on this CPU because the ++ * lock owner runs on some other CPU. So we have to go into idle with ++ * the pending bit set. Therefor we need to check this otherwise we ++ * warn about false positives which confuses users and defeats the ++ * whole purpose of this test. + * + * This code is called with interrupts disabled. + */ + void softirq_check_pending_idle(void) + { + static int rate_limit; +- u32 warnpending = 0, pending; ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ u32 warnpending; ++ int i; + + if (rate_limit >= 10) + return; + +- pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; +- if (pending) { +- struct task_struct *tsk; ++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ for (i = 0; i < NR_SOFTIRQS; i++) { ++ struct task_struct *tsk = sr->runner[i]; + +- tsk = __get_cpu_var(ksoftirqd); + /* + * The wakeup code in rtmutex.c wakes up the task + * _before_ it sets pi_blocked_on to NULL under + * tsk->pi_lock. So we need to check for both: state + * and pi_blocked_on. 
+ */ +- raw_spin_lock(&tsk->pi_lock); +- +- if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) +- warnpending = 1; +- +- raw_spin_unlock(&tsk->pi_lock); ++ if (tsk) { ++ raw_spin_lock(&tsk->pi_lock); ++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { ++ /* Clear all bits pending in that task */ ++ warnpending &= ~(tsk->softirqs_raised); ++ warnpending &= ~(1 << i); ++ } ++ raw_spin_unlock(&tsk->pi_lock); ++ } + } + + if (warnpending) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", +- pending); ++ warnpending); + rate_limit++; + } + } +@@ -124,6 +149,10 @@ void softirq_check_pending_idle(void) + } + } + # endif ++ ++#else /* !CONFIG_NO_HZ_COMMON */ ++static inline void softirq_set_runner(unsigned int sirq) { } ++static inline void softirq_clr_runner(unsigned int sirq) { } + #endif + + /* +@@ -505,6 +534,7 @@ static void do_current_softirqs(int need + */ + lock_softirq(i); + local_irq_disable(); ++ softirq_set_runner(i); + /* + * Check with the local_softirq_pending() bits, + * whether we need to process this still or if someone +@@ -515,6 +545,7 @@ static void do_current_softirqs(int need + set_softirq_pending(pending & ~mask); + do_single_softirq(i, need_rcu_bh_qs); + } ++ softirq_clr_runner(i); + unlock_softirq(i); + WARN_ON(current->softirq_nestcnt != 1); + } +@@ -610,7 +641,7 @@ void thread_do_softirq(void) + } + } + +-void __raise_softirq_irqoff(unsigned int nr) ++static void do_raise_softirq_irqoff(unsigned int nr) + { + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +@@ -627,12 +658,19 @@ void __raise_softirq_irqoff(unsigned int + __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); + } + ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ if (!in_irq() && !current->softirq_nestcnt) ++ wakeup_softirqd(); ++} ++ + /* + * This function must run with irqs disabled! 
+ */ + void raise_softirq_irqoff(unsigned int nr) + { +- __raise_softirq_irqoff(nr); ++ do_raise_softirq_irqoff(nr); + + /* + * If we're in an hard interrupt we let irq return code deal +@@ -654,11 +692,6 @@ void raise_softirq_irqoff(unsigned int n + wakeup_softirqd(); + } + +-void do_raise_softirq_irqoff(unsigned int nr) +-{ +- raise_softirq_irqoff(nr); +-} +- + static inline int ksoftirqd_softirq_pending(void) + { + return current->softirqs_raised; diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch new file mode 100644 index 000000000..9927b6fe3 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch @@ -0,0 +1,170 @@ +Subject: softirq-disable-softirq-stacks-for-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 13:59:17 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/kernel/irq.c | 2 ++ + arch/powerpc/kernel/misc_32.S | 2 ++ + arch/powerpc/kernel/misc_64.S | 2 ++ + arch/sh/kernel/irq.c | 2 ++ + arch/sparc/kernel/irq_64.c | 2 ++ + arch/x86/kernel/entry_64.S | 2 ++ + arch/x86/kernel/irq_32.c | 2 ++ + include/linux/interrupt.h | 8 ++++---- + 8 files changed, 18 insertions(+), 4 deletions(-) + +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c +@@ -605,6 +605,7 @@ void irq_ctx_init(void) + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curtp, *irqtp; +@@ -622,6 +623,7 @@ void do_softirq_own_stack(void) + if (irqtp->flags) + set_bits(irqtp->flags, &curtp->flags); + } ++#endif + + irq_hw_number_t virq_to_hw(unsigned int virq) + { +--- a/arch/powerpc/kernel/misc_32.S ++++ b/arch/powerpc/kernel/misc_32.S +@@ -40,6 +40,7 @@ + * We store the saved ksp_limit in the unused part + * of the STACK_FRAME_OVERHEAD + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -56,6 +57,7 @@ + stw r10,THREAD+KSP_LIMIT(r2) + mtlr r0 + blr ++#endif + + /* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); +--- a/arch/powerpc/kernel/misc_64.S ++++ b/arch/powerpc/kernel/misc_64.S +@@ -29,6 +29,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -39,6 +40,7 @@ + ld r0,16(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_do_irq) + mflr r0 +--- a/arch/sh/kernel/irq.c ++++ b/arch/sh/kernel/irq.c +@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu) + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curctx; +@@ -176,6 +177,7 @@ void do_softirq_own_stack(void) + "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" + ); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +--- a/arch/sparc/kernel/irq_64.c ++++ b/arch/sparc/kernel/irq_64.c +@@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, st + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; +@@ -712,6 +713,7 @@ void do_softirq_own_stack(void) + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1350,6 +1350,7 @@ END(native_load_gs_index) + jmp 2b + .previous + ++#ifndef CONFIG_PREEMPT_RT_FULL 
+ /* Call softirq on interrupt stack. Interrupts are off. */ + ENTRY(do_softirq_own_stack) + CFI_STARTPROC +@@ -1369,6 +1370,7 @@ ENTRY(do_softirq_own_stack) + ret + CFI_ENDPROC + END(do_softirq_own_stack) ++#endif + + #ifdef CONFIG_XEN + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +--- a/arch/x86/kernel/irq_32.c ++++ b/arch/x86/kernel/irq_32.c +@@ -145,6 +145,7 @@ void irq_ctx_init(int cpu) + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curctx; +@@ -161,6 +162,7 @@ void do_softirq_own_stack(void) + + call_on_stack(__do_softirq, isp); + } ++#endif + + bool handle_irq(unsigned irq, struct pt_regs *regs) + { +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -381,13 +381,10 @@ struct softirq_action + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +-#ifndef CONFIG_PREEMPT_RT_FULL + static inline void thread_do_softirq(void) { do_softirq(); } +-#else +-extern void thread_do_softirq(void); +-#endif + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -396,6 +393,9 @@ static inline void do_softirq_own_stack( + __do_softirq(); + } + #endif ++#else ++extern void thread_do_softirq(void); ++#endif + + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); diff --git a/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch new file mode 100644 index 000000000..a06156243 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch @@ -0,0 +1,134 @@ +Subject: softirq: Init softirq local lock after per cpu section is set up +From: Steven Rostedt +Date: Thu, 04 Oct 2012 11:02:04 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +I discovered this bug when booting 3.4-rt on my powerpc box. It crashed +with the following report: + +------------[ cut here ]------------ +kernel BUG at /work/rt/stable-rt.git/kernel/rtmutex_common.h:75! 
+Oops: Exception in kernel mode, sig: 5 [#1] +PREEMPT SMP NR_CPUS=64 NUMA PA Semi PWRficient +Modules linked in: +NIP: c0000000004aa03c LR: c0000000004aa01c CTR: c00000000009b2ac +REGS: c00000003e8d7950 TRAP: 0700 Not tainted (3.4.11-test-rt19) +MSR: 9000000000029032 CR: 24000082 XER: 20000000 +SOFTE: 0 +TASK = c00000003e8fdcd0[11] 'ksoftirqd/1' THREAD: c00000003e8d4000 CPU: 1 +GPR00: 0000000000000001 c00000003e8d7bd0 c000000000d6cbb0 0000000000000000 +GPR04: c00000003e8fdcd0 0000000000000000 0000000024004082 c000000000011454 +GPR08: 0000000000000000 0000000080000001 c00000003e8fdcd1 0000000000000000 +GPR12: 0000000024000084 c00000000fff0280 ffffffffffffffff 000000003ffffad8 +GPR16: ffffffffffffffff 000000000072c798 0000000000000060 0000000000000000 +GPR20: 0000000000642741 000000000072c858 000000003ffffaf0 0000000000000417 +GPR24: 000000000072dcd0 c00000003e7ff990 0000000000000000 0000000000000001 +GPR28: 0000000000000000 c000000000792340 c000000000ccec78 c000000001182338 +NIP [c0000000004aa03c] .wakeup_next_waiter+0x44/0xb8 +LR [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 +Call Trace: +[c00000003e8d7bd0] [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 (unreliable) +[c00000003e8d7c60] [c0000000004a0320] .rt_spin_lock_slowunlock+0x8c/0xe4 +[c00000003e8d7ce0] [c0000000004a07cc] .rt_spin_unlock+0x54/0x64 +[c00000003e8d7d60] [c0000000000636bc] .__thread_do_softirq+0x130/0x174 +[c00000003e8d7df0] [c00000000006379c] .run_ksoftirqd+0x9c/0x1a4 +[c00000003e8d7ea0] [c000000000080b68] .kthread+0xa8/0xb4 +[c00000003e8d7f90] [c00000000001c2f8] .kernel_thread+0x54/0x70 +Instruction dump: +60000000 e86d01c8 38630730 4bff7061 60000000 ebbf0008 7c7c1b78 e81d0040 +7fe00278 7c000074 7800d182 68000001 <0b000000> e88d01c8 387d0010 38840738 + +The rtmutex_common.h:75 is: + +rt_mutex_top_waiter(struct rt_mutex *lock) +{ + struct rt_mutex_waiter *w; + + w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter, + list_entry); + BUG_ON(w->lock != lock); + + return w; +} + +Where the waiter->lock is corrupted. I saw various other random bugs +that all had to with the softirq lock and plist. As plist needs to be +initialized before it is used I investigated how this lock is +initialized. It's initialized with: + +void __init softirq_early_init(void) +{ + local_irq_lock_init(local_softirq_lock); +} + +Where: + +#define local_irq_lock_init(lvar) \ + do { \ + int __cpu; \ + for_each_possible_cpu(__cpu) \ + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + +As the softirq lock is a local_irq_lock, which is a per_cpu lock, the +initialization is done to all per_cpu versions of the lock. But lets +look at where the softirq_early_init() is called from. + +In init/main.c: start_kernel() + +/* + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ + softirq_early_init(); + tick_init(); + boot_cpu_init(); + page_address_init(); + printk(KERN_NOTICE "%s", linux_banner); + setup_arch(&command_line); + mm_init_owner(&init_mm, &init_task); + mm_init_cpumask(&init_mm); + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + +One of the first things that is called is the initialization of the +softirq lock. But if you look further down, we see the per_cpu areas +have not been set up yet. Thus initializing a local_irq_lock() before +the per_cpu section is set up, may not work as it is initializing the +per cpu locks before the per cpu exists. 
+ +By moving the softirq_early_init() right after setup_per_cpu_areas(), +the kernel boots fine. + +Signed-off-by: Steven Rostedt +Cc: Clark Williams +Cc: John Kacur +Cc: Carsten Emde +Cc: vomlehn@texas.net +Link: http://lkml.kernel.org/r/1349362924.6755.18.camel@gandalf.local.home +Signed-off-by: Thomas Gleixner + +--- + init/main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/init/main.c ++++ b/init/main.c +@@ -504,7 +504,6 @@ asmlinkage void __init start_kernel(void + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ +- softirq_early_init(); + boot_cpu_init(); + page_address_init(); + pr_notice("%s", linux_banner); +@@ -514,6 +513,7 @@ asmlinkage void __init start_kernel(void + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + + build_all_zonelists(NULL, NULL); diff --git a/debian/patches/features/all/rt/softirq-local-lock.patch b/debian/patches/features/all/rt/softirq-local-lock.patch new file mode 100644 index 000000000..7035a9cce --- /dev/null +++ b/debian/patches/features/all/rt/softirq-local-lock.patch @@ -0,0 +1,359 @@ +Subject: softirq-local-lock.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:57:18 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/bottom_half.h | 12 ++ + include/linux/interrupt.h | 10 ++ + include/linux/preempt_mask.h | 15 ++- + include/linux/sched.h | 1 + init/main.c | 1 + kernel/softirq.c | 186 ++++++++++++++++++++++++++++++++++++++++++- + 6 files changed, 220 insertions(+), 5 deletions(-) + +--- a/include/linux/bottom_half.h ++++ b/include/linux/bottom_half.h +@@ -4,6 +4,17 @@ + #include + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++extern void local_bh_disable(void); ++extern void _local_bh_enable(void); ++extern void local_bh_enable(void); ++extern void local_bh_enable_ip(unsigned long ip); ++extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); ++extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); ++ ++#else ++ + #ifdef CONFIG_TRACE_IRQFLAGS + extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); + #else +@@ -31,5 +42,6 @@ static inline void local_bh_enable(void) + { + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); + } ++#endif + + #endif /* _LINUX_BH_H */ +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -383,7 +383,11 @@ struct softirq_action + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline void thread_do_softirq(void) { do_softirq(); } ++#else ++extern void thread_do_softirq(void); ++#endif + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -558,6 +562,12 @@ void tasklet_hrtimer_cancel(struct taskl + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + /* + * Autoprobing for irqs: + * +--- a/include/linux/preempt_mask.h ++++ b/include/linux/preempt_mask.h +@@ -44,16 +44,26 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + 
#define PREEMPT_ACTIVE_BITS 1 + #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) + #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0UL) ++extern int in_serving_softirq(void); ++#endif + + /* + * Are we doing bottom half or hardware interrupt processing? +@@ -64,7 +74,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1609,6 +1609,7 @@ struct task_struct { + #endif + #ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; ++ int softirq_nestcnt; + #endif + }; + +--- a/init/main.c ++++ b/init/main.c +@@ -504,6 +504,7 @@ asmlinkage void __init start_kernel(void + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ ++ softirq_early_init(); + boot_cpu_init(); + page_address_init(); + pr_notice("%s", linux_banner); +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -173,6 +174,7 @@ static void handle_pending_softirqs(u32 + local_irq_disable(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -382,6 +384,182 @@ asmlinkage void do_softirq(void) + local_irq_restore(flags); + } + ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock ++ */ ++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); ++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); ++ ++asmlinkage void __do_softirq(void); ++ ++void __init softirq_early_init(void) ++{ ++ local_irq_lock_init(local_softirq_lock); ++} ++ ++static void __local_bh_disable(void) ++{ ++ migrate_disable(); ++ current->softirq_nestcnt++; ++} ++ ++void local_bh_disable(void) ++{ ++ __local_bh_disable(); ++} ++EXPORT_SYMBOL(local_bh_disable); ++ ++void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_disable(); ++ if (cnt & PREEMPT_CHECK_OFFSET) ++ preempt_disable(); ++} ++ ++static void __local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ if ((current->softirq_nestcnt == 1) && ++ local_softirq_pending() && ++ local_trylock(local_softirq_lock)) { ++ ++ local_irq_disable(); ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_irq_enable(); ++ local_unlock(local_softirq_lock); ++ WARN_ON(current->softirq_nestcnt != 1); ++ } ++ current->softirq_nestcnt--; ++ migrate_enable(); ++} ++ ++void local_bh_enable(void) ++{ ++ __local_bh_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable); ++ ++extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_enable(); ++ if (cnt & PREEMPT_CHECK_OFFSET) ++ preempt_enable(); ++} ++ ++void local_bh_enable_ip(unsigned long ip) 
++{ ++ local_bh_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable_ip); ++ ++/* For tracing */ ++int notrace __in_softirq(void) ++{ ++ if (__get_cpu_var(local_softirq_lock).owner == current) ++ return __get_cpu_var(local_softirq_lock).nestcnt; ++ return 0; ++} ++ ++int in_serving_softirq(void) ++{ ++ int res; ++ ++ preempt_disable(); ++ res = __get_cpu_var(local_softirq_runner) == current; ++ preempt_enable(); ++ return res; ++} ++EXPORT_SYMBOL(in_serving_softirq); ++ ++/* ++ * Called with bh and local interrupts disabled. For full RT cpu must ++ * be pinned. ++ */ ++asmlinkage void __do_softirq(void) ++{ ++ u32 pending = local_softirq_pending(); ++ int cpu = smp_processor_id(); ++ ++ current->softirq_nestcnt++; ++ ++ /* Reset the pending bitmask before enabling irqs */ ++ set_softirq_pending(0); ++ ++ __get_cpu_var(local_softirq_runner) = current; ++ ++ lockdep_softirq_enter(); ++ ++ handle_pending_softirqs(pending, cpu); ++ ++ pending = local_softirq_pending(); ++ if (pending) ++ wakeup_softirqd(); ++ ++ lockdep_softirq_exit(); ++ __get_cpu_var(local_softirq_runner) = NULL; ++ ++ current->softirq_nestcnt--; ++} ++ ++static int __thread_do_softirq(int cpu) ++{ ++ /* ++ * Prevent the current cpu from going offline. ++ * pin_current_cpu() can reenable preemption and block on the ++ * hotplug mutex. When it returns, the current cpu is ++ * pinned. It might be the wrong one, but the offline check ++ * below catches that. ++ */ ++ pin_current_cpu(); ++ /* ++ * If called from ksoftirqd (cpu >= 0) we need to check ++ * whether we are on the wrong cpu due to cpu offlining. If ++ * called via thread_do_softirq() no action required. ++ */ ++ if (cpu >= 0 && cpu_is_offline(cpu)) { ++ unpin_current_cpu(); ++ return -1; ++ } ++ preempt_enable(); ++ local_lock(local_softirq_lock); ++ local_irq_disable(); ++ /* ++ * We cannot switch stacks on RT as we want to be able to ++ * schedule! ++ */ ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_unlock(local_softirq_lock); ++ unpin_current_cpu(); ++ preempt_disable(); ++ local_irq_enable(); ++ return 0; ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq()) { ++ preempt_disable(); ++ __thread_do_softirq(-1); ++ preempt_enable(); ++ } ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++#endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. + */ +@@ -393,9 +571,9 @@ void irq_enter(void) + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. 
+ */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_irq_enter(); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -403,6 +581,7 @@ void irq_enter(void) + + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) { + #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK + /* +@@ -422,6 +601,9 @@ static inline void invoke_softirq(void) + } else { + wakeup_softirqd(); + } ++#else ++ wakeup_softirqd(); ++#endif + } + + static inline void tick_irq_exit(void) diff --git a/debian/patches/features/all/rt/softirq-make-fifo.patch b/debian/patches/features/all/rt/softirq-make-fifo.patch new file mode 100644 index 000000000..e377f43f5 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-make-fifo.patch @@ -0,0 +1,51 @@ +Subject: softirq-make-fifo.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 21:06:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -386,6 +386,8 @@ asmlinkage void do_softirq(void) + + static inline void local_bh_disable_nort(void) { local_bh_disable(); } + static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } + + #else /* !PREEMPT_RT_FULL */ + +@@ -559,6 +561,20 @@ void thread_do_softirq(void) + static inline void local_bh_disable_nort(void) { } + static inline void _local_bh_enable_nort(void) { } + ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++} ++ ++static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ + #endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. +@@ -992,6 +1008,8 @@ static struct notifier_block cpu_nfb = { + + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, ++ .cleanup = ksoftirqd_clr_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", diff --git a/debian/patches/features/all/rt/softirq-make-migrate-disable-enable-conditioned-on-softirq_n.patch b/debian/patches/features/all/rt/softirq-make-migrate-disable-enable-conditioned-on-softirq_n.patch new file mode 100644 index 000000000..591035584 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-make-migrate-disable-enable-conditioned-on-softirq_n.patch @@ -0,0 +1,67 @@ +From 6661fe8f9906aa0eded36e7c9e74d39e0383f6b4 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 6 Dec 2013 00:42:22 +0100 +Subject: [PATCH] softirq: make migrate disable/enable conditioned on softirq_nestcnt + transition +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This patch removes the recursive calls to migrate_disable/enable in +local_bh_disable/enable + +the softirq-local-lock.patch introduces local_bh_disable/enable wich +decrements/increments the current->softirq_nestcnt and disable/enables +migration as well. 
as softirq_nestcnt (include/linux/sched.h conditioned +on CONFIG_PREEMPT_RT_BASE) already is tracking the nesting level of the +recursive calls to local_bh_disable/enable (all in kernel/softirq.c) - no +need to do it twice. + +migrate_disable/enable thus can be conditionsed on softirq_nestcnt making +a transition from 0-1 to disable migration and 1-0 to re-enable it. + +No change of functional behavior, this does noticably reduce the observed +nesting level of migrate_disable/enable + +Signed-off-by: Nicholas Mc Guire +Reviewed-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/softirq.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -553,8 +553,8 @@ static void do_current_softirqs(int need + + static void __local_bh_disable(void) + { +- migrate_disable(); +- current->softirq_nestcnt++; ++ if (++current->softirq_nestcnt == 1) ++ migrate_disable(); + } + + void local_bh_disable(void) +@@ -580,8 +580,8 @@ static void __local_bh_enable(void) + do_current_softirqs(1); + local_irq_enable(); + +- current->softirq_nestcnt--; +- migrate_enable(); ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); + } + + void local_bh_enable(void) +@@ -605,8 +605,10 @@ EXPORT_SYMBOL(local_bh_enable_ip); + + void _local_bh_enable(void) + { +- current->softirq_nestcnt--; +- migrate_enable(); ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); + } + EXPORT_SYMBOL(_local_bh_enable); + diff --git a/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch new file mode 100644 index 000000000..2e9cf2964 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch @@ -0,0 +1,75 @@ +Subject: softirq: Make serving softirqs a task flag +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 14:30:25 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Avoid the percpu softirq_runner pointer magic by using a task flag. 
+ +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 1 + + kernel/softirq.c | 20 +++----------------- + 2 files changed, 4 insertions(+), 17 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1850,6 +1850,7 @@ extern void thread_group_cputime_adjuste + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -397,7 +397,6 @@ static void ksoftirqd_clr_sched_params(u + * On RT we serialize softirq execution with a cpu local lock + */ + static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); +-static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); + + static void __do_softirq_common(int need_rcu_bh_qs); + +@@ -476,22 +475,9 @@ void _local_bh_enable(void) + } + EXPORT_SYMBOL(_local_bh_enable); + +-/* For tracing */ +-int notrace __in_softirq(void) +-{ +- if (__get_cpu_var(local_softirq_lock).owner == current) +- return __get_cpu_var(local_softirq_lock).nestcnt; +- return 0; +-} +- + int in_serving_softirq(void) + { +- int res; +- +- preempt_disable(); +- res = __get_cpu_var(local_softirq_runner) == current; +- preempt_enable(); +- return res; ++ return current->flags & PF_IN_SOFTIRQ; + } + EXPORT_SYMBOL(in_serving_softirq); + +@@ -509,7 +495,7 @@ asmlinkage void __do_softirq(void) + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- __get_cpu_var(local_softirq_runner) = current; ++ current->flags |= PF_IN_SOFTIRQ; + + lockdep_softirq_enter(); + +@@ -520,7 +506,7 @@ asmlinkage void __do_softirq(void) + wakeup_softirqd(); + + lockdep_softirq_exit(); +- __get_cpu_var(local_softirq_runner) = NULL; ++ current->flags &= ~PF_IN_SOFTIRQ; + + current->softirq_nestcnt--; + } diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch new file mode 100644 index 000000000..99b1d3344 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch @@ -0,0 +1,146 @@ +Subject: softirq: Check preemption after reenabling interrupts +From: Thomas Gleixner +Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET) +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +raise_softirq_irqoff() disables interrupts and wakes the softirq +daemon, but after reenabling interrupts there is no preemption check, +so the execution of the softirq thread might be delayed arbitrarily. + +In principle we could add that check to local_irq_enable/restore, but +that's overkill as the rasie_softirq_irqoff() sections are the only +ones which show this behaviour. 
+ +Reported-by: Carsten Emde +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + block/blk-iopoll.c | 3 +++ + block/blk-softirq.c | 3 +++ + include/linux/preempt.h | 3 +++ + net/core/dev.c | 6 ++++++ + 4 files changed, 15 insertions(+) + +--- a/block/blk-iopoll.c ++++ b/block/blk-iopoll.c +@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(blk_iopoll_sched); + +@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct so + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -204,6 +206,7 @@ static int blk_iopoll_cpu_notify(struct + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +--- a/block/blk-softirq.c ++++ b/block/blk-softirq.c +@@ -51,6 +51,7 @@ static void trigger_softirq(void *data) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /* +@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifie + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +@@ -150,6 +152,7 @@ void __blk_complete_request(struct reque + goto do_local; + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /** +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -49,8 +49,10 @@ do { \ + + #ifdef CONFIG_PREEMPT_RT_BASE + # define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() preempt_check_resched() + #else + # define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() barrier(); + #endif + + #ifdef CONFIG_PREEMPT +@@ -125,6 +127,7 @@ do { \ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + #define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + + #endif /* CONFIG_PREEMPT_COUNT */ + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2146,6 +2146,7 @@ static inline void __netif_reschedule(st + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2180,6 +2181,7 @@ void __dev_kfree_skb_irq(struct sk_buff + __this_cpu_write(softnet_data.completion_queue, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__dev_kfree_skb_irq); + +@@ -3232,6 +3234,7 @@ static int enqueue_to_backlog(struct sk_ + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -4179,6 +4182,7 @@ static void net_rps_action_and_irq_enabl + } else + #endif + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + static int process_backlog(struct napi_struct *napi, int quota) +@@ -4251,6 +4255,7 @@ void __napi_schedule(struct napi_struct + local_irq_save(flags); + ____napi_schedule(&__get_cpu_var(softnet_data), n); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__napi_schedule); + +@@ -6732,6 +6737,7 @@ static int dev_cpu_callback(struct notif + + raise_softirq_irqoff(NET_TX_SOFTIRQ); + 
local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Process offline CPU's input_pkt_queue */ + while ((skb = __skb_dequeue(&oldsd->process_queue))) { diff --git a/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch new file mode 100644 index 000000000..5ec433743 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch @@ -0,0 +1,113 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 13:16:38 -0500 +Subject: softirq: Sanitize softirq pending for NOHZ/RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 1 + kernel/softirq.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ + kernel/time/tick-sched.c | 9 ------ + 3 files changed, 65 insertions(+), 8 deletions(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -399,6 +399,7 @@ extern void __raise_softirq_irqoff(unsig + + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void softirq_check_pending_idle(void); + + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -61,6 +61,69 @@ const char * const softirq_to_name[NR_SO + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; + ++#ifdef CONFIG_NO_HZ_COMMON ++# ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * On preempt-rt a softirq might be blocked on a lock. There might be ++ * no other runnable task on this CPU because the lock owner runs on ++ * some other CPU. So we have to go into idle with the pending bit ++ * set. Therefor we need to check this otherwise we warn about false ++ * positives which confuses users and defeats the whole purpose of ++ * this test. ++ * ++ * This code is called with interrupts disabled. ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ u32 warnpending = 0, pending; ++ ++ if (rate_limit >= 10) ++ return; ++ ++ pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ if (pending) { ++ struct task_struct *tsk; ++ ++ tsk = __get_cpu_var(ksoftirqd); ++ /* ++ * The wakeup code in rtmutex.c wakes up the task ++ * _before_ it sets pi_blocked_on to NULL under ++ * tsk->pi_lock. So we need to check for both: state ++ * and pi_blocked_on. 
++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ ++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) ++ warnpending = 1; ++ ++ raw_spin_unlock(&tsk->pi_lock); ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ pending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -722,14 +722,7 @@ static bool can_stop_idle_tick(int cpu, + return false; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { +- pr_warn("NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + return false; + } + diff --git a/debian/patches/features/all/rt/softirq-split-handling-function.patch b/debian/patches/features/all/rt/softirq-split-handling-function.patch new file mode 100644 index 000000000..6eaaff46e --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-handling-function.patch @@ -0,0 +1,66 @@ +Subject: softirq: Split handling function +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 15:33:53 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Split out the inner handling function, so RT can reuse it. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 34 +++++++++++++++++++++------------- + 1 file changed, 21 insertions(+), 13 deletions(-) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -141,10 +141,29 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + ++static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs) ++{ ++ struct softirq_action *h = softirq_vec + vec_nr; ++ unsigned int prev_count = preempt_count(); ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, preempt_count()); ++ preempt_count_set(prev_count); ++ } ++ if (need_rcu_bh_qs) ++ rcu_bh_qs(cpu); ++} ++ + static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) + { + struct softirq_action *h = softirq_vec; +- unsigned int prev_count = preempt_count(); + int softirq_bit; + + local_irq_enable(); +@@ -156,19 +175,8 @@ static void handle_pending_softirqs(u32 + h += softirq_bit - 1; + vec_nr = h - softirq_vec; + +- kstat_incr_softirqs_this_cpu(vec_nr); ++ handle_softirq(vec_nr, cpu, need_rcu_bh_qs); + +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count_set(prev_count); +- } +- if (need_rcu_bh_qs) +- rcu_bh_qs(cpu); + h++; + pending >>= softirq_bit; + } diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch new file mode 100644 index 000000000..2b085690c --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-locks.patch @@ -0,0 +1,455 @@ +Subject: softirq: Split softirq locks +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 14:20:47 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The 3.x RT series removed the split softirq implementation in favour +of pushing softirq processing into the context of the thread which +raised it. Though this prevents us from handling the various softirqs +at different priorities. Now instead of reintroducing the split +softirq threads we split the locks which serialize the softirq +processing. + +If a softirq is raised in context of a thread, then the softirq is +noted on a per thread field, if the thread is in a bh disabled +region. If the softirq is raised from hard interrupt context, then the +bit is set in the flag field of ksoftirqd and ksoftirqd is invoked. +When a thread leaves a bh disabled region, then it tries to execute +the softirqs which have been raised in its own context. It acquires +the per softirq / per cpu lock for the softirq and then checks, +whether the softirq is still pending in the per cpu +local_softirq_pending() field. If yes, it runs the softirq. If no, +then some other task executed it already. This allows for zero config +softirq elevation in the context of user space tasks or interrupt +threads. 
+ +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 1 + kernel/softirq.c | 311 ++++++++++++++++++++++++++++++-------------------- + 2 files changed, 189 insertions(+), 123 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1611,6 +1611,7 @@ struct task_struct { + #ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; + int softirq_nestcnt; ++ unsigned int softirqs_raised; + #endif + #ifdef CONFIG_PREEMPT_RT_FULL + # if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -161,6 +161,12 @@ static void handle_softirq(unsigned int + rcu_bh_qs(cpu); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ + static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) + { + struct softirq_action *h = softirq_vec; +@@ -184,7 +190,19 @@ static void handle_pending_softirqs(u32 + local_irq_disable(); + } + +-#ifndef CONFIG_PREEMPT_RT_FULL ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); ++ cond_resched(); ++ return; ++ } ++ local_irq_enable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -394,6 +412,32 @@ asmlinkage void do_softirq(void) + local_irq_restore(flags); + } + ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an interrupt or softirq, we're done ++ * (this also catches softirq-disabled code). We will ++ * actually run the softirq once we return from ++ * the irq or softirq. ++ * ++ * Otherwise we wake up ksoftirqd to make sure we ++ * schedule the softirq soon. 
++ */ ++ if (!in_interrupt()) ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++} ++ + static inline void local_bh_disable_nort(void) { local_bh_disable(); } + static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } + static void ksoftirqd_set_sched_params(unsigned int cpu) { } +@@ -402,20 +446,78 @@ static void ksoftirqd_clr_sched_params(u + #else /* !PREEMPT_RT_FULL */ + + /* +- * On RT we serialize softirq execution with a cpu local lock ++ * On RT we serialize softirq execution with a cpu local lock per softirq + */ +-static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); ++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); ++ ++void __init softirq_early_init(void) ++{ ++ int i; + +-static void __do_softirq_common(int need_rcu_bh_qs); ++ for (i = 0; i < NR_SOFTIRQS; i++) ++ local_irq_lock_init(local_softirq_locks[i]); ++} + +-asmlinkage __do_softirq(void) ++static void lock_softirq(int which) + { +- __do_softirq_common(0); ++ __local_lock(&__get_cpu_var(local_softirq_locks[which])); + } + +-void __init softirq_early_init(void) ++static void unlock_softirq(int which) + { +- local_irq_lock_init(local_softirq_lock); ++ __local_unlock(&__get_cpu_var(local_softirq_locks[which])); ++} ++ ++static void do_single_softirq(int which, int need_rcu_bh_qs) ++{ ++ unsigned long old_flags = current->flags; ++ ++ current->flags &= ~PF_MEMALLOC; ++ vtime_account_irq_enter(current); ++ current->flags |= PF_IN_SOFTIRQ; ++ lockdep_softirq_enter(); ++ local_irq_enable(); ++ handle_softirq(which, smp_processor_id(), need_rcu_bh_qs); ++ local_irq_disable(); ++ lockdep_softirq_exit(); ++ current->flags &= ~PF_IN_SOFTIRQ; ++ vtime_account_irq_enter(current); ++ tsk_restore_flags(current, old_flags, PF_MEMALLOC); ++} ++ ++/* ++ * Called with interrupts disabled. Process softirqs which were raised ++ * in current context (or on behalf of ksoftirqd). ++ */ ++static void do_current_softirqs(int need_rcu_bh_qs) ++{ ++ while (current->softirqs_raised) { ++ int i = __ffs(current->softirqs_raised); ++ unsigned int pending, mask = (1U << i); ++ ++ current->softirqs_raised &= ~mask; ++ local_irq_enable(); ++ ++ /* ++ * If the lock is contended, we boost the owner to ++ * process the softirq or leave the critical section ++ * now. ++ */ ++ lock_softirq(i); ++ local_irq_disable(); ++ /* ++ * Check with the local_softirq_pending() bits, ++ * whether we need to process this still or if someone ++ * else took care of it. 
++ */ ++ pending = local_softirq_pending(); ++ if (pending & mask) { ++ set_softirq_pending(pending & ~mask); ++ do_single_softirq(i, need_rcu_bh_qs); ++ } ++ unlock_softirq(i); ++ WARN_ON(current->softirq_nestcnt != 1); ++ } + } + + static void __local_bh_disable(void) +@@ -442,17 +544,11 @@ static void __local_bh_enable(void) + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + +- if ((current->softirq_nestcnt == 1) && +- local_softirq_pending() && +- local_trylock(local_softirq_lock)) { ++ local_irq_disable(); ++ if (current->softirq_nestcnt == 1 && current->softirqs_raised) ++ do_current_softirqs(1); ++ local_irq_enable(); + +- local_irq_disable(); +- if (local_softirq_pending()) +- __do_softirq(); +- local_irq_enable(); +- local_unlock(local_softirq_lock); +- WARN_ON(current->softirq_nestcnt != 1); +- } + current->softirq_nestcnt--; + migrate_enable(); + } +@@ -489,81 +585,83 @@ int in_serving_softirq(void) + } + EXPORT_SYMBOL(in_serving_softirq); + +-/* +- * Called with bh and local interrupts disabled. For full RT cpu must +- * be pinned. +- */ +-asmlinkage void __do_softirq(void) ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) + { +- u32 pending = local_softirq_pending(); +- int cpu = smp_processor_id(); +- ++ local_irq_disable(); + current->softirq_nestcnt++; + +- /* Reset the pending bitmask before enabling irqs */ +- set_softirq_pending(0); +- +- current->flags |= PF_IN_SOFTIRQ; +- +- lockdep_softirq_enter(); +- +- handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); +- +- pending = local_softirq_pending(); +- if (pending) +- wakeup_softirqd(); +- +- lockdep_softirq_exit(); +- current->flags &= ~PF_IN_SOFTIRQ; +- ++ do_current_softirqs(1); + current->softirq_nestcnt--; ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled, but migration ++ * disabled. So the cpu can't go away under us. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq() && current->softirqs_raised) { ++ current->softirq_nestcnt++; ++ do_current_softirqs(0); ++ current->softirq_nestcnt--; ++ } + } + +-static int __thread_do_softirq(int cpu) ++void __raise_softirq_irqoff(unsigned int nr) + { ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); + /* +- * Prevent the current cpu from going offline. +- * pin_current_cpu() can reenable preemption and block on the +- * hotplug mutex. When it returns, the current cpu is +- * pinned. It might be the wrong one, but the offline check +- * below catches that. ++ * If we are not in a hard interrupt and inside a bh disabled ++ * region, we simply raise the flag on current. local_bh_enable() ++ * will make sure that the softirq is executed. Otherwise we ++ * delegate it to ksoftirqd. + */ +- pin_current_cpu(); ++ ++ if (!in_irq() && current->softirq_nestcnt) ++ current->softirqs_raised |= (1U << nr); ++ else if (__this_cpu_read(ksoftirqd)) ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); ++} ++ ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ + /* +- * If called from ksoftirqd (cpu >= 0) we need to check +- * whether we are on the wrong cpu due to cpu offlining. If +- * called via thread_do_softirq() no action required. ++ * If we're in an hard interrupt we let irq return code deal ++ * with the wakeup of ksoftirqd. 
+ */ +- if (cpu >= 0 && cpu_is_offline(cpu)) { +- unpin_current_cpu(); +- return -1; +- } +- preempt_enable(); +- local_lock(local_softirq_lock); +- local_irq_disable(); ++ if (in_irq()) ++ return; + /* +- * We cannot switch stacks on RT as we want to be able to +- * schedule! ++ * If we are in thread context but outside of a bh disabled ++ * region, we need to wake ksoftirqd as well. ++ * ++ * CHECKME: Some of the places which do that could be wrapped ++ * into local_bh_disable/enable pairs. Though it's unclear ++ * whether this is worth the effort. To find those places just ++ * raise a WARN() if the condition is met. ++ * + */ +- if (local_softirq_pending()) +- __do_softirq_common(cpu >= 0); +- local_unlock(local_softirq_lock); +- unpin_current_cpu(); +- preempt_disable(); +- local_irq_enable(); +- return 0; ++ if (!current->softirq_nestcnt) ++ wakeup_softirqd(); + } + +-/* +- * Called from netif_rx_ni(). Preemption enabled. +- */ +-void thread_do_softirq(void) ++void do_raise_softirq_irqoff(unsigned int nr) + { +- if (!in_serving_softirq()) { +- preempt_disable(); +- __thread_do_softirq(-1); +- preempt_enable(); +- } ++ raise_softirq_irqoff(nr); ++} ++ ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return current->softirqs_raised; + } + + static inline void local_bh_disable_nort(void) { } +@@ -574,6 +672,10 @@ static inline void ksoftirqd_set_sched_p + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, ¶m); ++ /* Take over all pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending(); ++ local_irq_enable(); + } + + static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) +@@ -625,8 +727,14 @@ static inline void invoke_softirq(void) + } else { + wakeup_softirqd(); + } +-#else +- wakeup_softirqd(); ++#else /* PREEMPT_RT_FULL */ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ if (__this_cpu_read(ksoftirqd) && ++ __this_cpu_read(ksoftirqd)->softirqs_raised) ++ wakeup_softirqd(); ++ local_irq_restore(flags); + #endif + } + +@@ -664,26 +772,6 @@ void irq_exit(void) + trace_hardirq_exit(); /* must be last! */ + } + +-/* +- * This function must run with irqs disabled! +- */ +-inline void raise_softirq_irqoff(unsigned int nr) +-{ +- __raise_softirq_irqoff(nr); +- +- /* +- * If we're in an interrupt or softirq, we're done +- * (this also catches softirq-disabled code). We will +- * actually run the softirq once we return from +- * the irq or softirq. +- * +- * Otherwise we wake up ksoftirqd to make sure we +- * schedule the softirq soon. +- */ +- if (!in_interrupt()) +- wakeup_softirqd(); +-} +- + void raise_softirq(unsigned int nr) + { + unsigned long flags; +@@ -693,12 +781,6 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void __raise_softirq_irqoff(unsigned int nr) +-{ +- trace_softirq_raise(nr); +- or_softirq_pending(1UL << nr); +-} +- + void open_softirq(int nr, void (*action)(struct softirq_action *)) + { + softirq_vec[nr].action = action; +@@ -1004,24 +1086,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); + + static int ksoftirqd_should_run(unsigned int cpu) + { +- return local_softirq_pending(); +-} +- +-static void run_ksoftirqd(unsigned int cpu) +-{ +- local_irq_disable(); +- if (local_softirq_pending()) { +- /* +- * We can safely run softirq on inline stack, as we are not deep +- * in the task stack here. 
+- */ +- __do_softirq(); +- rcu_note_context_switch(cpu); +- local_irq_enable(); +- cond_resched(); +- return; +- } +- local_irq_enable(); ++ return ksoftirqd_softirq_pending(); + } + + #ifdef CONFIG_HOTPLUG_CPU diff --git a/debian/patches/features/all/rt/softirq-split-out-code.patch b/debian/patches/features/all/rt/softirq-split-out-code.patch new file mode 100644 index 000000000..04dc514f7 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-out-code.patch @@ -0,0 +1,102 @@ +Subject: softirq-split-out-code.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:46:49 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 67 ++++++++++++++++++++++++++++--------------------------- + 1 file changed, 35 insertions(+), 32 deletions(-) + +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -76,6 +76,40 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + ++static void handle_pending_softirqs(u32 pending, int cpu) ++{ ++ struct softirq_action *h = softirq_vec; ++ unsigned int prev_count = preempt_count(); ++ int softirq_bit; ++ ++ local_irq_enable(); ++ h = softirq_vec; ++ ++ while ((softirq_bit = ffs(pending))) { ++ unsigned int vec_nr; ++ ++ h += softirq_bit - 1; ++ vec_nr = h - softirq_vec; ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, preempt_count()); ++ preempt_count_set(prev_count); ++ } ++ rcu_bh_qs(cpu); ++ h++; ++ pending >>= softirq_bit; ++ } ++ ++ local_irq_disable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -227,10 +261,8 @@ asmlinkage void __do_softirq(void) + unsigned long end = jiffies + MAX_SOFTIRQ_TIME; + unsigned long old_flags = current->flags; + int max_restart = MAX_SOFTIRQ_RESTART; +- struct softirq_action *h; + bool in_hardirq; + __u32 pending; +- int softirq_bit; + int cpu; + + /* +@@ -251,36 +283,7 @@ asmlinkage void __do_softirq(void) + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- while ((softirq_bit = ffs(pending))) { +- unsigned int vec_nr; +- int prev_count; +- +- h += softirq_bit - 1; +- +- vec_nr = h - softirq_vec; +- prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count_set(prev_count); +- } +- rcu_bh_qs(cpu); +- h++; +- pending >>= softirq_bit; +- } +- +- local_irq_disable(); ++ handle_pending_softirqs(pending, cpu); + + pending = local_softirq_pending(); + if (pending) { diff --git a/debian/patches/features/all/rt/softirq-thread-do-softirq.patch b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch new file mode 100644 index 000000000..4ef2bfabd --- /dev/null +++ b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch @@ -0,0 +1,33 @@ +Subject: softirq-thread-do-softirq.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:44:15 
+0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/interrupt.h | 2 +- + net/core/dev.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -378,7 +378,7 @@ struct softirq_action + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- ++static inline void thread_do_softirq(void) { do_softirq(); } + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3301,7 +3301,7 @@ int netif_rx_ni(struct sk_buff *skb) + preempt_disable(); + err = netif_rx_internal(skb); + if (local_softirq_pending()) +- do_softirq(); ++ thread_do_softirq(); + preempt_enable(); + + return err; diff --git a/debian/patches/features/all/rt/sparc-provide-EARLY_PRINTK-for-SPARC.patch b/debian/patches/features/all/rt/sparc-provide-EARLY_PRINTK-for-SPARC.patch new file mode 100644 index 000000000..b027b0f6c --- /dev/null +++ b/debian/patches/features/all/rt/sparc-provide-EARLY_PRINTK-for-SPARC.patch @@ -0,0 +1,36 @@ +From 11fab744e7819cb63788ceb271074524c214a2c4 Mon Sep 17 00:00:00 2001 +From: Kirill Tkhai +Date: Fri, 30 Aug 2013 21:16:08 +0400 +Subject: [PATCH] sparc: provide EARLY_PRINTK for SPARC +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +sparc does not have CONFIG_EARLY_PRINTK option. + +So early-printk-consolidate.patch breaks compilation: + +arch/sparc/built-in.o: In function `setup_arch': +(.init.text+0x15e4): undefined reference to `early_console' +arch/sparc/built-in.o: In function `setup_arch': +(.init.text+0x15ec): undefined reference to `early_console' + +The below addition fixes that. + +Signed-off-by: Kirill Tkhai +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/sparc/Kconfig | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/sparc/Kconfig ++++ b/arch/sparc/Kconfig +@@ -525,6 +525,10 @@ menu "Executable file formats" + + source "fs/Kconfig.binfmt" + ++config EARLY_PRINTK ++ bool ++ default y ++ + config COMPAT + bool + depends on SPARC64 diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch new file mode 100644 index 000000000..51889bbc2 --- /dev/null +++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch @@ -0,0 +1,205 @@ +Subject: spinlock-types-separate-raw.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 19:34:01 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types.h | 4 + + include/linux/spinlock_types.h | 74 ------------------------------------ + include/linux/spinlock_types_nort.h | 33 ++++++++++++++++ + include/linux/spinlock_types_raw.h | 56 +++++++++++++++++++++++++++ + 4 files changed, 95 insertions(+), 72 deletions(-) + +--- a/include/linux/rwlock_types.h ++++ b/include/linux/rwlock_types.h +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +--- a/include/linux/spinlock_types.h ++++ b/include/linux/spinlock_types.h +@@ -9,79 +9,9 @@ + * Released under the General Public License (GPL). 
+ */ + +-#if defined(CONFIG_SMP) +-# include +-#else +-# include +-#endif ++#include + +-#include +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_GENERIC_LOCKBREAK +- unsigned int break_lock; +-#endif +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif +- +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, +-#else +-# define SPIN_DEBUG_INIT(lockname) +-#endif +- +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++#include + + #include + +--- /dev/null ++++ b/include/linux/spinlock_types_nort.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +--- /dev/null ++++ b/include/linux/spinlock_types_raw.h +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif diff --git a/debian/patches/features/all/rt/stomp-machine-raw-lock.patch b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch new file mode 100644 index 000000000..529cfea9e --- /dev/null +++ b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch @@ -0,0 +1,195 @@ +Subject: stomp-machine-raw-lock.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 11:01:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/stop_machine.c | 64 ++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 44 insertions(+), 20 deletions(-) + +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -30,12 +30,12 @@ struct cpu_stop_done { + atomic_t nr_todo; /* nr left to execute */ + bool executed; /* actually executed? */ + int ret; /* collected return value */ +- struct completion completion; /* fired if nr_todo reaches 0 */ ++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */ + }; + + /* the actual stopper, one per every possible cpu, enabled on online cpus */ + struct cpu_stopper { +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + }; +@@ -56,7 +56,7 @@ static void cpu_stop_init_done(struct cp + { + memset(done, 0, sizeof(*done)); + atomic_set(&done->nr_todo, nr_todo); +- init_completion(&done->completion); ++ done->waiter = current; + } + + /* signal completion unless @done is NULL */ +@@ -65,8 +65,10 @@ static void cpu_stop_signal_done(struct + if (done) { + if (executed) + done->executed = true; +- if (atomic_dec_and_test(&done->nr_todo)) +- complete(&done->completion); ++ if (atomic_dec_and_test(&done->nr_todo)) { ++ wake_up_process(done->waiter); ++ done->waiter = NULL; ++ } + } + } + +@@ -78,7 +80,7 @@ static void cpu_stop_queue_work(unsigned + + unsigned long flags; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + + if (stopper->enabled) { + list_add_tail(&work->list, &stopper->works); +@@ -86,7 +88,23 @@ static void cpu_stop_queue_work(unsigned + } else + cpu_stop_signal_done(work->done, false); + +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); ++} ++ ++static void wait_for_stop_done(struct cpu_stop_done *done) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (atomic_read(&done->nr_todo)) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ /* ++ * We need to wait until cpu_stop_signal_done() has cleared ++ * done->waiter. ++ */ ++ while (done->waiter) ++ cpu_relax(); ++ set_current_state(TASK_RUNNING); + } + + /** +@@ -120,7 +138,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s + + cpu_stop_init_done(&done, 1); + cpu_stop_queue_work(cpu, &work); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } + +@@ -297,7 +315,7 @@ int stop_two_cpus(unsigned int cpu1, uns + lg_local_unlock(&stop_cpus_lock); + preempt_enable(); + +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + + return done.executed ? done.ret : -ENOENT; + } +@@ -359,7 +377,7 @@ static int __stop_cpus(const struct cpum + + cpu_stop_init_done(&done, cpumask_weight(cpumask)); + queue_stop_cpus_work(cpumask, fn, arg, &done); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } + +@@ -438,9 +456,9 @@ static int cpu_stop_should_run(unsigned + unsigned long flags; + int run; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + run = !list_empty(&stopper->works); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return run; + } + +@@ -452,13 +470,13 @@ static void cpu_stopper_thread(unsigned + + repeat: + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -490,7 +508,13 @@ static void cpu_stopper_thread(unsigned + kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, + ksym_buf), arg); + ++ /* ++ * Make sure that the wakeup and setting done->waiter ++ * to NULL is atomic. 
++ */ ++ local_irq_disable(); + cpu_stop_signal_done(done, true); ++ local_irq_enable(); + goto repeat; + } + } +@@ -509,20 +533,20 @@ static void cpu_stop_park(unsigned int c + unsigned long flags; + + /* drain remaining works */ +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + list_for_each_entry(work, &stopper->works, list) + cpu_stop_signal_done(work->done, false); + stopper->enabled = false; +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + } + + static void cpu_stop_unpark(unsigned int cpu) + { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + stopper->enabled = true; +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + } + + static struct smp_hotplug_thread cpu_stop_threads = { +@@ -544,7 +568,7 @@ static int __init cpu_stop_init(void) + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + +@@ -647,7 +671,7 @@ int stop_machine_from_inactive_cpu(int ( + ret = multi_cpu_stop(&msdata); + + /* Busy wait for completion. */ +- while (!completion_done(&done.completion)) ++ while (!atomic_read(&done.nr_todo)) + cpu_relax(); + + mutex_unlock(&stop_cpus_mutex); diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch new file mode 100644 index 000000000..34d2e7775 --- /dev/null +++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch @@ -0,0 +1,35 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:27 -0500 +Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Instead of playing with non-preemption, introduce explicit +startup serialization. This is more robust and cleaner as +well. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +[bigeasy: XXX: stopper_lock -> stop_cpus_lock] +--- + kernel/stop_machine.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -466,6 +466,16 @@ static void cpu_stopper_thread(unsigned + struct cpu_stop_done *done = work->done; + char ksym_buf[KSYM_NAME_LEN] __maybe_unused; + ++ /* ++ * Wait until the stopper finished scheduling on all ++ * cpus ++ */ ++ lg_global_lock(&stop_cpus_lock); ++ /* ++ * Let other cpu threads continue as well ++ */ ++ lg_global_unlock(&stop_cpus_lock); ++ + /* cpu stop callbacks are not allowed to sleep */ + preempt_disable(); + diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch new file mode 100644 index 000000000..9eff7bec2 --- /dev/null +++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch @@ -0,0 +1,107 @@ +From: Thomas Gleixner +Date: Thu, 15 Jul 2010 10:29:00 +0200 +Subject: suspend: Prevent might sleep splats +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +timekeeping suspend/resume calls read_persistant_clock() which takes +rtc_lock. That results in might sleep warnings because at that point +we run with interrupts disabled. 
+ +We cannot convert rtc_lock to a raw spinlock as that would trigger +other might sleep warnings. + +As a temporary workaround we disable the might sleep warnings by +setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend() +and restoring it to SYSTEM_RUNNING afer sysdev_resume(). + +Needs to be revisited. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/kernel.h | 1 + + kernel/power/hibernate.c | 7 +++++++ + kernel/power/suspend.c | 4 ++++ + 3 files changed, 12 insertions(+) + +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -454,6 +454,7 @@ extern enum system_states { + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -276,6 +276,8 @@ static int create_image(int platform_mod + + local_irq_disable(); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (error) { + printk(KERN_ERR "PM: Some system devices failed to power down, " +@@ -303,6 +305,7 @@ static int create_image(int platform_mod + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -428,6 +431,7 @@ static int resume_target_kernel(bool pla + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + + error = syscore_suspend(); + if (error) +@@ -461,6 +465,7 @@ static int resume_target_kernel(bool pla + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -543,6 +548,7 @@ int hibernation_platform_enter(void) + goto Platform_finish; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + syscore_suspend(); + if (pm_wakeup_pending()) { + error = -EAGAIN; +@@ -555,6 +561,7 @@ int hibernation_platform_enter(void) + + Power_up: + syscore_resume(); ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + enable_nonboot_cpus(); + +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -218,6 +218,8 @@ static int suspend_enter(suspend_state_t + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (!error) { + *wakeup = pm_wakeup_pending(); +@@ -228,6 +230,8 @@ static int suspend_enter(suspend_state_t + syscore_resume(); + } + ++ system_state = SYSTEM_RUNNING; ++ + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); + diff --git a/debian/patches/features/all/rt/sysctl-include-atomic-h.patch b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch new file mode 100644 index 000000000..a484363ed --- /dev/null +++ b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch @@ -0,0 +1,20 @@ +Subject: sysctl-include-fix.patch +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 10:52:34 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sysctl.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + + /* For the /proc/sys support */ diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch new file mode 100644 index 000000000..6e6220726 --- /dev/null +++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch @@ -0,0 +1,48 @@ +Subject: add /sys/kernel/realtime entry +From: Clark Williams +Date: Sat Jul 30 21:55:53 
2011 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Add a /sys/kernel entry to indicate that the kernel is a +realtime kernel. + +Clark says that he needs this for udev rules, udev needs to evaluate +if its a PREEMPT_RT kernel a few thousand times and parsing uname +output is too slow or so. + +Are there better solutions? Should it exist and return 0 on !-rt? + +Signed-off-by: Clark Williams +Signed-off-by: Peter Zijlstra +--- + kernel/ksysfs.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); + + #endif /* CONFIG_KEXEC */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -197,6 +206,9 @@ static struct attribute * kernel_attrs[] + &vmcoreinfo_attr.attr, + #endif + &rcu_expedited_attr.attr, ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch new file mode 100644 index 000000000..599918707 --- /dev/null +++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -0,0 +1,401 @@ +Subject: tasklet: Prevent tasklets from going into infinite spin in RT +From: Ingo Molnar +Date: Tue Nov 29 20:18:22 2011 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, +and spinlocks turn are mutexes. But this can cause issues with +tasks disabling tasklets. A tasklet runs under ksoftirqd, and +if a tasklets are disabled with tasklet_disable(), the tasklet +count is increased. When a tasklet runs, it checks this counter +and if it is set, it adds itself back on the softirq queue and +returns. + +The problem arises in RT because ksoftirq will see that a softirq +is ready to run (the tasklet softirq just re-armed itself), and will +not sleep, but instead run the softirqs again. The tasklet softirq +will still see that the count is non-zero and will not execute +the tasklet and requeue itself on the softirq again, which will +cause ksoftirqd to run it again and again and again. + +It gets worse because ksoftirqd runs as a real-time thread. +If it preempted the task that disabled tasklets, and that task +has migration disabled, or can't run for other reasons, the tasklet +softirq will never run because the count will never be zero, and +ksoftirqd will go into an infinite loop. As an RT task, it this +becomes a big problem. + +This is a hack solution to have tasklet_disable stop tasklets, and +when a tasklet runs, instead of requeueing the tasklet softirqd +it delays it. When tasklet_enable() is called, and tasklets are +waiting, then the tasklet_enable() will kick the tasklets to continue. +This prevents the lock up from ksoftirq going into an infinite loop. 
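
The workaround reduces to two pieces: the tasklet action parks a disabled tasklet in a PENDING state instead of requeueing it, and tasklet_enable() turns PENDING back into a real schedule. A minimal userspace C model of just that state machine follows; the struct layout, the missing per-CPU queue and the non-atomic flag handling are simplifications for illustration, not the kernel implementation:

#include <stdio.h>

enum {
	STATE_SCHED	= 1 << 0,	/* queued for execution */
	STATE_PENDING	= 1 << 2,	/* hit tasklet_disable(), parked */
};

struct tasklet {
	unsigned int state;
	int disable_count;
	void (*func)(struct tasklet *);
};

static void tasklet_schedule(struct tasklet *t)
{
	t->state |= STATE_SCHED;	/* the real code also queues it per cpu */
	printf("tasklet queued\n");
}

/* what the softirq action does for one tasklet it dequeued */
static void tasklet_action_one(struct tasklet *t)
{
	t->state &= ~STATE_SCHED;
	if (t->disable_count) {
		t->state |= STATE_PENDING;	/* park instead of requeueing */
		return;
	}
	t->func(t);
}

static void tasklet_enable(struct tasklet *t)
{
	if (--t->disable_count)
		return;
	if (t->state & STATE_PENDING) {		/* kick parked work exactly once */
		t->state &= ~STATE_PENDING;
		tasklet_schedule(t);
	}
}

static void work(struct tasklet *t)
{
	printf("tasklet ran\n");
}

int main(void)
{
	struct tasklet t = { .func = work, .disable_count = 1 };

	tasklet_schedule(&t);
	tasklet_action_one(&t);		/* disabled: parked, ksoftirqd is not spun */
	tasklet_enable(&t);		/* reschedules the parked tasklet */
	tasklet_action_one(&t);		/* runs it */
	return 0;
}

The real patch performs the same transitions with test_and_clear_bit() and a cmpxchg()-based tasklet_tryunlock() so they stay atomic against other CPUs; the model only shows why ksoftirqd no longer loops on a disabled tasklet.
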
+ +[ rostedt@goodmis.org: ported to 3.0-rt ] + +Signed-off-by: Ingo Molnar +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 39 ++++---- + kernel/softirq.c | 208 ++++++++++++++++++++++++++++++++-------------- + 2 files changed, 168 insertions(+), 79 deletions(-) + +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -425,8 +425,9 @@ static inline struct task_struct *this_c + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. +@@ -451,27 +452,36 @@ struct tasklet_struct name = { NULL, 0, + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING /* Tasklet is pending */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) ++{ ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++} ++ + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_clear_bit(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); ++ + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -520,17 +530,8 @@ static inline void tasklet_disable(struc + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic_dec(); +- atomic_dec(&t->count); +-} +- +-static inline void tasklet_hi_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic_dec(); +- atomic_dec(&t->count); +-} ++extern void tasklet_enable(struct tasklet_struct *t); ++extern void tasklet_hi_enable(struct tasklet_struct *t); + + extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -707,15 +708,45 @@ struct tasklet_head { + static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); + static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); + ++static void inline ++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) ++{ ++ if (tasklet_trylock(t)) { ++again: ++ /* We may have been preempted before 
tasklet_trylock ++ * and __tasklet_action may have already run. ++ * So double check the sched bit while the takslet ++ * is locked before adding it to the list. ++ */ ++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { ++ t->next = NULL; ++ *head->tail = t; ++ head->tail = &(t->next); ++ raise_softirq_irqoff(nr); ++ tasklet_unlock(t); ++ } else { ++ /* This is subtle. If we hit the corner case above ++ * It is possible that we get preempted right here, ++ * and another task has successfully called ++ * tasklet_schedule(), then this function, and ++ * failed on the trylock. Thus we must be sure ++ * before releasing the tasklet lock, that the ++ * SCHED_BIT is clear. Otherwise the tasklet ++ * may get its SCHED_BIT set, but not added to the ++ * list ++ */ ++ if (!tasklet_tryunlock(t)) ++ goto again; ++ } ++ } ++} ++ + void __tasklet_schedule(struct tasklet_struct *t) + { + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- raise_softirq_irqoff(TASKLET_SOFTIRQ); ++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); + local_irq_restore(flags); + } + EXPORT_SYMBOL(__tasklet_schedule); +@@ -725,60 +756,123 @@ void __tasklet_hi_schedule(struct taskle + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); + local_irq_restore(flags); + } + EXPORT_SYMBOL(__tasklet_hi_schedule); + + void __tasklet_hi_schedule_first(struct tasklet_struct *t) + { +- BUG_ON(!irqs_disabled()); +- +- t->next = __this_cpu_read(tasklet_hi_vec.head); +- __this_cpu_write(tasklet_hi_vec.head, t); +- __raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_hi_schedule(t); + } + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++void tasklet_enable(struct tasklet_struct *t) + { +- struct tasklet_struct *list; ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_schedule(t); ++} ++EXPORT_SYMBOL(tasklet_enable); + +- local_irq_disable(); +- list = __this_cpu_read(tasklet_vec.head); +- __this_cpu_write(tasklet_vec.head, NULL); +- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); +- local_irq_enable(); ++void tasklet_hi_enable(struct tasklet_struct *t) ++{ ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_hi_schedule(t); ++} ++EXPORT_SYMBOL(tasklet_hi_enable); ++ ++static void ++__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) ++{ ++ int loops = 1000000; + + while (list) { + struct tasklet_struct *t = list; + + list = list->next; + +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, +- &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- } +- tasklet_unlock(t); ++ /* ++ * Should always succeed - after a tasklist got on the ++ * list (after getting the SCHED bit set from 0 to 1), ++ * nothing but the tasklet softirq it got queued to can ++ * lock it: ++ */ ++ if (!tasklet_trylock(t)) { ++ WARN_ON(1); ++ continue; + } + +- local_irq_disable(); + t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- 
__raise_softirq_irqoff(TASKLET_SOFTIRQ); +- local_irq_enable(); ++ ++ /* ++ * If we cannot handle the tasklet because it's disabled, ++ * mark it as pending. tasklet_enable() will later ++ * re-schedule the tasklet. ++ */ ++ if (unlikely(atomic_read(&t->count))) { ++out_disabled: ++ /* implicit unlock: */ ++ wmb(); ++ t->state = TASKLET_STATEF_PENDING; ++ continue; ++ } ++ ++ /* ++ * After this point on the tasklet might be rescheduled ++ * on another CPU, but it can only be added to another ++ * CPU's tasklet list if we unlock the tasklet (which we ++ * dont do yet). ++ */ ++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ WARN_ON(1); ++ ++again: ++ t->func(t->data); ++ ++ /* ++ * Try to unlock the tasklet. We must use cmpxchg, because ++ * another CPU might have scheduled or disabled the tasklet. ++ * We only allow the STATE_RUN -> 0 transition here. ++ */ ++ while (!tasklet_tryunlock(t)) { ++ /* ++ * If it got disabled meanwhile, bail out: ++ */ ++ if (atomic_read(&t->count)) ++ goto out_disabled; ++ /* ++ * If it got scheduled meanwhile, re-execute ++ * the tasklet function: ++ */ ++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ goto again; ++ if (!--loops) { ++ printk("hm, tasklet state: %08lx\n", t->state); ++ WARN_ON(1); ++ tasklet_unlock(t); ++ break; ++ } ++ } + } + } + ++static void tasklet_action(struct softirq_action *a) ++{ ++ struct tasklet_struct *list; ++ ++ local_irq_disable(); ++ list = __get_cpu_var(tasklet_vec).head; ++ __get_cpu_var(tasklet_vec).head = NULL; ++ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; ++ local_irq_enable(); ++ ++ __tasklet_action(a, list); ++} ++ + static void tasklet_hi_action(struct softirq_action *a) + { + struct tasklet_struct *list; +@@ -789,30 +883,7 @@ static void tasklet_hi_action(struct sof + __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); + local_irq_enable(); + +- while (list) { +- struct tasklet_struct *t = list; +- +- list = list->next; +- +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, +- &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- } +- tasklet_unlock(t); +- } +- +- local_irq_disable(); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- __raise_softirq_irqoff(HI_SOFTIRQ); +- local_irq_enable(); +- } ++ __tasklet_action(a, list); + } + + void tasklet_init(struct tasklet_struct *t, +@@ -833,7 +904,7 @@ void tasklet_kill(struct tasklet_struct + + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + do { +- yield(); ++ msleep(1); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } + tasklet_unlock_wait(t); +@@ -907,6 +978,23 @@ void __init softirq_init(void) + open_softirq(HI_SOFTIRQ, tasklet_hi_action); + } + ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++void tasklet_unlock_wait(struct tasklet_struct *t) ++{ ++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { ++ /* ++ * Hack for now to avoid this busy-loop: ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ msleep(1); ++#else ++ barrier(); ++#endif ++ } ++} ++EXPORT_SYMBOL(tasklet_unlock_wait); ++#endif ++ + static int ksoftirqd_should_run(unsigned int cpu) + { + return local_softirq_pending(); diff --git a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch new file mode 100644 index 000000000..6b66f88bc --- /dev/null +++ 
b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch @@ -0,0 +1,58 @@ +Subject: rwlocks: Fix section mismatch +From: John Kacur +Date: Mon, 19 Sep 2011 11:09:27 +0200 (CEST) +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +This fixes the following build error for the preempt-rt kernel. + +make kernel/fork.o + CC kernel/fork.o +kernel/fork.c:90: error: section of tasklist_lock conflicts with previous declaration +make[2]: *** [kernel/fork.o] Error 1 +make[1]: *** [kernel/fork.o] Error 2 + +The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default. +The non-rt kernels explicitly cache align only the tasklist_lock in +kernel/fork.c +That can create a build conflict. This fixes the build problem by making the +non-rt kernels cache align RWLOCKs by default. The side effect is that +the other RWLOCKs are also cache aligned for non-rt. + +This is a short term solution for rt only. +The longer term solution would be to push the cache aligned DEFINE_RWLOCK +to mainline. If there are objections, then we could create a +DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature. + +Comments? Objections? + +Signed-off-by: John Kacur +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6 +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types.h | 3 ++- + kernel/fork.c | 2 +- + 2 files changed, 3 insertions(+), 2 deletions(-) + +--- a/include/linux/rwlock_types.h ++++ b/include/linux/rwlock_types.h +@@ -47,6 +47,7 @@ typedef struct { + RW_DEP_MAP_INIT(lockname) } + #endif + +-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + + #endif /* __LINUX_RWLOCK_TYPES_H */ +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -94,7 +94,7 @@ int max_threads; /* tunable limit on nr + + DEFINE_PER_CPU(unsigned long, process_counts) = 0; + +-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ ++DEFINE_RWLOCK(tasklist_lock); /* outer */ + + #ifdef CONFIG_PROVE_RCU + int lockdep_tasklist_lock_is_held(void) diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch new file mode 100644 index 000000000..06d86de5b --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch @@ -0,0 +1,149 @@ +Subject: timekeeping-split-jiffies-lock.patch +From: Thomas Gleixner +Date: Thu, 14 Feb 2013 22:36:59 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/time/jiffies.c | 7 ++++--- + kernel/time/tick-common.c | 10 ++++++---- + kernel/time/tick-internal.h | 3 ++- + kernel/time/tick-sched.c | 16 ++++++++++------ + kernel/time/timekeeping.c | 6 ++++-- + 5 files changed, 26 insertions(+), 16 deletions(-) + +--- a/kernel/time/jiffies.c ++++ b/kernel/time/jiffies.c +@@ -73,7 +73,8 @@ static struct clocksource clocksource_ji + .shift = JIFFIES_SHIFT, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -82,9 +83,9 @@ u64 get_jiffies_64(void) + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while 
(read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +--- a/kernel/time/tick-common.c ++++ b/kernel/time/tick-common.c +@@ -78,13 +78,15 @@ int tick_is_oneshot_available(void) + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -146,9 +148,9 @@ void tick_setup_periodic(struct clock_ev + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + +--- a/kernel/time/tick-internal.h ++++ b/kernel/time/tick-internal.h +@@ -4,7 +4,8 @@ + #include + #include + +-extern seqlock_t jiffies_lock; ++extern raw_spinlock_t jiffies_lock; ++extern seqcount_t jiffies_seq; + + #define CS_NAME_LEN 32 + +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti + return; + + /* Reevalute with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -85,7 +86,8 @@ static void tick_do_update_jiffies64(kti + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -96,12 +98,14 @@ static ktime_t tick_init_jiffy_update(vo + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies update yet ? 
*/ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -537,10 +541,10 @@ static ktime_t tick_nohz_stop_sched_tick + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + last_update = last_jiffies_update; + last_jiffies = jiffies; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || + arch_needs_cpu(cpu) || irq_work_needs_cpu()) { +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1735,8 +1735,10 @@ EXPORT_SYMBOL(hardpps); + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + do_timer(ticks); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } diff --git a/debian/patches/features/all/rt/timer-Raise-softirq-if-there-s-irq_work.patch b/debian/patches/features/all/rt/timer-Raise-softirq-if-there-s-irq_work.patch new file mode 100644 index 000000000..124443f11 --- /dev/null +++ b/debian/patches/features/all/rt/timer-Raise-softirq-if-there-s-irq_work.patch @@ -0,0 +1,47 @@ +From 9c537765294dfce1e51cd713b9020987bb0a3ed2 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Fri, 24 Jan 2014 15:09:33 -0500 +Subject: [PATCH] timer: Raise softirq if there's irq_work +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +[ Talking with Sebastian on IRC, it seems that doing the irq_work_run() + from the interrupt in -rt is a bad thing. Here we simply raise the + softirq if there's irq work to do. This too boots on my i7 ] + +After trying hard to figure out why my i7 box was locking up with the +new active_timers code, that does not run the timer softirq if there +are no active timers, I took an extra look at the softirq handler and +noticed that it doesn't just run timer softirqs, it also runs irq work. + +This was the bug that was locking up the system. It wasn't missing a +timer, it was missing irq work. By always doing the irq work callbacks, +the system boots fine. The missing irq work callback was the RCU's +sp_wakeup() function. + +No need to check for defined(CONFIG_IRQ_WORK). When that's not set the +"irq_work_needs_cpu()" is a static inline that returns false. 
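
The fix described above is essentially one extra condition in run_local_timers(): queued irq_work is, on its own, enough reason to raise the timer softirq, regardless of the active or expired timer checks. A small self-contained C sketch of that decision, with invented stand-in predicates (active_timers, timer_expired, irq_work_queued and raise_timer_softirq() are illustrative flags and helpers, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* stand-in per-cpu state */
static bool active_timers;
static bool timer_expired;
static bool irq_work_queued;

static void raise_timer_softirq(const char *why)
{
	printf("TIMER_SOFTIRQ raised: %s\n", why);
}

/* called from the per-cpu tick */
static void run_local_timers(void)
{
	/* on RT, irq_work runs from this softirq, so it must never be skipped */
	if (irq_work_queued) {
		raise_timer_softirq("irq_work pending");
		return;
	}
	if (!active_timers)
		return;			/* nothing at all to do */
	if (timer_expired)
		raise_timer_softirq("timer expired");
}

int main(void)
{
	irq_work_queued = true;		/* e.g. RCU queued a deferred wakeup */
	run_local_timers();
	return 0;
}

A follow-up patch later in this series ("timer/rt: Always raise the softirq if there's irq_work to be done") restructures run_local_timers() so that this irq_work test stands on its own instead of hiding under the active_timers check.
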
+ +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/timer.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1462,8 +1462,13 @@ void run_local_timers(void) + return; + } + #endif +- if (!base->active_timers) +- goto out; ++ if (!base->active_timers) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* On RT, irq work runs from softirq */ ++ if (!irq_work_needs_cpu()) ++#endif ++ goto out; ++ } + + /* Check whether the next pending timer has expired */ + if (time_before_eq(base->next_timer, jiffies)) diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch new file mode 100644 index 000000000..df37ac935 --- /dev/null +++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch @@ -0,0 +1,76 @@ +From: Peter Zijlstra +Date: Fri, 21 Aug 2009 11:56:45 +0200 +Subject: timer: delay waking softirqs from the jiffy tick +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +People were complaining about broken balancing with the recent -rt +series. + +A look at /proc/sched_debug yielded: + +cpu#0, 2393.874 MHz + .nr_running : 0 + .load : 0 + .cpu_load[0] : 177522 + .cpu_load[1] : 177522 + .cpu_load[2] : 177522 + .cpu_load[3] : 177522 + .cpu_load[4] : 177522 +cpu#1, 2393.874 MHz + .nr_running : 4 + .load : 4096 + .cpu_load[0] : 181618 + .cpu_load[1] : 180850 + .cpu_load[2] : 180274 + .cpu_load[3] : 179938 + .cpu_load[4] : 179758 + +Which indicated the cpu_load computation was hosed, the 177522 value +indicates that there is one RT task runnable. Initially I thought the +old problem of calculating the cpu_load from a softirq had re-surfaced, +however looking at the code shows its being done from scheduler_tick(). + +[ we really should fix this RT/cfs interaction some day... ] + +A few trace_printk()s later: + + sirq-timer/1-19 [001] 174.289744: 19: 50:S ==> [001] 0:140:R + -0 [001] 174.290724: enqueue_task_rt: adding task: 19/sirq-timer/1 with load: 177522 + -0 [001] 174.290725: 0:140:R + [001] 19: 50:S sirq-timer/1 + -0 [001] 174.290730: scheduler_tick: current load: 177522 + -0 [001] 174.290732: scheduler_tick: current: 0/swapper + -0 [001] 174.290736: 0:140:R ==> [001] 19: 50:R sirq-timer/1 + sirq-timer/1-19 [001] 174.290741: dequeue_task_rt: removing task: 19/sirq-timer/1 with load: 177522 + sirq-timer/1-19 [001] 174.290743: 19: 50:S ==> [001] 0:140:R + +We see that we always raise the timer softirq before doing the load +calculation. Avoid this by re-ordering the scheduler_tick() call in +update_process_times() to occur before we deal with timers. + +This lowers the load back to sanity and restores regular load-balancing +behaviour. + +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner + +--- + kernel/timer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1393,13 +1393,13 @@ void update_process_times(int user_tick) + + /* Note: this timer irq context must be accounted for as well. 
*/ + account_process_tick(p, user_tick); ++ scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); + #ifdef CONFIG_IRQ_WORK + if (in_irq()) + irq_work_run(); + #endif +- scheduler_tick(); + run_posix_cpu_timers(p); + } + diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch new file mode 100644 index 000000000..22f88a505 --- /dev/null +++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch @@ -0,0 +1,31 @@ +Subject: timer-fd: Prevent live lock +From: Thomas Gleixner +Date: Wed, 25 Jan 2012 11:08:40 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +If hrtimer_try_to_cancel() requires a retry, then depending on the +priority setting te retry loop might prevent timer callback completion +on RT. Prevent that by waiting for completion on RT, no change for a +non RT kernel. + +Reported-by: Sankara Muthukrishnan +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + fs/timerfd.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/fs/timerfd.c ++++ b/fs/timerfd.c +@@ -380,7 +380,10 @@ static int do_timerfd_settime(int ufd, i + break; + } + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ if (isalarm(ctx)) ++ hrtimer_wait_for_timer(&ctx->t.alarm.timer); ++ else ++ hrtimer_wait_for_timer(&ctx->t.tmr); + } + + /* diff --git a/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch new file mode 100644 index 000000000..f1bbf4a0b --- /dev/null +++ b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch @@ -0,0 +1,79 @@ +Subject: timer-handle-idle-trylock-in-get-next-timer-irq.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 22:08:38 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/spinlock_rt.h | 12 +++++++++++- + kernel/locking/rtmutex.c | 7 +------ + kernel/timer.c | 9 +++++++-- + 3 files changed, 19 insertions(+), 9 deletions(-) + +--- a/include/linux/spinlock_rt.h ++++ b/include/linux/spinlock_rt.h +@@ -50,7 +50,17 @@ extern void __lockfunc __rt_spin_unlock( + + #define spin_lock_irq(lock) spin_lock(lock) + +-#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ migrate_disable(); \ ++ __locked = spin_do_trylock(lock); \ ++ if (!__locked) \ ++ migrate_enable(); \ ++ __locked; \ ++}) + + #ifdef CONFIG_LOCKDEP + # define spin_lock_nested(lock, subclass) \ +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -981,15 +981,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait); + + int __lockfunc rt_spin_trylock(spinlock_t *lock) + { +- int ret; ++ int ret = rt_mutex_trylock(&lock->lock); + +- migrate_disable(); +- ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); +- else +- migrate_enable(); +- + return ret; + } + EXPORT_SYMBOL(rt_spin_trylock); +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1386,9 +1386,10 @@ unsigned long get_next_timer_interrupt(u + /* + * On PREEMPT_RT we cannot sleep here. If the trylock does not + * succeed then we return the worst-case 'expires in 1 tick' +- * value: ++ * value. 
We use the rt functions here directly to avoid a ++ * migrate_disable() call. + */ +- if (!spin_trylock(&base->lock)) ++ if (!spin_do_trylock(&base->lock)) + return now + 1; + #else + spin_lock(&base->lock); +@@ -1398,7 +1399,11 @@ unsigned long get_next_timer_interrupt(u + base->next_timer = __next_timer_interrupt(base); + expires = base->next_timer; + } ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rt_spin_unlock(&base->lock); ++#else + spin_unlock(&base->lock); ++#endif + + if (time_before_eq(expires, now)) + return now; diff --git a/debian/patches/features/all/rt/timer-rt-Always-raise-the-softirq-if-there-s-irq_wor.patch b/debian/patches/features/all/rt/timer-rt-Always-raise-the-softirq-if-there-s-irq_wor.patch new file mode 100644 index 000000000..8749f70d2 --- /dev/null +++ b/debian/patches/features/all/rt/timer-rt-Always-raise-the-softirq-if-there-s-irq_wor.patch @@ -0,0 +1,67 @@ +From a0944325c8d32f24e9c1e0e998a10342fc9b6667 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Fri, 31 Jan 2014 12:07:57 -0500 +Subject: [PATCH 2/7] timer/rt: Always raise the softirq if there's irq_work to + be done +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +It was previously discovered that some systems would hang on boot up +with a previous version of 3.12-rt. This was due to RCU using irq_work, +and RT defers the irq_work to a softirq. But if there's no active +timers, the softirq will not be raised, and RCU work will not get done, +causing the system to hang. The fix was to check that if there was no +active timers but irq_work to be done, then we should raise the softirq. + +But this fix was not 100% correct. It left out the case that there were +active timers that were not expired yet. This would have the softirq +not get raised even if there was irq work to be done. + +If there is irq_work to be done, then we must raise the timer softirq +regardless of if there is active timers or whether they are expired or +not. The softirq can handle those cases. But we can never ignore +irq_work. + +As it is only PREEMPT_RT_FULL that requires irq_work to be done in the +softirq, we can pull out the check in the active_timers condition, and +make the code a bit cleaner by having the irq_work check separate, and +put the code in with the other #ifdef PREEMPT_RT. If there is irq_work +to be done, there's no need to check the active timers or if they are +expired. Just raise the time softirq and be done with it. Otherwise, we +can do the timer checks just like we do with non -rt. + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/timer.c | 16 +++++++++------- + 1 file changed, 9 insertions(+), 7 deletions(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1457,18 +1457,20 @@ void run_local_timers(void) + * the timer softirq. 
+ */ + #ifdef CONFIG_PREEMPT_RT_FULL ++ /* On RT, irq work runs from softirq */ ++ if (irq_work_needs_cpu()) { ++ raise_softirq(TIMER_SOFTIRQ); ++ return; ++ } ++ + if (!spin_do_trylock(&base->lock)) { + raise_softirq(TIMER_SOFTIRQ); + return; + } + #endif +- if (!base->active_timers) { +-#ifdef CONFIG_PREEMPT_RT_FULL +- /* On RT, irq work runs from softirq */ +- if (!irq_work_needs_cpu()) +-#endif +- goto out; +- } ++ ++ if (!base->active_timers) ++ goto out; + + /* Check whether the next pending timer has expired */ + if (time_before_eq(base->next_timer, jiffies)) diff --git a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch new file mode 100644 index 000000000..cd850cf7d --- /dev/null +++ b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch @@ -0,0 +1,69 @@ +Subject: timers: Avoid the switch timers base set to NULL trick on RT +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 15:23:39 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +On RT that code is preemptible, so we cannot assign NULL to timers +base as a preempter would spin forever in lock_timer_base(). + +Signed-off-by: Thomas Gleixner +--- + kernel/timer.c | 40 ++++++++++++++++++++++++++++++++-------- + 1 file changed, 32 insertions(+), 8 deletions(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -723,6 +723,36 @@ static struct tvec_base *lock_timer_base + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* See the comment in lock_timer_base() */ ++ timer_set_base(timer, NULL); ++ spin_unlock(&old->lock); ++ spin_lock(&new->lock); ++ timer_set_base(timer, new); ++ return new; ++} ++#else ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* ++ * We cannot do the above because we might be preempted and ++ * then the preempter would see NULL and loop forever. ++ */ ++ if (spin_trylock(&new->lock)) { ++ timer_set_base(timer, new); ++ spin_unlock(&old->lock); ++ return new; ++ } ++ return old; ++} ++#endif ++ + static inline int + __mod_timer(struct timer_list *timer, unsigned long expires, + bool pending_only, int pinned) +@@ -761,14 +791,8 @@ static inline int + * handler yet has not finished. This also guarantees that + * the timer is serialized wrt itself. 
+ */ +- if (likely(base->running_timer != timer)) { +- /* See the comment in lock_timer_base() */ +- timer_set_base(timer, NULL); +- spin_unlock(&base->lock); +- base = new_base; +- spin_lock(&base->lock); +- timer_set_base(timer, base); +- } ++ if (likely(base->running_timer != timer)) ++ base = switch_timer_base(timer, base, new_base); + } + + timer->expires = expires; diff --git a/debian/patches/features/all/rt/timers-do-not-raise-softirq-unconditionally.patch b/debian/patches/features/all/rt/timers-do-not-raise-softirq-unconditionally.patch new file mode 100644 index 000000000..780505d6e --- /dev/null +++ b/debian/patches/features/all/rt/timers-do-not-raise-softirq-unconditionally.patch @@ -0,0 +1,162 @@ +From 35b6173e6176fc978c635f9e07f1778eff7b76e7 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Thu, 7 Nov 2013 12:21:11 +0100 +Subject: [PATCH] timers: do not raise softirq unconditionally +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Mike, + +On Thu, 7 Nov 2013, Mike Galbraith wrote: + +> On Thu, 2013-11-07 at 04:26 +0100, Mike Galbraith wrote: +> > On Wed, 2013-11-06 at 18:49 +0100, Thomas Gleixner wrote: +> +> > > I bet you are trying to work around some of the side effects of the +> > > occasional tick which is still necessary despite of full nohz, right? +> > +> > Nope, I wanted to check out cost of nohz_full for rt, and found that it +> > doesn't work at all instead, looked, and found that the sole running +> > task has just awakened ksoftirqd when it wants to shut the tick down, so +> > that shutdown never happens. +> +> Like so in virgin 3.10-rt. Box is x3550 M3 booted nowatchdog +> rcu_nocbs=1-3 nohz_full=1-3, and CPUs1-3 are completely isolated via +> cpusets as well. + +well, that very same problem is in mainline if you add "threadirqs" to +the command line. But we can be smart about this. The untested patch +below should address that issue. If that works on mainline we can +adapt it for RT (needs a trylock(&base->lock) there). + +Though it's not a full solution. It needs some thought versus the +softirq code of timers. Assume we have only one timer queued 1000 +ticks into the future. So this change will cause the timer softirq not +to be called until that timer expires and then the timer softirq is +going to do 1000 loops until it catches up with jiffies. That's +anything but pretty ... + +What worries me more is this one: + + pert-5229 [003] d..h1.. 684.482618: softirq_raise: vec=9 [action=RCU] + +The CPU has no callbacks as you shoved them over to cpu 0, so why is +the RCU softirq raised? 
+ +Thanks, + + tglx +------------------ +Message-id: +|CONFIG_NO_HZ_FULL + CONFIG_PREEMPT_RT_FULL = nogo +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/hrtimer.h | 3 +-- + kernel/hrtimer.c | 31 +++++++------------------------ + kernel/timer.c | 28 +++++++++++++++++++++++++--- + 3 files changed, 33 insertions(+), 29 deletions(-) + +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -461,9 +461,8 @@ extern int schedule_hrtimeout_range_cloc + unsigned long delta, const enum hrtimer_mode mode, int clock); + extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); + +-/* Soft interrupt function to run the hrtimer queues: */ ++/* Called from the periodic timer tick */ + extern void hrtimer_run_queues(void); +-extern void hrtimer_run_pending(void); + + /* Bootup initialization: */ + extern void __init hrtimers_init(void); +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1695,30 +1695,6 @@ static void run_hrtimer_softirq(struct s + } + + /* +- * Called from timer softirq every jiffy, expire hrtimers: +- * +- * For HRT its the fall back code to run the softirq in the timer +- * softirq context in case the hrtimer initialization failed or has +- * not been done yet. +- */ +-void hrtimer_run_pending(void) +-{ +- if (hrtimer_hres_active()) +- return; +- +- /* +- * This _is_ ugly: We have to check in the softirq context, +- * whether we can switch to highres and / or nohz mode. The +- * clocksource switch happens in the timer interrupt with +- * xtime_lock held. Notification from there only sets the +- * check bit in the tick_oneshot code, otherwise we might +- * deadlock vs. xtime_lock. +- */ +- if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) +- hrtimer_switch_to_hres(); +-} +- +-/* + * Called from hardirq context every jiffy + */ + void hrtimer_run_queues(void) +@@ -1731,6 +1707,13 @@ void hrtimer_run_queues(void) + if (hrtimer_hres_active()) + return; + ++ /* ++ * Check whether we can switch to highres mode. ++ */ ++ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()) ++ && hrtimer_switch_to_hres()) ++ return; ++ + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { + base = &cpu_base->clock_base[index]; + if (!timerqueue_getnext(&base->active)) +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1439,8 +1439,6 @@ static void run_timer_softirq(struct sof + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + +- hrtimer_run_pending(); +- + if (time_after_eq(jiffies, base->timer_jiffies)) + __run_timers(base); + } +@@ -1450,8 +1448,32 @@ static void run_timer_softirq(struct sof + */ + void run_local_timers(void) + { ++ struct tvec_base *base = __this_cpu_read(tvec_bases); ++ + hrtimer_run_queues(); +- raise_softirq(TIMER_SOFTIRQ); ++ /* ++ * We can access this lockless as we are in the timer ++ * interrupt. If there are no timers queued, nothing to do in ++ * the timer softirq. 
++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!spin_do_trylock(&base->lock)) { ++ raise_softirq(TIMER_SOFTIRQ); ++ return; ++ } ++#endif ++ if (!base->active_timers) ++ goto out; ++ ++ /* Check whether the next pending timer has expired */ ++ if (time_before_eq(base->next_timer, jiffies)) ++ raise_softirq(TIMER_SOFTIRQ); ++out: ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rt_spin_unlock_after_trylock_in_irq(&base->lock); ++#endif ++ /* The ; ensures that gcc won't complain in the !RT case */ ++ ; + } + + #ifdef __ARCH_WANT_SYS_ALARM diff --git a/debian/patches/features/all/rt/timers-preempt-rt-support.patch b/debian/patches/features/all/rt/timers-preempt-rt-support.patch new file mode 100644 index 000000000..accd63fa7 --- /dev/null +++ b/debian/patches/features/all/rt/timers-preempt-rt-support.patch @@ -0,0 +1,57 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:20 -0500 +Subject: timers: preempt-rt support +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + kernel/timer.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1358,7 +1358,17 @@ unsigned long get_next_timer_interrupt(u + if (cpu_is_offline(smp_processor_id())) + return expires; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * On PREEMPT_RT we cannot sleep here. If the trylock does not ++ * succeed then we return the worst-case 'expires in 1 tick' ++ * value: ++ */ ++ if (!spin_trylock(&base->lock)) ++ return now + 1; ++#else + spin_lock(&base->lock); ++#endif + if (base->active_timers) { + if (time_before_eq(base->next_timer, base->timer_jiffies)) + base->next_timer = __next_timer_interrupt(base); +@@ -1368,7 +1378,6 @@ unsigned long get_next_timer_interrupt(u + + if (time_before_eq(expires, now)) + return now; +- + return cmp_next_hrtimer_event(now, expires); + } + #endif +@@ -1618,7 +1627,7 @@ static void migrate_timers(int cpu) + + BUG_ON(cpu_online(cpu)); + old_base = per_cpu(tvec_bases, cpu); +- new_base = get_cpu_var(tvec_bases); ++ new_base = get_local_var(tvec_bases); + /* + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. +@@ -1639,7 +1648,7 @@ static void migrate_timers(int cpu) + + spin_unlock(&old_base->lock); + spin_unlock_irq(&new_base->lock); +- put_cpu_var(tvec_bases); ++ put_local_var(tvec_bases); + } + #endif /* CONFIG_HOTPLUG_CPU */ + diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch new file mode 100644 index 000000000..79652dd3f --- /dev/null +++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch @@ -0,0 +1,57 @@ +From a57194f115acfc967aa0907bc130e95b68723121 Mon Sep 17 00:00:00 2001 +From: Zhao Hongjiang +Date: Wed, 17 Apr 2013 17:44:16 +0800 +Subject: [PATCH] timers: prepare for full preemption improve +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +wake_up should do nothing on the nort, so we should use wakeup_timer_waiters, +also fix a spell mistake. 
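+
+[ For illustration only, not part of the change below: together with the
+  base "timers: prepare for full preemption" patch, the helper pair ends up
+  as roughly
+
+	#ifdef CONFIG_PREEMPT_RT_FULL
+	# define wakeup_timer_waiters(b)	wake_up(&(b)->wait_for_running_timer)
+	#else
+	# define wakeup_timer_waiters(b)	do { } while (0)
+	#endif
+
+  so __run_timers() only pays for the wakeup on PREEMPT_RT_FULL. ]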
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Zhao Hongjiang +[bigeasy: s/CONFIG_PREEMPT_RT_BASE/CONFIG_PREEMPT_RT_FULL/] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/timer.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -78,7 +78,9 @@ struct tvec_root { + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++#ifdef CONFIG_PREEMPT_RT_FULL + wait_queue_head_t wait_for_running_timer; ++#endif + unsigned long timer_jiffies; + unsigned long next_timer; + unsigned long active_timers; +@@ -962,7 +964,7 @@ static void wait_for_running_timer(struc + base->running_timer != timer); + } + +-# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer) + #else + static inline void wait_for_running_timer(struct timer_list *timer) + { +@@ -1216,7 +1218,7 @@ static inline void __run_timers(struct t + } + } + } +- wake_up(&base->wait_for_running_timer); ++ wakeup_timer_waiters(base); + spin_unlock_irq(&base->lock); + } + +@@ -1575,7 +1577,9 @@ static int init_timers_cpu(int cpu) + base = per_cpu(tvec_bases, cpu); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL + init_waitqueue_head(&base->wait_for_running_timer); ++#endif + + for (j = 0; j < TVN_SIZE; j++) { + INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch new file mode 100644 index 000000000..74b508f9e --- /dev/null +++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch @@ -0,0 +1,129 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:34 -0500 +Subject: timers: prepare for full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When softirqs can be preempted we need to make sure that cancelling +the timer from the active thread can not deadlock vs. a running timer +callback. Add a waitqueue to resolve that. 
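+
+[ Illustration only, condensed from the hunks below: on RT the
+  del_timer_sync() retry loop sleeps on the per-base waitqueue instead of
+  busy-waiting, i.e. roughly
+
+	for (;;) {
+		int ret = try_to_del_timer_sync(timer);
+		if (ret >= 0)
+			return ret;
+		/* callback still running: wait for it instead of spinning */
+		wait_for_running_timer(timer);
+	}
+
+  which avoids a higher priority canceller spinning on a CPU while the
+  softirq thread that runs the callback cannot make progress. ]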
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/timer.h | 2 +- + kernel/timer.c | 36 +++++++++++++++++++++++++++++++++--- + 2 files changed, 34 insertions(+), 4 deletions(-) + +--- a/include/linux/timer.h ++++ b/include/linux/timer.h +@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -78,6 +78,7 @@ struct tvec_root { + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++ wait_queue_head_t wait_for_running_timer; + unsigned long timer_jiffies; + unsigned long next_timer; + unsigned long active_timers; +@@ -739,12 +740,15 @@ static inline int + + debug_activate(timer, expires); + ++ preempt_disable_rt(); + cpu = smp_processor_id(); + + #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) + cpu = get_nohz_timer_target(); + #endif ++ preempt_enable_rt(); ++ + new_base = per_cpu(tvec_bases, cpu); + + if (base != new_base) { +@@ -945,6 +949,29 @@ void add_timer_on(struct timer_list *tim + } + EXPORT_SYMBOL_GPL(add_timer_on); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Wait for a running timer ++ */ ++static void wait_for_running_timer(struct timer_list *timer) ++{ ++ struct tvec_base *base = timer->base; ++ ++ if (base->running_timer == timer) ++ wait_event(base->wait_for_running_timer, ++ base->running_timer != timer); ++} ++ ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) ++#else ++static inline void wait_for_running_timer(struct timer_list *timer) ++{ ++ cpu_relax(); ++} ++ ++# define wakeup_timer_waiters(b) do { } while (0) ++#endif ++ + /** + * del_timer - deactive a timer. + * @timer: the timer to be deactivated +@@ -1002,7 +1029,7 @@ int try_to_del_timer_sync(struct timer_l + } + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. 
+ * @timer: the timer to be deactivated +@@ -1062,7 +1089,7 @@ int del_timer_sync(struct timer_list *ti + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; +- cpu_relax(); ++ wait_for_running_timer(timer); + } + } + EXPORT_SYMBOL(del_timer_sync); +@@ -1179,15 +1206,17 @@ static inline void __run_timers(struct t + if (irqsafe) { + spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock(&base->lock); + } else { + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock_irq(&base->lock); + } + } + } +- base->running_timer = NULL; ++ wake_up(&base->wait_for_running_timer); + spin_unlock_irq(&base->lock); + } + +@@ -1546,6 +1575,7 @@ static int init_timers_cpu(int cpu) + base = per_cpu(tvec_bases, cpu); + } + ++ init_waitqueue_head(&base->wait_for_running_timer); + + for (j = 0; j < TVN_SIZE; j++) { + INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch new file mode 100644 index 000000000..36315c6dc --- /dev/null +++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -0,0 +1,47 @@ +From: Steven Rostedt +Date: Thu, 29 Sep 2011 12:24:30 -0500 +Subject: tracing: Account for preempt off in preempt_schedule() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +The preempt_schedule() uses the preempt_disable_notrace() version +because it can cause infinite recursion by the function tracer as +the function tracer uses preempt_enable_notrace() which may call +back into the preempt_schedule() code as the NEED_RESCHED is still +set and the PREEMPT_ACTIVE has not been set yet. + +See commit: d1f74e20b5b064a130cd0743a256c2d3cfe84010 that made this +change. + +The preemptoff and preemptirqsoff latency tracers require the first +and last preempt count modifiers to enable tracing. But this skips +the checks. Since we can not convert them back to the non notrace +version, we can use the idle() hooks for the latency tracers here. +That is, the start/stop_critical_timings() works well to manually +start and stop the latency tracer for preempt off timings. + +Signed-off-by: Steven Rostedt +Signed-off-by: Clark Williams +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2810,7 +2810,16 @@ asmlinkage void __sched notrace preempt_ + + do { + __preempt_count_add(PREEMPT_ACTIVE); ++ /* ++ * The add/subtract must not be traced by the function ++ * tracer. But we still want to account for the ++ * preempt off latency tracer. Since the _notrace versions ++ * of add/subtract skip the accounting for latency tracer ++ * we must force it manually. 
++ */ ++ start_critical_timings(); + __schedule(); ++ stop_critical_timings(); + __preempt_count_sub(PREEMPT_ACTIVE); + + /* diff --git a/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch new file mode 100644 index 000000000..b027e6387 --- /dev/null +++ b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch @@ -0,0 +1,80 @@ +From db7ae440c333156392bc56badc610469a4d522ae Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 8 Apr 2013 16:09:57 +0200 +Subject: [PATCH] kernel/treercu: use a simple waitqueue +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcu/tree.c | 13 +++++++------ + kernel/rcu/tree.h | 2 +- + 2 files changed, 8 insertions(+), 7 deletions(-) + +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -1610,7 +1610,7 @@ static int __noreturn rcu_gp_kthread(voi + trace_rcu_grace_period(rsp->name, + ACCESS_ONCE(rsp->gpnum), + TPS("reqwait")); +- wait_event_interruptible(rsp->gp_wq, ++ swait_event_interruptible(rsp->gp_wq, + ACCESS_ONCE(rsp->gp_flags) & + RCU_GP_FLAG_INIT); + /* Locking provides needed memory barrier. */ +@@ -1637,7 +1637,7 @@ static int __noreturn rcu_gp_kthread(voi + trace_rcu_grace_period(rsp->name, + ACCESS_ONCE(rsp->gpnum), + TPS("fqswait")); +- ret = wait_event_interruptible_timeout(rsp->gp_wq, ++ ret = swait_event_interruptible_timeout(rsp->gp_wq, + ((gf = ACCESS_ONCE(rsp->gp_flags)) & + RCU_GP_FLAG_FQS) || + (!ACCESS_ONCE(rnp->qsmask) && +@@ -1687,7 +1687,7 @@ static void rsp_wakeup(struct irq_work * + struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work); + + /* Wake up rcu_gp_kthread() to start the grace period. */ +- wake_up(&rsp->gp_wq); ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -1763,7 +1763,7 @@ static void rcu_report_qs_rsp(struct rcu + { + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ + } + + /* +@@ -2339,7 +2339,8 @@ static void force_quiescent_state(struct + } + rsp->gp_flags |= RCU_GP_FLAG_FQS; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -3477,7 +3478,7 @@ static void __init rcu_init_one(struct r + } + + rsp->rda = rda; +- init_waitqueue_head(&rsp->gp_wq); ++ init_swait_head(&rsp->gp_wq); + init_irq_work(&rsp->wakeup_work, rsp_wakeup); + rnp = rsp->level[rcu_num_lvls - 1]; + for_each_possible_cpu(i) { +--- a/kernel/rcu/tree.h ++++ b/kernel/rcu/tree.h +@@ -405,7 +405,7 @@ struct rcu_state { + unsigned long gpnum; /* Current gp number. */ + unsigned long completed; /* # of last completed gp. */ + struct task_struct *gp_kthread; /* Task for grace periods. */ +- wait_queue_head_t gp_wq; /* Where GP task waits. */ ++ struct swait_head gp_wq; /* Where GP task waits. */ + int gp_flags; /* Commands for GP task. */ + + /* End of fields guarded by root rcu_node's lock. 
*/ diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch new file mode 100644 index 000000000..624dbda34 --- /dev/null +++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch @@ -0,0 +1,66 @@ +Subject: [UPSTREAM]net,RT:REmove preemption disabling in netif_rx() +From: Priyanka Jain +Date: Thu, 17 May 2012 09:35:11 +0530 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +1)enqueue_to_backlog() (called from netif_rx) should be + bind to a particluar CPU. This can be achieved by + disabling migration. No need to disable preemption + +2)Fixes crash "BUG: scheduling while atomic: ksoftirqd" + in case of RT. + If preemption is disabled, enqueue_to_backog() is called + in atomic context. And if backlog exceeds its count, + kfree_skb() is called. But in RT, kfree_skb() might + gets scheduled out, so it expects non atomic context. + +3)When CONFIG_PREEMPT_RT_FULL is not defined, + migrate_enable(), migrate_disable() maps to + preempt_enable() and preempt_disable(), so no + change in functionality in case of non-RT. + +-Replace preempt_enable(), preempt_disable() with + migrate_enable(), migrate_disable() respectively +-Replace get_cpu(), put_cpu() with get_cpu_light(), + put_cpu_light() respectively + +Signed-off-by: Priyanka Jain +Acked-by: Rajan Srivastava +Cc: +Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + Testing: Tested successfully on p4080ds(8-core SMP system) + + net/core/dev.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3248,7 +3248,7 @@ static int netif_rx_internal(struct sk_b + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -3258,13 +3258,13 @@ static int netif_rx_internal(struct sk_b + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } diff --git a/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch new file mode 100644 index 000000000..94eb50b8a --- /dev/null +++ b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch @@ -0,0 +1,37 @@ +From: Wu Zhangjin +Date: Mon, 4 Jan 2010 11:33:02 +0800 +Subject: USB: Fix the mouse problem when copying large amounts of data +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +When copying large amounts of data between the USB storage devices and +the hard disk, the USB mouse will not work, this patch fixes it. 
+ +[NOTE: This problem have been found in the Loongson family machines, not +sure whether it is producible on other platforms] + +Signed-off-by: Hu Hongbing +Signed-off-by: Wu Zhangjin + +--- + drivers/usb/host/ohci-hcd.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -864,9 +864,13 @@ static irqreturn_t ohci_irq (struct usb_ + } + + if (ints & OHCI_INTR_WDH) { +- spin_lock (&ohci->lock); +- dl_done_list (ohci); +- spin_unlock (&ohci->lock); ++ if (ohci->hcca->done_head == 0) { ++ ints &= ~OHCI_INTR_WDH; ++ } else { ++ spin_lock (&ohci->lock); ++ dl_done_list (ohci); ++ spin_unlock (&ohci->lock); ++ } + } + + if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) { diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch new file mode 100644 index 000000000..cb99cbbd7 --- /dev/null +++ b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch @@ -0,0 +1,59 @@ +From 7b1bab1c264fba44ef4c582d1acd9d8a2ef45e2a Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 8 Nov 2013 17:34:54 +0100 +Subject: [PATCH] usb: use _nort in giveback +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet +context") I see + +|BUG: sleeping function called from invalid context at kernel/rtmutex.c:673 +|in_atomic(): 0, irqs_disabled(): 1, pid: 109, name: irq/11-uhci_hcd +|no locks held by irq/11-uhci_hcd/109. +|irq event stamp: 440 +|hardirqs last enabled at (439): [] _raw_spin_unlock_irqrestore+0x75/0x90 +|hardirqs last disabled at (440): [] __usb_hcd_giveback_urb+0x46/0xc0 +|softirqs last enabled at (0): [] copy_process.part.52+0x511/0x1510 +|softirqs last disabled at (0): [< (null)>] (null) +|CPU: 3 PID: 109 Comm: irq/11-uhci_hcd Not tainted 3.12.0-rt0-rc1+ #13 +|Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 +| 0000000000000000 ffff8800db9ffbe0 ffffffff8169f064 0000000000000000 +| ffff8800db9ffbf8 ffffffff810b2122 ffff88020f03e888 ffff8800db9ffc18 +| ffffffff816a6944 ffffffff810b5748 ffff88020f03c000 ffff8800db9ffc50 +|Call Trace: +| [] dump_stack+0x4e/0x8f +| [] __might_sleep+0x112/0x190 +| [] rt_spin_lock+0x24/0x60 +| [] hid_ctrl+0x3b/0x190 +| [] __usb_hcd_giveback_urb+0x4f/0xc0 +| [] usb_hcd_giveback_urb+0x3f/0x140 +| [] uhci_giveback_urb+0xaf/0x280 +| [] uhci_scan_schedule+0x47a/0xb10 +| [] uhci_irq+0xa6/0x1a0 +| [] usb_hcd_irq+0x28/0x40 +| [] irq_forced_thread_fn+0x23/0x70 +| [] irq_thread+0x10f/0x150 +| [] kthread+0xcd/0xe0 +| [] ret_from_fork+0x7c/0xb0 + +on -RT we run threaded so no need to disable interrupts. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/usb/core/hcd.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1678,9 +1678,9 @@ static void __usb_hcd_giveback_urb(struc + * and no one may trigger the above deadlock situation when + * running complete() in tasklet. 
+ */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + urb->complete(urb); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + usb_anchor_resume_wakeups(anchor); + atomic_dec(&urb->use_count); diff --git a/debian/patches/features/all/rt/use-local-spin_locks-in-local_lock.patch b/debian/patches/features/all/rt/use-local-spin_locks-in-local_lock.patch new file mode 100644 index 000000000..2553e38ad --- /dev/null +++ b/debian/patches/features/all/rt/use-local-spin_locks-in-local_lock.patch @@ -0,0 +1,83 @@ +From 03dd6f7b4ca7aba6b5a4e6dea5c4635e22d07ab4 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Fri, 17 Jan 2014 20:41:58 +0100 +Subject: [PATCH 6/7] use local spin_locks in local_lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Drop recursive call to migrate_disabel/enable for local_*lock* api +reported by Steven Rostedt. + +local_lock will call migrate_disable via get_local_var - call tree is + +get_locked_var + `-> local_lock(lvar) + `-> __local_lock(&get_local_var(lvar)); + `--> # define get_local_var(var) (*({ + migrate_disable(); + &__get_cpu_var(var); })) \ + +thus there should be no need to call migrate_disable/enable recursively in +spin_try/lock/unlock. This patch addes a spin_trylock_local and replaces +the migration disabling calls by the local calls. + +This patch is incomplete as it does not yet cover the _irq/_irqsave variants +by local locks. This patch requires the API cleanup in kernel/softirq.c or +it would break softirq_lock/unlock with respect to migration. + +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/locallock.h | 18 ++++++++++++++---- + 1 file changed, 14 insertions(+), 4 deletions(-) + +--- a/include/linux/locallock.h ++++ b/include/linux/locallock.h +@@ -36,10 +36,20 @@ struct local_irq_lock { + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + ++/* ++ * spin_lock|trylock|unlock_local flavour that does not migrate disable ++ * used for __local_lock|trylock|unlock where get_local_var/put_local_var ++ * already takes care of the migrate_disable/enable ++ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
++ */ ++# define spin_lock_local(lock) spin_lock(lock) ++# define spin_trylock_local(lock) spin_trylock(lock) ++# define spin_unlock_local(lock) spin_unlock(lock) ++ + static inline void __local_lock(struct local_irq_lock *lv) + { + if (lv->owner != current) { +- spin_lock(&lv->lock); ++ spin_lock_local(&lv->lock); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; +@@ -52,7 +62,7 @@ static inline void __local_lock(struct l + + static inline int __local_trylock(struct local_irq_lock *lv) + { +- if (lv->owner != current && spin_trylock(&lv->lock)) { ++ if (lv->owner != current && spin_trylock_local(&lv->lock)) { + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; +@@ -79,7 +89,7 @@ static inline void __local_unlock(struct + return; + + lv->owner = NULL; +- spin_unlock(&lv->lock); ++ spin_unlock_local(&lv->lock); + } + + #define local_unlock(lvar) \ +@@ -211,7 +221,7 @@ static inline int __local_unlock_irqrest + &__get_cpu_var(var); \ + })) + +-#define put_locked_var(lvar, var) local_unlock(lvar) ++#define put_locked_var(lvar, var) local_unlock(lvar); + + #define local_lock_cpu(lvar) \ + ({ \ diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch new file mode 100644 index 000000000..fb5e33195 --- /dev/null +++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch @@ -0,0 +1,30 @@ +From: Thomas Gleixner +Date: Tue, 21 Jul 2009 23:06:05 +0200 +Subject: core: Do not disable interrupts on RT in kernel/users.c +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Use the local_irq_*_nort variants to reduce latencies in RT. The code +is serialized by the locks. No need to disable interrupts. 
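+
+[ Illustration only: the _nort helpers come from the
+  local-irq-rt-depending-variants patch elsewhere in this series and expand
+  to approximately
+
+	#ifdef CONFIG_PREEMPT_RT_FULL
+	/* RT: leave interrupts enabled, the lock provides serialization */
+	# define local_irq_save_nort(flags)	local_save_flags(flags)
+	# define local_irq_restore_nort(flags)	(void)(flags)
+	#else
+	/* !RT: identical to the plain variants */
+	# define local_irq_save_nort(flags)	local_irq_save(flags)
+	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
+	#endif
+
+  so on RT the uidhash_lock alone serializes free_uid(). ]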
+ +Signed-off-by: Thomas Gleixner + +--- + kernel/user.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/kernel/user.c ++++ b/kernel/user.c +@@ -158,11 +158,11 @@ void free_uid(struct user_struct *up) + if (!up) + return; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) + free_user(up, flags); + else +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + struct user_struct *alloc_uid(kuid_t uid) diff --git a/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch b/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch new file mode 100644 index 000000000..0cd5cf5be --- /dev/null +++ b/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch @@ -0,0 +1,203 @@ +Subject: vtime-split-lock-and-seqcount.patch +From: Thomas Gleixner +Date: Tue, 23 Jul 2013 15:45:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/init_task.h | 3 +- + include/linux/sched.h | 3 +- + kernel/fork.c | 3 +- + kernel/sched/cputime.c | 62 +++++++++++++++++++++++++++++----------------- + 4 files changed, 46 insertions(+), 25 deletions(-) +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -147,7 +147,8 @@ extern struct task_group root_task_group + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ +- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ ++ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ ++ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, + #else +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1295,7 +1295,8 @@ struct task_struct { + struct cputime prev_cputime; + #endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_t vtime_seqlock; ++ raw_spinlock_t vtime_lock; ++ seqcount_t vtime_seq; + unsigned long long vtime_snap; + enum { + VTIME_SLEEPING = 0, +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1243,7 +1243,8 @@ static struct task_struct *copy_process( + p->prev_cputime.utime = p->prev_cputime.stime = 0; + #endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_init(&p->vtime_seqlock); ++ raw_spin_lock_init(&p->vtime_lock); ++ seqcount_init(&p->vtime_seq); + p->vtime_snap = 0; + p->vtime_snap_whence = VTIME_SLEEPING; + #endif +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -655,37 +655,45 @@ static void __vtime_account_system(struc + + void vtime_account_system(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_gen_account_irq_exit(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + if (context_tracking_in_user()) + tsk->vtime_snap_whence = VTIME_USER; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_account_user(struct task_struct *tsk) + { + cputime_t delta_cpu; + +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + delta_cpu = get_vtime_delta(tsk); + tsk->vtime_snap_whence = VTIME_SYS; + 
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_user_enter(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + tsk->vtime_snap_whence = VTIME_USER; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_guest_enter(struct task_struct *tsk) +@@ -697,19 +705,23 @@ void vtime_guest_enter(struct task_struc + * synchronization against the reader (task_gtime()) + * that can thus safely catch up with a tickless delta. + */ +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + current->flags |= PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + EXPORT_SYMBOL_GPL(vtime_guest_enter); + + void vtime_guest_exit(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + current->flags &= ~PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + EXPORT_SYMBOL_GPL(vtime_guest_exit); + +@@ -722,24 +734,30 @@ void vtime_account_idle(struct task_stru + + void arch_vtime_task_switch(struct task_struct *prev) + { +- write_seqlock(&prev->vtime_seqlock); ++ raw_spin_lock(&prev->vtime_lock); ++ write_seqcount_begin(&prev->vtime_seq); + prev->vtime_snap_whence = VTIME_SLEEPING; +- write_sequnlock(&prev->vtime_seqlock); ++ write_seqcount_end(&prev->vtime_seq); ++ raw_spin_unlock(&prev->vtime_lock); + +- write_seqlock(¤t->vtime_seqlock); ++ raw_spin_lock(¤t->vtime_lock); ++ write_seqcount_begin(¤t->vtime_seq); + current->vtime_snap_whence = VTIME_SYS; + current->vtime_snap = sched_clock_cpu(smp_processor_id()); +- write_sequnlock(¤t->vtime_seqlock); ++ write_seqcount_end(¤t->vtime_seq); ++ raw_spin_unlock(¤t->vtime_lock); + } + + void vtime_init_idle(struct task_struct *t, int cpu) + { + unsigned long flags; + +- write_seqlock_irqsave(&t->vtime_seqlock, flags); ++ raw_spin_lock_irqsave(&t->vtime_lock, flags); ++ write_seqcount_begin(&t->vtime_seq); + t->vtime_snap_whence = VTIME_SYS; + t->vtime_snap = sched_clock_cpu(cpu); +- write_sequnlock_irqrestore(&t->vtime_seqlock, flags); ++ write_seqcount_end(&t->vtime_seq); ++ raw_spin_unlock_irqrestore(&t->vtime_lock, flags); + } + + cputime_t task_gtime(struct task_struct *t) +@@ -748,13 +766,13 @@ cputime_t task_gtime(struct task_struct + cputime_t gtime; + + do { +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seq); + + gtime = t->gtime; + if (t->flags & PF_VCPU) + gtime += vtime_delta(t); + +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } while (read_seqcount_retry(&t->vtime_seq, seq)); + + return gtime; + } +@@ -777,7 +795,7 @@ fetch_task_cputime(struct task_struct *t + *udelta = 0; + *sdelta = 0; + +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seq); + + if (u_dst) + *u_dst = *u_src; +@@ -801,7 +819,7 @@ fetch_task_cputime(struct task_struct *t + if (t->vtime_snap_whence == VTIME_SYS) + *sdelta = delta; + } +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } 
while (read_seqcount_retry(&t->vtime_seq, seq)); + } + + diff --git a/debian/patches/features/all/rt/wait-simple-implementation.patch b/debian/patches/features/all/rt/wait-simple-implementation.patch new file mode 100644 index 000000000..39dc35d50 --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-implementation.patch @@ -0,0 +1,344 @@ +From: Thomas Gleixner +Date: Mon Dec 12 12:29:04 2011 +0100 +Subject: wait-simple: Simple waitqueue implementation + +wait_queue is a swiss army knife and in most of the cases the +complexity is not needed. For RT waitqueues are a constant source of +trouble as we can't convert the head lock to a raw spinlock due to +fancy and long lasting callbacks. + +Provide a slim version, which allows RT to replace wait queues. This +should go mainline as well, as it lowers memory consumption and +runtime overhead. + +Signed-off-by: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +smp_mb() added by Steven Rostedt to fix a race condition with swait +wakeups vs adding items to the list. +--- + include/linux/wait-simple.h | 235 ++++++++++++++++++++++++++++++++++++++++++++ + kernel/sched/Makefile | 2 + kernel/sched/wait-simple.c | 68 ++++++++++++ + 3 files changed, 304 insertions(+), 1 deletion(-) + +--- /dev/null ++++ b/include/linux/wait-simple.h +@@ -0,0 +1,235 @@ ++#ifndef _LINUX_WAIT_SIMPLE_H ++#define _LINUX_WAIT_SIMPLE_H ++ ++#include ++#include ++ ++#include ++ ++struct swaiter { ++ struct task_struct *task; ++ struct list_head node; ++}; ++ ++#define DEFINE_SWAITER(name) \ ++ struct swaiter name = { \ ++ .task = current, \ ++ .node = LIST_HEAD_INIT((name).node), \ ++ } ++ ++struct swait_head { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++ ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = { \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ ++ .list = LIST_HEAD_INIT((name).list), \ ++ } ++ ++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); ++ ++#define init_swait_head(swh) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_swait_head((swh), &__key); \ ++ } while (0) ++ ++/* ++ * Waiter functions ++ */ ++static inline bool swaiter_enqueued(struct swaiter *w) ++{ ++ return w->task != NULL; ++} ++ ++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish(struct swait_head *head, struct swaiter *w); ++ ++/* ++ * Adds w to head->list. Must be called with head->lock locked. ++ */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++ /* We can't let the condition leak before the setting of head */ ++ smp_mb(); ++} ++ ++/* ++ * Removes w from head->list. Must be called with head->lock locked. ++ */ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* ++ * Check whether a head has waiters enqueued ++ */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); ++ return !list_empty(&h->list); ++} ++ ++/* ++ * Wakeup functions ++ */ ++extern int __swait_wake(struct swait_head *head, unsigned int state); ++ ++static inline int swait_wake(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? ++ __swait_wake(head, TASK_NORMAL) : 0; ++} ++ ++static inline int swait_wake_interruptible(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? 
++ __swait_wake(head, TASK_INTERRUPTIBLE) : 0; ++} ++ ++/* ++ * Event API ++ */ ++ ++#define __swait_event(wq, condition) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ */ ++#define swait_event(wq, condition) \ ++do { \ ++ if (condition) \ ++ break; \ ++ __swait_event(wq, condition); \ ++} while (0) ++ ++#define __swait_event_interruptible(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++#define __swait_event_interruptible_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_interruptible - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ */ ++#define swait_event_interruptible(wq, condition) \ ++({ \ ++ int __ret = 0; \ ++ if (!(condition)) \ ++ __swait_event_interruptible(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define swait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ int __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_interruptible_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define __swait_event_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * @timeout: timeout, in jiffies ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ * ++ * The function returns 0 if the @timeout elapsed, and the remaining ++ * jiffies if the condition evaluated to true before the timeout elapsed. 
++ */ ++#define swait_event_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#endif +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -13,7 +13,7 @@ endif + + obj-y += core.o proc.o clock.o cputime.o + obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o +-obj-y += wait.o completion.o ++obj-y += wait.o wait-simple.o completion.o + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o + obj-$(CONFIG_SCHEDSTATS) += stats.o +--- /dev/null ++++ b/kernel/sched/wait-simple.c +@@ -0,0 +1,68 @@ ++/* ++ * Simple waitqueues without fancy flags and callbacks ++ * ++ * (C) 2011 Thomas Gleixner ++ * ++ * Based on kernel/wait.c ++ * ++ * For licencing details see kernel-base/COPYING ++ */ ++#include ++#include ++#include ++#include ++ ++void __init_swait_head(struct swait_head *head, struct lock_class_key *key) ++{ ++ raw_spin_lock_init(&head->lock); ++ lockdep_set_class(&head->lock, key); ++ INIT_LIST_HEAD(&head->list); ++} ++EXPORT_SYMBOL(__init_swait_head); ++ ++void swait_prepare(struct swait_head *head, struct swaiter *w, int state) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++ set_current_state(state); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++} ++EXPORT_SYMBOL(swait_prepare); ++ ++void swait_finish(struct swait_head *head, struct swaiter *w) ++{ ++ unsigned long flags; ++ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) { ++ raw_spin_lock_irqsave(&head->lock, flags); ++ __swait_dequeue(w); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ } ++} ++EXPORT_SYMBOL(swait_finish); ++ ++int __swait_wake(struct swait_head *head, unsigned int state) ++{ ++ struct swaiter *curr, *next; ++ unsigned long flags; ++ int woken = 0; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ ++ list_for_each_entry_safe(curr, next, &head->list, node) { ++ if (wake_up_state(curr->task, state)) { ++ __swait_dequeue(curr); ++ curr->task = NULL; ++ woken++; ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ return woken; ++} ++EXPORT_SYMBOL(__swait_wake); diff --git a/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch new file mode 100644 index 000000000..6b6073bf6 --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch @@ -0,0 +1,218 @@ +Subject: wait-simple: Rework for use with completions +From: Thomas Gleixner +Date: Thu, 10 Jan 2013 11:47:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/wait-simple.h | 60 +++++++----------------------------- + kernel/sched/wait-simple.c | 73 ++++++++++++++++++++++++++++++++++++++------ + 2 files changed, 76 insertions(+), 57 deletions(-) + +--- a/include/linux/wait-simple.h ++++ b/include/linux/wait-simple.h +@@ -22,12 +22,14 @@ struct swait_head { + struct list_head list; + }; + +-#define DEFINE_SWAIT_HEAD(name) \ +- struct swait_head name = { \ ++#define SWAIT_HEAD_INITIALIZER(name) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .list = LIST_HEAD_INIT((name).list), \ + } + ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) ++ + extern void __init_swait_head(struct swait_head *h, struct 
lock_class_key *key); + + #define init_swait_head(swh) \ +@@ -40,63 +42,25 @@ extern void __init_swait_head(struct swa + /* + * Waiter functions + */ +-static inline bool swaiter_enqueued(struct swaiter *w) +-{ +- return w->task != NULL; +-} +- ++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); + extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); + extern void swait_finish(struct swait_head *head, struct swaiter *w); + + /* +- * Adds w to head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +-{ +- list_add(&w->node, &head->list); +- /* We can't let the condition leak before the setting of head */ +- smp_mb(); +-} +- +-/* +- * Removes w from head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_dequeue(struct swaiter *w) +-{ +- list_del_init(&w->node); +-} +- +-/* +- * Check whether a head has waiters enqueued +- */ +-static inline bool swait_head_has_waiters(struct swait_head *h) +-{ +- /* Make sure the condition is visible before checking list_empty() */ +- smp_mb(); +- return !list_empty(&h->list); +-} +- +-/* + * Wakeup functions + */ +-extern int __swait_wake(struct swait_head *head, unsigned int state); ++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); ++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); + +-static inline int swait_wake(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? +- __swait_wake(head, TASK_NORMAL) : 0; +-} +- +-static inline int swait_wake_interruptible(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? +- __swait_wake(head, TASK_INTERRUPTIBLE) : 0; +-} ++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) ++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) ++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) ++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) + + /* + * Event API + */ +- + #define __swait_event(wq, condition) \ + do { \ + DEFINE_SWAITER(__wait); \ +--- a/kernel/sched/wait-simple.c ++++ b/kernel/sched/wait-simple.c +@@ -12,6 +12,28 @@ + #include + #include + ++/* Adds w to head->list. Must be called with head->lock locked. */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++ /* We can't let the condition leak before the setting of head */ ++ smp_mb(); ++} ++ ++/* Removes w from head->list. Must be called with head->lock locked. 
*/ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* Check whether a head has waiters enqueued */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); ++ return !list_empty(&h->list); ++} ++ + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) + { + raw_spin_lock_init(&head->lock); +@@ -20,19 +42,31 @@ void __init_swait_head(struct swait_head + } + EXPORT_SYMBOL(__init_swait_head); + ++void swait_prepare_locked(struct swait_head *head, struct swaiter *w) ++{ ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++} ++ + void swait_prepare(struct swait_head *head, struct swaiter *w, int state) + { + unsigned long flags; + + raw_spin_lock_irqsave(&head->lock, flags); +- w->task = current; +- if (list_empty(&w->node)) +- __swait_enqueue(head, w); +- set_current_state(state); ++ swait_prepare_locked(head, w); ++ __set_current_state(state); + raw_spin_unlock_irqrestore(&head->lock, flags); + } + EXPORT_SYMBOL(swait_prepare); + ++void swait_finish_locked(struct swait_head *head, struct swaiter *w) ++{ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) ++ __swait_dequeue(w); ++} ++ + void swait_finish(struct swait_head *head, struct swaiter *w) + { + unsigned long flags; +@@ -46,22 +80,43 @@ void swait_finish(struct swait_head *hea + } + EXPORT_SYMBOL(swait_finish); + +-int __swait_wake(struct swait_head *head, unsigned int state) ++unsigned int ++__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num) + { + struct swaiter *curr, *next; +- unsigned long flags; + int woken = 0; + +- raw_spin_lock_irqsave(&head->lock, flags); +- + list_for_each_entry_safe(curr, next, &head->list, node) { + if (wake_up_state(curr->task, state)) { + __swait_dequeue(curr); ++ /* ++ * The waiting task can free the waiter as ++ * soon as curr->task = NULL is written, ++ * without taking any locks. A memory barrier ++ * is required here to prevent the following ++ * store to curr->task from getting ahead of ++ * the dequeue operation. 
++ */ ++ smp_wmb(); + curr->task = NULL; +- woken++; ++ if (++woken == num) ++ break; + } + } ++ return woken; ++} ++ ++unsigned int ++__swait_wake(struct swait_head *head, unsigned int state, unsigned int num) ++{ ++ unsigned long flags; ++ int woken; + ++ if (!swait_head_has_waiters(head)) ++ return 0; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ woken = __swait_wake_locked(head, state, num); + raw_spin_unlock_irqrestore(&head->lock, flags); + return woken; + } diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch new file mode 100644 index 000000000..4c5fb9fd1 --- /dev/null +++ b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch @@ -0,0 +1,35 @@ +From 405ee18a071001dfeeef3b2b0eafd50cac712ceb Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 28 Oct 2013 12:19:57 +0100 +Subject: [PATCH] wait.h: include atomic.h +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +| CC init/main.o +|In file included from include/linux/mmzone.h:9:0, +| from include/linux/gfp.h:4, +| from include/linux/kmod.h:22, +| from include/linux/module.h:13, +| from init/main.c:15: +|include/linux/wait.h: In function ‘wait_on_atomic_t’: +|include/linux/wait.h:982:2: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] +| if (atomic_read(val) == 0) +| ^ + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/wait.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/linux/wait.h ++++ b/include/linux/wait.h +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + typedef struct __wait_queue wait_queue_t; + typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch new file mode 100644 index 000000000..d369ee3f1 --- /dev/null +++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch @@ -0,0 +1,133 @@ +From: Thomas Gleixner +Date: Mon, 01 Jul 2013 11:02:42 +0200 +Subject: [PATCH] workqueue vs ata-piix livelock fixup +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +An Intel i7 system regularly detected rcu_preempt stalls after the kernel +was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no +longer possible, unless the system was restarted. + +The kernel message was: +INFO: rcu_preempt self-detected stall on CPU { 6} +[..] +NMI backtrace for cpu 6 +CPU 6 +Pid: 119, comm: irq/19-ata_piix Not tainted 3.8.13-rt13 #11 Shuttle Inc. 
SX58/SX58 +RIP: 0010:[] [] ip_compute_csum+0x30/0x30 +RSP: 0018:ffff880333303cb0 EFLAGS: 00000002 +RAX: 0000000000000006 RBX: 00000000000003e9 RCX: 0000000000000034 +RDX: 0000000000000000 RSI: ffffffff81aa16d0 RDI: 0000000000000001 +RBP: ffff880333303ce8 R08: ffffffff81aa16d0 R09: ffffffff81c1b8cc +R10: 0000000000000000 R11: 0000000000000000 R12: 000000000005161f +R13: 0000000000000006 R14: ffffffff81aa16d0 R15: 0000000000000002 +FS: 0000000000000000(0000) GS:ffff880333300000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b +CR2: 0000003c1b2bb420 CR3: 0000000001a0f000 CR4: 00000000000007e0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 +Process irq/19-ata_piix (pid: 119, threadinfo ffff88032d88a000, task ffff88032df80000) +Stack: +ffffffff8124cb32 000000000005161e 00000000000003e9 0000000000001000 +0000000000009022 ffffffff81aa16d0 0000000000000002 ffff880333303cf8 +ffffffff8124caa9 ffff880333303d08 ffffffff8124cad2 ffff880333303d28 +Call Trace: + +[] ? delay_tsc+0x33/0xe3 +[] __delay+0xf/0x11 +[] __const_udelay+0x27/0x29 +[] native_safe_apic_wait_icr_idle+0x39/0x45 +[] __default_send_IPI_dest_field.constprop.0+0x1e/0x58 +[] default_send_IPI_mask_sequence_phys+0x49/0x7d +[] physflat_send_IPI_all+0x17/0x19 +[] arch_trigger_all_cpu_backtrace+0x50/0x79 +[] rcu_check_callbacks+0x1cb/0x568 +[] ? raise_softirq+0x2e/0x35 +[] ? tick_sched_do_timer+0x38/0x38 +[] update_process_times+0x44/0x55 +[] tick_sched_handle+0x4a/0x59 +[] tick_sched_timer+0x3c/0x5b +[] __run_hrtimer+0x9b/0x158 +[] hrtimer_interrupt+0x172/0x2aa +[] smp_apic_timer_interrupt+0x76/0x89 +[] apic_timer_interrupt+0x6d/0x80 + +[] ? __local_lock_irqsave+0x17/0x4a +[] try_to_grab_pending+0x42/0x17e +[] mod_delayed_work_on+0x32/0x88 +[] mod_delayed_work+0x1c/0x1e +[] blk_run_queue_async+0x37/0x39 +[] flush_end_io+0xf1/0x107 +[] blk_finish_request+0x21e/0x264 +[] blk_end_bidi_request+0x42/0x60 +[] blk_end_request+0x10/0x12 +[] scsi_io_completion+0x1bf/0x492 +[] ? sd_done+0x298/0x2ef +[] scsi_finish_command+0xe9/0xf2 +[] scsi_softirq_done+0x106/0x10f +[] blk_done_softirq+0x77/0x87 +[] do_current_softirqs+0x172/0x2e1 +[] ? irq_thread_fn+0x3a/0x3a +[] local_bh_enable+0x43/0x72 +[] irq_forced_thread_fn+0x46/0x52 +[] irq_thread+0x8c/0x17c +[] ? irq_thread+0x17c/0x17c +[] ? wake_threads_waitq+0x44/0x44 +[] kthread+0x8d/0x95 +[] ? __kthread_parkme+0x65/0x65 +[] ret_from_fork+0x7c/0xb0 +[] ? __kthread_parkme+0x65/0x65 + +The state of softirqd of this CPU at the time of the crash was: +ksoftirqd/6 R running task 0 53 2 0x00000000 +ffff88032fc39d18 0000000000000046 ffff88033330c4c0 ffff8803303f4710 +ffff88032fc39fd8 ffff88032fc39fd8 0000000000000000 0000000000062500 +ffff88032df88000 ffff8803303f4710 0000000000000000 ffff88032fc38000 +Call Trace: +[] ? __queue_work+0x27c/0x27c +[] preempt_schedule+0x61/0x76 +[] migrate_enable+0xe5/0x1df +[] ? __queue_work+0x27c/0x27c +[] run_timer_softirq+0x161/0x1d6 +[] do_current_softirqs+0x172/0x2e1 +[] run_ksoftirqd+0x2d/0x45 +[] smpboot_thread_fn+0x2ea/0x308 +[] ? test_ti_thread_flag+0xc/0xc +[] ? test_ti_thread_flag+0xc/0xc +[] kthread+0x8d/0x95 +[] ? __kthread_parkme+0x65/0x65 +[] ret_from_fork+0x7c/0xb0 +[] ? __kthread_parkme+0x65/0x65 + +Apparently, the softirq demon and the ata_piix IRQ handler were waiting +for each other to finish ending up in a livelock. After the below patch +was applied, the system no longer crashes. 
+ +Reported-by: Carsten Emde +Proposed-by: Thomas Gleixner +Tested by: Carsten Emde +Signed-off-by: Carsten Emde +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/workqueue.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + + #include "workqueue_internal.h" + +@@ -1263,7 +1264,7 @@ static int try_to_grab_pending(struct wo + local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; +- cpu_relax(); ++ cpu_chill(); + return -EAGAIN; + } + diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch new file mode 100644 index 000000000..412325fe7 --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch @@ -0,0 +1,260 @@ +From: Thomas Gleixner +Date: Wed Jun 22 19:47:03 2011 +0200 +Subject: sched: Distangle worker accounting from rqlock + +The worker accounting for cpu bound workers is plugged into the core +scheduler code and the wakeup code. This is not a hard requirement and +can be avoided by keeping track of the state in the workqueue code +itself. + +Keep track of the sleeping state in the worker itself and call the +notifier before entering the core scheduler. There might be false +positives when the task is woken between that call and actually +scheduling, but that's not really different from scheduling and being +woken immediately after switching away. There is also no harm from +updating nr_running when the task returns from scheduling instead of +accounting it in the wakeup code. + +Signed-off-by: Thomas Gleixner +Cc: Peter Zijlstra +Cc: Tejun Heo +Cc: Jens Axboe +Cc: Linus Torvalds +Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de +Signed-off-by: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +--- + kernel/sched/core.c | 70 +++++++++----------------------------------- + kernel/workqueue.c | 55 ++++++++++++++-------------------- + kernel/workqueue_internal.h | 5 +-- + 3 files changed, 41 insertions(+), 89 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1430,10 +1430,6 @@ static void ttwu_activate(struct rq *rq, + { + activate_task(rq, p, en_flags); + p->on_rq = 1; +- +- /* if a worker is waking up, notify workqueue */ +- if (p->flags & PF_WQ_WORKER) +- wq_worker_waking_up(p, cpu_of(rq)); + } + + /* +@@ -1679,42 +1675,6 @@ try_to_wake_up(struct task_struct *p, un + } + + /** +- * try_to_wake_up_local - try to wake up a local task with rq lock held +- * @p: the thread to be awakened +- * +- * Put @p on the run-queue if it's not already there. The caller must +- * ensure that this_rq() is locked, @p is bound to this_rq() and not +- * the current task. 
+- */ +-static void try_to_wake_up_local(struct task_struct *p) +-{ +- struct rq *rq = task_rq(p); +- +- if (WARN_ON_ONCE(rq != this_rq()) || +- WARN_ON_ONCE(p == current)) +- return; +- +- lockdep_assert_held(&rq->lock); +- +- if (!raw_spin_trylock(&p->pi_lock)) { +- raw_spin_unlock(&rq->lock); +- raw_spin_lock(&p->pi_lock); +- raw_spin_lock(&rq->lock); +- } +- +- if (!(p->state & TASK_NORMAL)) +- goto out; +- +- if (!p->on_rq) +- ttwu_activate(rq, p, ENQUEUE_WAKEUP); +- +- ttwu_do_wakeup(rq, p, 0); +- ttwu_stat(p, smp_processor_id(), 0); +-out: +- raw_spin_unlock(&p->pi_lock); +-} +- +-/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * +@@ -2873,21 +2833,6 @@ static void __sched __schedule(void) + } else { + deactivate_task(rq, prev, DEQUEUE_SLEEP); + prev->on_rq = 0; +- +- /* +- * If a worker went to sleep, notify and ask workqueue +- * whether it wants to wake up a task to maintain +- * concurrency. +- * Only call wake up if prev isn't blocked on a sleeping +- * spin lock. +- */ +- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { +- struct task_struct *to_wakeup; +- +- to_wakeup = wq_worker_sleeping(prev, cpu); +- if (to_wakeup) +- try_to_wake_up_local(to_wakeup); +- } + } + switch_count = &prev->nvcsw; + } +@@ -2931,6 +2876,14 @@ static inline void sched_submit_work(str + { + if (!tsk->state || tsk_is_pi_blocked(tsk)) + return; ++ ++ /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ */ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ + /* + * If we are going to sleep and we have plugged IO queued, + * make sure to submit it to avoid deadlocks. +@@ -2939,12 +2892,19 @@ static inline void sched_submit_work(str + blk_schedule_flush_plug(tsk); + } + ++static inline void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++} ++ + asmlinkage void __sched schedule(void) + { + struct task_struct *tsk = current; + + sched_submit_work(tsk); + __schedule(); ++ sched_update_worker(tsk); + } + EXPORT_SYMBOL(schedule); + +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -815,44 +815,31 @@ static void wake_up_worker(struct worker + } + + /** +- * wq_worker_waking_up - a worker is waking up +- * @task: task waking up +- * @cpu: CPU @task is waking up to ++ * wq_worker_running - a worker is running again ++ * @task: task returning from sleep + * +- * This function is called during try_to_wake_up() when a worker is +- * being awoken. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) ++ * This function is called when a worker returns from schedule() + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu) ++void wq_worker_running(struct task_struct *task) + { + struct worker *worker = kthread_data(task); + +- if (!(worker->flags & WORKER_NOT_RUNNING)) { +- WARN_ON_ONCE(worker->pool->cpu != cpu); ++ if (!worker->sleeping) ++ return; ++ if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(&worker->pool->nr_running); +- } ++ worker->sleeping = 0; + } + + /** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep +- * @cpu: CPU in question, must be the current CPU number +- * +- * This function is called during schedule() when a busy worker is +- * going to sleep. Worker on the same cpu can be woken up by +- * returning pointer to its task. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) +- * +- * Return: +- * Worker task on @cpu to wake up, %NULL if none. 
++ * This function is called from schedule() when a busy worker is ++ * going to sleep. + */ +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) ++void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task), *to_wakeup = NULL; ++ struct worker *next, *worker = kthread_data(task); + struct worker_pool *pool; + + /* +@@ -861,14 +848,15 @@ struct task_struct *wq_worker_sleeping(s + * checking NOT_RUNNING. + */ + if (worker->flags & WORKER_NOT_RUNNING) +- return NULL; ++ return; + + pool = worker->pool; + +- /* this can only happen on the local cpu */ +- if (WARN_ON_ONCE(cpu != raw_smp_processor_id())) +- return NULL; ++ if (WARN_ON_ONCE(worker->sleeping)) ++ return; + ++ worker->sleeping = 1; ++ spin_lock_irq(&pool->lock); + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). +@@ -881,9 +869,12 @@ struct task_struct *wq_worker_sleeping(s + * lock is safe. + */ + if (atomic_dec_and_test(&pool->nr_running) && +- !list_empty(&pool->worklist)) +- to_wakeup = first_worker(pool); +- return to_wakeup ? to_wakeup->task : NULL; ++ !list_empty(&pool->worklist)) { ++ next = first_worker(pool); ++ if (next) ++ wake_up_process(next->task); ++ } ++ spin_unlock_irq(&pool->lock); + } + + /** +--- a/kernel/workqueue_internal.h ++++ b/kernel/workqueue_internal.h +@@ -41,6 +41,7 @@ struct worker { + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ + int id; /* I: worker id */ ++ int sleeping; /* None */ + + /* + * Opaque string set with work_set_desc(). Printed out with task +@@ -66,7 +67,7 @@ static inline struct worker *current_wq_ + * Scheduler hooks for concurrency managed workqueue. Only to be used from + * sched/core.c and workqueue.c. + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu); +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); ++void wq_worker_running(struct task_struct *task); ++void wq_worker_sleeping(struct task_struct *task); + + #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch new file mode 100644 index 000000000..10d1cf154 --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch @@ -0,0 +1,151 @@ +Subject: Use local irq lock instead of irq disable regions +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:42:26 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/workqueue.c | 33 ++++++++++++++++++--------------- + 1 file changed, 18 insertions(+), 15 deletions(-) + +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -48,6 +48,7 @@ + #include + #include + #include ++#include + + #include "workqueue_internal.h" + +@@ -323,6 +324,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient + struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); + ++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); ++ + static int worker_thread(void *__worker); + static void copy_workqueue_attrs(struct workqueue_attrs *to, + const struct workqueue_attrs *from); +@@ -1089,9 +1092,9 @@ static void put_pwq_unlocked(struct pool + * As both pwqs and pools are RCU protected, the + * following lock operations are safe. 
+ */ +- spin_lock_irq(&pwq->pool->lock); ++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); + put_pwq(pwq); +- spin_unlock_irq(&pwq->pool->lock); ++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); + } + } + +@@ -1193,7 +1196,7 @@ static int try_to_grab_pending(struct wo + struct worker_pool *pool; + struct pool_workqueue *pwq; + +- local_irq_save(*flags); ++ local_lock_irqsave(pendingb_lock, *flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1257,7 +1260,7 @@ static int try_to_grab_pending(struct wo + spin_unlock(&pool->lock); + fail: + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; + cpu_relax(); +@@ -1329,7 +1332,7 @@ static void __queue_work(int cpu, struct + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + debug_work_activate(work); + +@@ -1434,14 +1437,14 @@ bool queue_work_on(int cpu, struct workq + bool ret = false; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock,flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_work_on); +@@ -1508,14 +1511,14 @@ bool queue_delayed_work_on(int cpu, stru + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock, flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_delayed_work_on); +@@ -1550,7 +1553,7 @@ bool mod_delayed_work_on(int cpu, struct + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -2909,7 +2912,7 @@ static bool __cancel_work_timer(struct w + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + + flush_work(work); + clear_work_data(work); +@@ -2954,10 +2957,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_irq_disable(); ++ local_lock_irq(pendingb_lock); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- local_irq_enable(); ++ local_unlock_irq(pendingb_lock); + return flush_work(&dwork->work); + } + EXPORT_SYMBOL(flush_delayed_work); +@@ -2992,7 +2995,7 @@ bool cancel_delayed_work(struct delayed_ + + set_work_pool_and_clear_pending(&dwork->work, + get_work_pool_id(&dwork->work)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(cancel_delayed_work); +@@ -4467,7 +4470,7 @@ unsigned int work_busy(struct work_struc + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- rcu_read_lock() ++ rcu_read_lock(); + pool = get_work_pool(work); + if (pool) { + spin_lock_irqsave(&pool->lock, flags); diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch 
b/debian/patches/features/all/rt/workqueue-use-rcu.patch new file mode 100644 index 000000000..2a3549c2c --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch @@ -0,0 +1,309 @@ +Subject: workqueue: Use normal rcu +From: Thomas Gleixner +Date: Wed, 24 Jul 2013 15:26:54 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +There is no need for sched_rcu. The undocumented reason why sched_rcu +is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by +abusing the fact that sched_rcu reader side critical sections are also +protected by preempt or irq disabled regions. + +Signed-off-by: Thomas Gleixner +--- + kernel/workqueue.c | 85 +++++++++++++++++++++++++++++------------------------ + 1 file changed, 47 insertions(+), 38 deletions(-) + +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -129,11 +129,11 @@ enum { + * + * PL: wq_pool_mutex protected. + * +- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. ++ * PR: wq_pool_mutex protected for writes. RCU protected for reads. + * + * WQ: wq->mutex protected. + * +- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. ++ * WR: wq->mutex protected for writes. RCU protected for reads. + * + * MD: wq_mayday_lock protected. + */ +@@ -178,7 +178,7 @@ struct worker_pool { + atomic_t nr_running ____cacheline_aligned_in_smp; + + /* +- * Destruction of pool is sched-RCU protected to allow dereferences ++ * Destruction of pool is RCU protected to allow dereferences + * from get_work_pool(). + */ + struct rcu_head rcu; +@@ -207,7 +207,7 @@ struct pool_workqueue { + /* + * Release of unbound pwq is punted to system_wq. See put_pwq() + * and pwq_unbound_release_workfn() for details. pool_workqueue +- * itself is also sched-RCU protected so that the first pwq can be ++ * itself is also RCU protected so that the first pwq can be + * determined without grabbing wq->mutex. + */ + struct work_struct unbound_release_work; +@@ -331,14 +331,14 @@ static void copy_workqueue_attrs(struct + #include + + #define assert_rcu_or_pool_mutex() \ +- rcu_lockdep_assert(rcu_read_lock_sched_held() || \ ++ rcu_lockdep_assert(rcu_read_lock_held() || \ + lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU or wq_pool_mutex should be held") ++ "RCU or wq_pool_mutex should be held") + + #define assert_rcu_or_wq_mutex(wq) \ +- rcu_lockdep_assert(rcu_read_lock_sched_held() || \ ++ rcu_lockdep_assert(rcu_read_lock_held() || \ + lockdep_is_held(&wq->mutex), \ +- "sched RCU or wq->mutex should be held") ++ "RCU or wq->mutex should be held") + + #ifdef CONFIG_LOCKDEP + #define assert_manager_or_pool_lock(pool) \ +@@ -360,7 +360,7 @@ static void copy_workqueue_attrs(struct + * @pool: iteration cursor + * @pi: integer used for iteration + * +- * This must be called either with wq_pool_mutex held or sched RCU read ++ * This must be called either with wq_pool_mutex held or RCU read + * locked. If the pool needs to be used beyond the locking in effect, the + * caller is responsible for guaranteeing that the pool stays online. + * +@@ -393,7 +393,7 @@ static void copy_workqueue_attrs(struct + * @pwq: iteration cursor + * @wq: the target workqueue + * +- * This must be called either with wq->mutex held or sched RCU read locked. ++ * This must be called either with wq->mutex held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. 
+ * +@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct + * @wq: the target workqueue + * @node: the node ID + * +- * This must be called either with pwq_lock held or sched RCU read locked. ++ * This must be called either with pwq_lock held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. + * +@@ -652,8 +652,8 @@ static struct pool_workqueue *get_work_p + * @work: the work item of interest + * + * Pools are created and destroyed under wq_pool_mutex, and allows read +- * access under sched-RCU read lock. As such, this function should be +- * called under wq_pool_mutex or with preemption disabled. ++ * access under RCU read lock. As such, this function should be ++ * called under wq_pool_mutex or inside of a rcu_read_lock() region. + * + * All fields of the returned pool are accessible as long as the above + * mentioned locking is in effect. If the returned pool needs to be used +@@ -1086,7 +1086,7 @@ static void put_pwq_unlocked(struct pool + { + if (pwq) { + /* +- * As both pwqs and pools are sched-RCU protected, the ++ * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ + spin_lock_irq(&pwq->pool->lock); +@@ -1212,6 +1212,7 @@ static int try_to_grab_pending(struct wo + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) + return 0; + ++ rcu_read_lock(); + /* + * The queueing is in progress, or it is already queued. Try to + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. +@@ -1250,10 +1251,12 @@ static int try_to_grab_pending(struct wo + set_work_pool_and_keep_pending(work, pool->id); + + spin_unlock(&pool->lock); ++ rcu_read_unlock(); + return 1; + } + spin_unlock(&pool->lock); + fail: ++ rcu_read_unlock(); + local_irq_restore(*flags); + if (work_is_canceling(work)) + return -ENOENT; +@@ -1334,6 +1337,8 @@ static void __queue_work(int cpu, struct + if (unlikely(wq->flags & __WQ_DRAINING) && + WARN_ON_ONCE(!is_chained_work(wq))) + return; ++ ++ rcu_read_lock(); + retry: + if (req_cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); +@@ -1390,10 +1395,8 @@ static void __queue_work(int cpu, struct + /* pwq determined, queue */ + trace_workqueue_queue_work(req_cpu, pwq, work); + +- if (WARN_ON(!list_empty(&work->entry))) { +- spin_unlock(&pwq->pool->lock); +- return; +- } ++ if (WARN_ON(!list_empty(&work->entry))) ++ goto out; + + pwq->nr_in_flight[pwq->work_color]++; + work_flags = work_color_to_flags(pwq->work_color); +@@ -1409,7 +1412,9 @@ static void __queue_work(int cpu, struct + + insert_work(pwq, work, worklist, work_flags); + ++out: + spin_unlock(&pwq->pool->lock); ++ rcu_read_unlock(); + } + + /** +@@ -2817,14 +2822,14 @@ static bool start_flush_work(struct work + + might_sleep(); + +- local_irq_disable(); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (!pool) { +- local_irq_enable(); ++ rcu_read_unlock(); + return false; + } + +- spin_lock(&pool->lock); ++ spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -2851,10 +2856,11 @@ static bool start_flush_work(struct work + else + lock_map_acquire_read(&pwq->wq->lockdep_map); + lock_map_release(&pwq->wq->lockdep_map); +- ++ rcu_read_unlock(); + return true; + already_gone: + spin_unlock_irq(&pool->lock); ++ rcu_read_unlock(); + return false; + } + +@@ -3172,7 +3178,8 @@ static ssize_t wq_pool_ids_show(struct d + const char *delim = ""; + int node, 
written = 0; + +- rcu_read_lock_sched(); ++ get_online_cpus(); ++ rcu_read_lock(); + for_each_node(node) { + written += scnprintf(buf + written, PAGE_SIZE - written, + "%s%d:%d", delim, node, +@@ -3180,7 +3187,8 @@ static ssize_t wq_pool_ids_show(struct d + delim = " "; + } + written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); ++ put_online_cpus(); + + return written; + } +@@ -3546,7 +3554,7 @@ static void rcu_free_pool(struct rcu_hea + * put_unbound_pool - put a worker_pool + * @pool: worker_pool to put + * +- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU ++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU + * safe manner. get_unbound_pool() calls this function on its failure path + * and this function should be able to release pools which went through, + * successfully or not, init_worker_pool(). +@@ -3593,8 +3601,8 @@ static void put_unbound_pool(struct work + del_timer_sync(&pool->idle_timer); + del_timer_sync(&pool->mayday_timer); + +- /* sched-RCU protected to allow dereferences from get_work_pool() */ +- call_rcu_sched(&pool->rcu, rcu_free_pool); ++ /* RCU protected to allow dereferences from get_work_pool() */ ++ call_rcu(&pool->rcu, rcu_free_pool); + } + + /** +@@ -3707,7 +3715,7 @@ static void pwq_unbound_release_workfn(s + put_unbound_pool(pool); + mutex_unlock(&wq_pool_mutex); + +- call_rcu_sched(&pwq->rcu, rcu_free_pwq); ++ call_rcu(&pwq->rcu, rcu_free_pwq); + + /* + * If we're the last pwq going away, @wq is already dead and no one +@@ -4420,7 +4428,8 @@ bool workqueue_congested(int cpu, struct + struct pool_workqueue *pwq; + bool ret; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); ++ preempt_disable(); + + if (cpu == WORK_CPU_UNBOUND) + cpu = smp_processor_id(); +@@ -4431,7 +4440,8 @@ bool workqueue_congested(int cpu, struct + pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + + ret = !list_empty(&pwq->delayed_works); +- rcu_read_unlock_sched(); ++ preempt_enable(); ++ rcu_read_unlock(); + + return ret; + } +@@ -4457,16 +4467,15 @@ unsigned int work_busy(struct work_struc + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- local_irq_save(flags); ++ rcu_read_lock() + pool = get_work_pool(work); + if (pool) { +- spin_lock(&pool->lock); ++ spin_lock_irqsave(&pool->lock, flags); + if (find_worker_executing_work(pool, work)) + ret |= WORK_BUSY_RUNNING; +- spin_unlock(&pool->lock); ++ spin_unlock_irqrestore(&pool->lock, flags); + } +- local_irq_restore(flags); +- ++ rcu_read_unlock(); + return ret; + } + EXPORT_SYMBOL_GPL(work_busy); +@@ -4914,16 +4923,16 @@ bool freeze_workqueues_busy(void) + * nr_active is monotonically decreasing. It's safe + * to peek without lock. 
+ */ +- rcu_read_lock_sched(); ++ rcu_read_lock(); + for_each_pwq(pwq, wq) { + WARN_ON_ONCE(pwq->nr_active < 0); + if (pwq->nr_active) { + busy = true; +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + goto out_unlock; + } + } +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + out_unlock: + mutex_unlock(&wq_pool_mutex); diff --git a/debian/patches/features/all/rt/write_lock-migrate_disable-pushdown-to-rt_write_lock.patch b/debian/patches/features/all/rt/write_lock-migrate_disable-pushdown-to-rt_write_lock.patch new file mode 100644 index 000000000..9c0bff97d --- /dev/null +++ b/debian/patches/features/all/rt/write_lock-migrate_disable-pushdown-to-rt_write_lock.patch @@ -0,0 +1,145 @@ +From 96e366850784a9103b94c321cf5e7eb34e3b6c14 Mon Sep 17 00:00:00 2001 +From: Nicholas Mc Guire +Date: Thu, 2 Jan 2014 10:18:42 +0100 +Subject: [PATCH 4/7] write_lock migrate_disable pushdown to rt_write_lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +pushdown of migrate_disable/enable from write_*lock* to the rt_write_*lock* +api level + +general mapping of write_*lock* to mutexes: + +write_*lock* + `-> rt_write_*lock* + `-> __spin_lock (the sleeping __spin_lock) + `-> rt_mutex + +write_*lock*s are non-recursive so we have two lock chains to consider + - write_trylock*/write_unlock + - write_lock*/wirte_unlock +for both paths the migration_disable/enable must be balanced. + +write_trylock* mapping: + +write_trylock_irqsave + `-> rt_write_trylock_irqsave +write_trylock \ + `--------> rt_write_trylock + ret = rt_mutex_trylock + rt_mutex_fasttrylock + rt_mutex_cmpxchg + if (ret) + migrate_disable + +write_lock* mapping: + + write_lock_irqsave + `-> rt_write_lock_irqsave +write_lock_irq -> write_lock ----. \ + write_lock_bh -+ \ + `-> rt_write_lock + __rt_spin_lock() + rt_spin_lock_fastlock() + rt_mutex_cmpxchg() + migrate_disable() + +write_unlock* mapping: + + write_unlock_irqrestore. + write_unlock_bh -------+ +write_unlock_irq -> write_unlock ----------+ + `-> rt_write_unlock() + __rt_spin_unlock() + rt_spin_lock_fastunlock() + rt_mutex_cmpxchg() + migrate_enable() + +So calls to migrate_disable/enable() are better placed at the rt_write_* +level of lock/trylock/unlock as all of the write_*lock* API has this as a +common path. 
+ +This approach to write_*_bh also eliminates the concerns raised with +regards to api inbalances (write_lock_bh -> write_unlock+local_bh_enable) + +Tested-by: Carsten Emde +Signed-off-by: Nicholas Mc Guire +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/rwlock_rt.h | 6 ------ + kernel/locking/rt.c | 4 ++-- + 2 files changed, 2 insertions(+), 8 deletions(-) + +--- a/include/linux/rwlock_rt.h ++++ b/include/linux/rwlock_rt.h +@@ -40,7 +40,6 @@ extern void __rt_rwlock_init(rwlock_t *r + #define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ +- migrate_disable(); \ + flags = rt_write_lock_irqsave(lock); \ + } while (0) + +@@ -61,14 +60,12 @@ extern void __rt_rwlock_init(rwlock_t *r + + #define write_lock(lock) \ + do { \ +- migrate_disable(); \ + rt_write_lock(lock); \ + } while (0) + + #define write_lock_bh(lock) \ + do { \ + local_bh_disable(); \ +- migrate_disable(); \ + rt_write_lock(lock); \ + } while (0) + +@@ -92,13 +89,11 @@ extern void __rt_rwlock_init(rwlock_t *r + #define write_unlock(lock) \ + do { \ + rt_write_unlock(lock); \ +- migrate_enable(); \ + } while (0) + + #define write_unlock_bh(lock) \ + do { \ + rt_write_unlock(lock); \ +- migrate_enable(); \ + local_bh_enable(); \ + } while (0) + +@@ -117,7 +112,6 @@ extern void __rt_rwlock_init(rwlock_t *r + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_write_unlock(lock); \ +- migrate_enable(); \ + } while (0) + + #endif +--- a/kernel/locking/rt.c ++++ b/kernel/locking/rt.c +@@ -197,8 +197,6 @@ int __lockfunc rt_write_trylock_irqsave( + + *flags = 0; + ret = rt_write_trylock(rwlock); +- if (ret) +- migrate_disable(); + return ret; + } + EXPORT_SYMBOL(rt_write_trylock_irqsave); +@@ -232,6 +230,7 @@ EXPORT_SYMBOL(rt_read_trylock); + void __lockfunc rt_write_lock(rwlock_t *rwlock) + { + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ migrate_disable(); + __rt_spin_lock(&rwlock->lock); + } + EXPORT_SYMBOL(rt_write_lock); +@@ -257,6 +256,7 @@ void __lockfunc rt_write_unlock(rwlock_t + /* NOTE: we always pass in '1' for nested, for simplicity */ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); + } + EXPORT_SYMBOL(rt_write_unlock); + diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch new file mode 100644 index 000000000..50a70243b --- /dev/null +++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch @@ -0,0 +1,113 @@ +Subject: x86: crypto: Reduce preempt disabled regions +From: Peter Zijlstra +Date: Mon, 14 Nov 2011 18:19:27 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Restrict the preempt disabled regions to the actual floating point +operations and enable preemption for the administrative actions. + +This is necessary on RT to avoid that kfree and other operations are +called with preemption disabled. 
+ +Reported-and-tested-by: Carsten Emde +Signed-off-by: Peter Zijlstra +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++----------- + 1 file changed, 13 insertions(+), 11 deletions(-) + +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, +- nbytes & AES_BLOCK_MASK); ++ nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -484,18 +484,20 @@ static int ctr_crypt(struct blkcipher_de + err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } +- kernel_fpu_end(); + + return err; + } diff --git a/debian/patches/features/all/rt/x86-disable-debug-stack.patch b/debian/patches/features/all/rt/x86-disable-debug-stack.patch new file mode 100644 index 000000000..f36801cb9 --- /dev/null +++ b/debian/patches/features/all/rt/x86-disable-debug-stack.patch @@ -0,0 +1,103 @@ +From: Andi Kleen +Date: Fri, 3 Jul 2009 08:44:10 -0500 +Subject: x86: Disable IST stacks for debug/int 3/stack fault for PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Normally the x86-64 trap handlers for debug/int 3/stack fault run +on a special interrupt stack to make 
them more robust +when dealing with kernel code. + +The PREEMPT_RT kernel can sleep in locks even while allocating +GFP_ATOMIC memory. When one of these trap handlers needs to send +real time signals for ptrace it allocates memory and could then +try to to schedule. But it is not allowed to schedule on a +IST stack. This can cause warnings and hangs. + +This patch disables the IST stacks for these handlers for PREEMPT_RT +kernel. Instead let them run on the normal process stack. + +The kernel only really needs the ISTs here to make kernel debuggers more +robust in case someone sets a break point somewhere where the stack is +invalid. But there are no kernel debuggers in the standard kernel +that do this. + +It also means kprobes cannot be set in situations with invalid stack; +but that sounds like a reasonable restriction. + +The stack fault change could minimally impact oops quality, but not very +much because stack faults are fairly rare. + +A better solution would be to use similar logic as the NMI "paranoid" +path: check if signal is for user space, if yes go back to entry.S, switch stack, +call sync_regs, then do the signal sending etc. + +But this patch is much simpler and should work too with minimal impact. + +Signed-off-by: Andi Kleen +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + arch/x86/include/asm/page_64_types.h | 21 +++++++++++++++------ + arch/x86/kernel/cpu/common.c | 2 ++ + arch/x86/kernel/dumpstack_64.c | 4 ++++ + 3 files changed, 21 insertions(+), 6 deletions(-) + +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -14,12 +14,21 @@ + #define IRQ_STACK_ORDER 2 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +-#define STACKFAULT_STACK 1 +-#define DOUBLEFAULT_STACK 2 +-#define NMI_STACK 3 +-#define DEBUG_STACK 4 +-#define MCE_STACK 5 +-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define STACKFAULT_STACK 0 ++# define DOUBLEFAULT_STACK 1 ++# define NMI_STACK 2 ++# define DEBUG_STACK 0 ++# define MCE_STACK 3 ++# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ ++#else ++# define STACKFAULT_STACK 1 ++# define DOUBLEFAULT_STACK 2 ++# define NMI_STACK 3 ++# define DEBUG_STACK 4 ++# define MCE_STACK 5 ++# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#endif + + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1116,7 +1116,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu + */ + static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, ++#if DEBUG_STACK > 0 + [DEBUG_STACK - 1] = DEBUG_STKSZ ++#endif + }; + + static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c +@@ -21,10 +21,14 @@ + (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) + + static char x86_stack_ids[][8] = { ++#if DEBUG_STACK > 0 + [ DEBUG_STACK-1 ] = "#DB", ++#endif + [ NMI_STACK-1 ] = "NMI", + [ DOUBLEFAULT_STACK-1 ] = "#DF", ++#if STACKFAULT_STACK > 0 + [ STACKFAULT_STACK-1 ] = "#SS", ++#endif + [ MCE_STACK-1 ] = "#MC", + #if DEBUG_STKSZ > EXCEPTION_STKSZ + [ N_EXCEPTION_STACKS ... 
diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch new file mode 100644 index 000000000..32bc1eeb7 --- /dev/null +++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch @@ -0,0 +1,27 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:27 -0500 +Subject: x86: Do not unmask io_apic when interrupt is in progress +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +With threaded interrupts we might see an interrupt in progress on +migration. Do not unmask it when this is the case. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + arch/x86/kernel/apic/io_apic.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -2393,7 +2393,8 @@ static bool io_apic_level_ack_pending(st + static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) + { + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + mask_ioapic(cfg); + return true; + } diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch new file mode 100644 index 000000000..2e60da14a --- /dev/null +++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch @@ -0,0 +1,26 @@ +Subject: x86-kvm-require-const-tsc-for-rt.patch +From: Thomas Gleixner +Date: Sun, 06 Nov 2011 12:26:18 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/kvm/x86.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5532,6 +5532,13 @@ int kvm_arch_init(void *opaque) + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; diff --git a/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch b/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch new file mode 100644 index 000000000..9a1ed506c --- /dev/null +++ b/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch @@ -0,0 +1,164 @@ +From df12896518bc6db6a717de580116a07cdd19fbd9 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Thu, 11 Apr 2013 14:33:34 -0400 +Subject: [PATCH 4/5] x86/mce: Defer mce wakeups to threads for PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +We had a customer report a lockup on a 3.0-rt kernel that had the +following backtrace: + +[ffff88107fca3e80] rt_spin_lock_slowlock at ffffffff81499113 +[ffff88107fca3f40] rt_spin_lock at ffffffff81499a56 +[ffff88107fca3f50] __wake_up at ffffffff81043379 +[ffff88107fca3f80] mce_notify_irq at ffffffff81017328 +[ffff88107fca3f90] intel_threshold_interrupt at ffffffff81019508 +[ffff88107fca3fa0] smp_threshold_interrupt at ffffffff81019fc1 +[ffff88107fca3fb0] threshold_interrupt at ffffffff814a1853 + +It actually bugged because the lock was taken by the same owner that +already had that lock. 
What happened was the thread that was setting +itself on a wait queue had the lock when an MCE triggered. The MCE +interrupt does a wake up on its wait list and grabs the same lock. + +NOTE: THIS IS NOT A BUG ON MAINLINE + +Sorry for yelling, but as I Cc'd mainline maintainers I want them to +know that this is an PREEMPT_RT bug only. I only Cc'd them for advice. + +On PREEMPT_RT the wait queue locks are converted from normal +"spin_locks" into an rt_mutex (see the rt_spin_lock_slowlock above). +These are not to be taken by hard interrupt context. This usually isn't +a problem as most all interrupts in PREEMPT_RT are converted into +schedulable threads. Unfortunately that's not the case with the MCE irq. + +As wait queue locks are notorious for long hold times, we can not +convert them to raw_spin_locks without causing issues with -rt. But +Thomas has created a "simple-wait" structure that uses raw spin locks +which may have been a good fit. + +Unfortunately, wait queues are not the only issue, as the mce_notify_irq +also does a schedule_work(), which grabs the workqueue spin locks that +have the exact same issue. + +Thus, this patch I'm proposing is to move the actual work of the MCE +interrupt into a helper thread that gets woken up on the MCE interrupt +and does the work in a schedulable context. + +NOTE: THIS PATCH ONLY CHANGES THE BEHAVIOR WHEN PREEMPT_RT IS SET + +Oops, sorry for yelling again, but I want to stress that I keep the same +behavior of mainline when PREEMPT_RT is not set. Thus, this only changes +the MCE behavior when PREEMPT_RT is configured. + +Signed-off-by: Steven Rostedt +[bigeasy@linutronix: make mce_notify_work() a proper prototype, use + kthread_run()] +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/kernel/cpu/mcheck/mce.c | 73 ++++++++++++++++++++++++++++++++------- + 1 file changed, 61 insertions(+), 12 deletions(-) + +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1355,6 +1356,63 @@ static void mce_do_trigger(struct work_s + + static DECLARE_WORK(mce_trigger_work, mce_do_trigger); + ++static void __mce_notify_work(void) ++{ ++ /* Not more than two messages every minute */ ++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); ++ ++ /* wake processes polling /dev/mcelog */ ++ wake_up_interruptible(&mce_chrdev_wait); ++ ++ /* ++ * There is no risk of missing notifications because ++ * work_pending is always cleared before the function is ++ * executed. ++ */ ++ if (mce_helper[0] && !work_pending(&mce_trigger_work)) ++ schedule_work(&mce_trigger_work); ++ ++ if (__ratelimit(&ratelimit)) ++ pr_info(HW_ERR "Machine check events logged\n"); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++struct task_struct *mce_notify_helper; ++ ++static int mce_notify_helper_thread(void *unused) ++{ ++ while (1) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ if (kthread_should_stop()) ++ break; ++ __mce_notify_work(); ++ } ++ return 0; ++} ++ ++static int mce_notify_work_init(void) ++{ ++ mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL, ++ "mce-notify"); ++ if (!mce_notify_helper) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void mce_notify_work(void) ++{ ++ wake_up_process(mce_notify_helper); ++} ++#else ++static void mce_notify_work(void) ++{ ++ __mce_notify_work(); ++} ++static inline int mce_notify_work_init(void) { return 0; } ++#endif ++ + /* + * Notify the user(s) about new machine check events. 
+ * Can be called from interrupt context, but not from machine check/NMI +@@ -1362,19 +1420,8 @@ static DECLARE_WORK(mce_trigger_work, mc + */ + int mce_notify_irq(void) + { +- /* Not more than two messages every minute */ +- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); +- + if (test_and_clear_bit(0, &mce_need_notify)) { +- /* wake processes polling /dev/mcelog */ +- wake_up_interruptible(&mce_chrdev_wait); +- +- if (mce_helper[0]) +- schedule_work(&mce_trigger_work); +- +- if (__ratelimit(&ratelimit)) +- pr_info(HW_ERR "Machine check events logged\n"); +- ++ mce_notify_work(); + return 1; + } + return 0; +@@ -2456,6 +2503,8 @@ static __init int mcheck_init_device(voi + /* register character device /dev/mcelog */ + misc_register(&mce_chrdev_device); + ++ err = mce_notify_work_init(); ++ + return err; + } + device_initcall_sync(mcheck_init_device); diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch new file mode 100644 index 000000000..dfebaae48 --- /dev/null +++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch @@ -0,0 +1,190 @@ +From: Thomas Gleixner +Date: Mon, 13 Dec 2010 16:33:39 +0100 +Subject: x86: Convert mce timer to hrtimer +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +mce_timer is started in atomic contexts of cpu bringup. This results +in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to +avoid this. + +Signed-off-by: Thomas Gleixner +fold in: +|From: Mike Galbraith +|Date: Wed, 29 May 2013 13:52:13 +0200 +|Subject: [PATCH] x86/mce: fix mce timer interval +| +|Seems mce timer fire at the wrong frequency in -rt kernels since roughly +|forever due to 32 bit overflow. 3.8-rt is also missing a multiplier. +| +|Add missing us -> ns conversion and 32 bit overflow prevention. 
+| +|Signed-off-by: Mike Galbraith +|[bigeasy: use ULL instead of u64 cast] +|Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/kernel/cpu/mcheck/mce.c | 58 ++++++++++++++++++++++----------------- + 1 file changed, 34 insertions(+), 24 deletions(-) + +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1268,7 +1269,7 @@ void mce_log_therm_throt_event(__u64 sta + static unsigned long check_interval = 5 * 60; /* 5 minutes */ + + static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); + + static unsigned long mce_adjust_timer_default(unsigned long interval) + { +@@ -1278,13 +1279,10 @@ static unsigned long mce_adjust_timer_de + static unsigned long (*mce_adjust_timer)(unsigned long interval) = + mce_adjust_timer_default; + +-static void mce_timer_fn(unsigned long data) ++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); + unsigned long iv; + +- WARN_ON(smp_processor_id() != data); +- + if (mce_available(__this_cpu_ptr(&cpu_info))) { + machine_check_poll(MCP_TIMESTAMP, + &__get_cpu_var(mce_poll_banks)); +@@ -1305,9 +1303,11 @@ static void mce_timer_fn(unsigned long d + __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { +- t->expires = jiffies + iv; +- add_timer_on(t, smp_processor_id()); ++ hrtimer_forward_now(timer, ns_to_ktime( ++ jiffies_to_usecs(iv) * 1000ULL)); ++ return HRTIMER_RESTART; + } ++ return HRTIMER_NORESTART; + } + + /* +@@ -1315,28 +1315,37 @@ static void mce_timer_fn(unsigned long d + */ + void mce_timer_kick(unsigned long interval) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); +- unsigned long when = jiffies + interval; ++ struct hrtimer *t = &__get_cpu_var(mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); + +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer_pinned(t, when); ++ if (hrtimer_active(t)) { ++ s64 exp; ++ s64 intv_us; ++ ++ intv_us = jiffies_to_usecs(interval); ++ exp = ktime_to_us(hrtimer_expires_remaining(t)); ++ if (intv_us < exp) { ++ hrtimer_cancel(t); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(intv_us * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); ++ } + } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + if (interval < iv) + __this_cpu_write(mce_next_interval, interval); + } + +-/* Must not be called in IRQ context where del_timer_sync() can deadlock */ ++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ + static void mce_timer_delete_all(void) + { + int cpu; + + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } + + static void mce_do_trigger(struct work_struct *work) +@@ -1636,7 +1645,7 @@ static void __mcheck_cpu_init_vendor(str + } + } + +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = check_interval * HZ; + +@@ -1645,16 +1654,17 @@ static void mce_start_timer(unsigned int + + per_cpu(mce_next_interval, cpu) = iv; + +- t->expires = round_jiffies(jiffies + iv); +- 
add_timer_on(t, cpu); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); ++ struct hrtimer *t = &__get_cpu_var(mce_timer); + unsigned int cpu = smp_processor_id(); + +- setup_timer(t, mce_timer_fn, cpu); ++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ t->function = mce_timer_fn; + mce_start_timer(cpu, t); + } + +@@ -2331,6 +2341,8 @@ static void mce_disable_cpu(void *h) + if (!mce_available(__this_cpu_ptr(&cpu_info))) + return; + ++ hrtimer_cancel(&__get_cpu_var(mce_timer)); ++ + if (!(action & CPU_TASKS_FROZEN)) + cmci_clear(); + for (i = 0; i < mca_cfg.banks; i++) { +@@ -2357,6 +2369,7 @@ static void mce_reenable_cpu(void *h) + if (b->init) + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); + } ++ __mcheck_cpu_init_timer(); + } + + /* Get notified when a cpu comes on/off. Be hotplug friendly. */ +@@ -2364,7 +2377,6 @@ static int + mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; +- struct timer_list *t = &per_cpu(mce_timer, cpu); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: +@@ -2380,11 +2392,9 @@ mce_cpu_callback(struct notifier_block * + break; + case CPU_DOWN_PREPARE: + smp_call_function_single(cpu, mce_disable_cpu, &action, 1); +- del_timer_sync(t); + break; + case CPU_DOWN_FAILED: + smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); +- mce_start_timer(cpu, t); + break; + } + diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch new file mode 100644 index 000000000..f4e6eeca9 --- /dev/null +++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch @@ -0,0 +1,177 @@ +Subject: x86-preempt-lazy.patch +From: Thomas Gleixner +Date: Thu, 01 Nov 2012 11:03:47 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/Kconfig | 1 + + arch/x86/include/asm/thread_info.h | 6 ++++++ + arch/x86/kernel/asm-offsets.c | 1 + + arch/x86/kernel/entry_32.S | 17 +++++++++++++++-- + arch/x86/kernel/entry_64.S | 28 ++++++++++++++++++++-------- + 5 files changed, 43 insertions(+), 10 deletions(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -21,6 +21,7 @@ config X86_64 + ### Arch settings + config X86 + def_bool y ++ select HAVE_PREEMPT_LAZY + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -29,6 +29,8 @@ struct thread_info { + __u32 status; /* thread synchronous flags */ + __u32 cpu; /* current CPU */ + int saved_preempt_count; ++ int preempt_lazy_count; /* 0 => lazy preemptable ++ <0 => BUG */ + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +@@ -80,6 +82,7 @@ struct thread_info { + #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SECCOMP 8 /* secure computing */ ++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ + #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ + #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ + #define TIF_UPROBE 12 /* breakpointed or singlestepping */ +@@ -104,6 +107,7 @@ struct thread_info { + #define _TIF_SYSCALL_EMU (1 << 
TIF_SYSCALL_EMU) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) + #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) + #define _TIF_UPROBE (1 << TIF_UPROBE) +@@ -153,6 +157,8 @@ struct thread_info { + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) + ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) ++ + #ifdef CONFIG_X86_32 + + #define STACK_WARN (THREAD_SIZE/8) +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c +@@ -32,6 +32,7 @@ void common(void) { + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); ++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -363,8 +363,21 @@ END(ret_from_exception) + ENTRY(resume_kernel) + DISABLE_INTERRUPTS(CLBR_ANY) + need_resched: ++ # preempt count == 0 + NEED_RS set? + cmpl $0,PER_CPU_VAR(__preempt_count) ++ jz test_int_off ++ ++ # atleast preempt count == 0 ? ++ cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count) ++ jne restore_all ++ ++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? + jnz restore_all ++ ++ testl $_TIF_NEED_RESCHED_LAZY, %ecx ++ jz restore_all ++ ++test_int_off: + testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? + jz restore_all + call preempt_schedule_irq +@@ -604,7 +617,7 @@ ENDPROC(system_call) + ALIGN + RING0_PTREGS_FRAME # can't unwind into user space anyway + work_pending: +- testb $_TIF_NEED_RESCHED, %cl ++ testl $_TIF_NEED_RESCHED_MASK, %ecx + jz work_notifysig + work_resched: + call schedule +@@ -617,7 +630,7 @@ ENDPROC(system_call) + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other + # than syscall tracing? + jz restore_all +- testb $_TIF_NEED_RESCHED, %cl ++ testl $_TIF_NEED_RESCHED_MASK, %ecx + jnz work_resched + + work_notifysig: # deal with pending signals and +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -658,8 +658,8 @@ GLOBAL(system_call_after_swapgs) + /* Handle reschedules */ + /* edx: work, edi: workmask */ + sysret_careful: +- bt $TIF_NEED_RESCHED,%edx +- jnc sysret_signal ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz sysret_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -771,8 +771,8 @@ GLOBAL(int_with_check) + /* First do a reschedule test. */ + /* edx: work, edi: workmask */ + int_careful: +- bt $TIF_NEED_RESCHED,%edx +- jnc int_very_careful ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz int_very_careful + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1071,8 +1071,8 @@ ENTRY(native_iret) + /* edi: workmask, edx: work */ + retint_careful: + CFI_RESTORE_STATE +- bt $TIF_NEED_RESCHED,%edx +- jnc retint_signal ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz retint_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1104,7 +1104,19 @@ ENTRY(native_iret) + /* rcx: threadinfo. interrupts off. */ + ENTRY(retint_kernel) + cmpl $0,PER_CPU_VAR(__preempt_count) +- jnz retint_restore_args ++ jz check_int_off ++ ++ # atleast preempt count == 0 ? 
++ cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count) ++ jnz retint_restore_args ++ ++ cmpl $0, TI_preempt_lazy_count(%rcx) ++ jnz retint_restore_args ++ ++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) ++ jnc retint_restore_args ++ ++check_int_off: + bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ + jnc retint_restore_args + call preempt_schedule_irq +@@ -1540,7 +1552,7 @@ ENTRY(paranoid_exit) + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ +- testl $_TIF_NEED_RESCHED,%ebx ++ testl $_TIF_NEED_RESCHED_MASK,%ebx + jnz paranoid_schedule + movl %ebx,%edx /* arg3: thread flags */ + TRACE_IRQS_ON diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch new file mode 100644 index 000000000..5e0c9e667 --- /dev/null +++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch @@ -0,0 +1,48 @@ +From: Thomas Gleixner +Date: Thu, 16 Dec 2010 14:25:18 +0100 +Subject: x86: stackprotector: Avoid random pool on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +CPU bringup calls into the random pool to initialize the stack +canary. During boot that works nicely even on RT as the might sleep +checks are disabled. During CPU hotplug the might sleep checks +trigger. Making the locks in random raw is a major PITA, so avoid the +call on RT is the only sensible solution. This is basically the same +randomness which we get during boot where the random pool has no +entropy and we rely on the TSC randomnness. + +Reported-by: Carsten Emde +Signed-off-by: Thomas Gleixner + +--- + arch/x86/include/asm/stackprotector.h | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -57,7 +57,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; + + #ifdef CONFIG_X86_64 +@@ -68,8 +68,16 @@ static __always_inline void boot_init_st + * of randomness. The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of ++ * it. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = __native_read_tsc(); + canary += tsc + (tsc << 32UL); + diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch new file mode 100644 index 000000000..7fa7a7e97 --- /dev/null +++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch @@ -0,0 +1,29 @@ +From: Thomas Gleixner +Date: Sun, 26 Jul 2009 02:21:32 +0200 +Subject: x86: Use generic rwsem_spinlocks on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz + +Simplifies the separation of anon_rw_semaphores and rw_semaphores for +-rt. 
+ +Signed-off-by: Thomas Gleixner + +--- + arch/x86/Kconfig | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -183,8 +183,11 @@ config ARCH_MAY_HAVE_PC_FDC + def_bool y + depends on ISA_DMA_API + ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_CALIBRATE_DELAY + def_bool y diff --git a/debian/patches/series-rt b/debian/patches/series-rt new file mode 100644 index 000000000..3d065a8e9 --- /dev/null +++ b/debian/patches/series-rt @@ -0,0 +1,652 @@ +########################################################### +# DELTA against a known Linus release +########################################################### + +############################################################ +# UPSTREAM changes queued +############################################################ +features/all/rt/rt-sched-numa-Move-task_numa_free-to-__put_task_stru.patch + +# SCHED +features/all/rt/sched-Init-idle-on_rq-in-init_idle.patch +features/all/rt/sched-Check-for-idle-task-in-might_sleep.patch +features/all/rt/sched-Add-better-debug-output-for-might_sleep.patch +features/all/rt/sched-Adjust-p-sched_reset_on_fork-when-nothing-else.patch +features/all/rt/sched-Queue-RT-tasks-to-head-when-prio-drops.patch +features/all/rt/sched-Consider-pi-boosting-in-setscheduler.patch +features/all/rt/sched-Fix-broken-setscheduler.patch + +############################################################ +# UPSTREAM FIXES, patches pending +############################################################ + +############################################################ +# Stuff broken upstream, patches submitted +############################################################ + +############################################################ +# Stuff which needs addressing upstream, but requires more +# information +############################################################ +#x86-hpet-disable-msi-on-lenovo-w510.patch + +############################################################ +# Stuff broken upstream, need to be sent +############################################################ + +############################################################ +# Submitted on LKML +############################################################ + +# SPARC part of erly printk consolidation +features/all/rt/early-printk-consolidate.patch +features/all/rt/sparc-provide-EARLY_PRINTK-for-SPARC.patch +features/all/rt/0001-sparc64-use-generic-rwsem-spinlocks-rt.patch + +# XXX 0002-sparc64-convert-spinlock_t-to-raw_spinlock_t-in-mmu_.patch +# XXX 0003-sparc64-convert-ctx_alloc_lock-raw_spinlock_t.patch + +# SRCU +features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch + +############################################################ +# Submitted to mips ML +############################################################ + +############################################################ +# Submitted to ARM ML +############################################################ + +############################################################ +# Submitted to PPC ML +############################################################ + +############################################################ +# Submitted on LKML +############################################################ + +############################################################ +# Submitted to net-dev 
+############################################################ + +############################################################ +# Pending in tip +############################################################ + +############################################################ +# Stuff which should go upstream ASAP +############################################################ + +# SCHED BLOCK/WQ +features/all/rt/block-shorten-interrupt-disabled-regions.patch + +# Timekeeping split jiffies lock. Needs a good argument :) +features/all/rt/timekeeping-split-jiffies-lock.patch + +# CHECKME: Should local_irq_enable() generally do a preemption check ? +features/all/rt/vtime-split-lock-and-seqcount.patch +features/all/rt/mips-enable-interrupts-in-signal.patch + +# Tracing +features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch + +# PTRACE/SIGNAL crap +features/all/rt/signal-revert-ptrace-preempt-magic.patch +# wait for feedback +features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch + +# ARM lock annotation +features/all/rt/arm-convert-boot-lock-to-raw.patch + +# PREEMPT_ENABLE_NO_RESCHED + +# SIGNALS / POSIXTIMERS +features/all/rt/posix-timers-no-broadcast.patch +features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +features/all/rt/oleg-signal-rt-fix.patch + +# SCHED + +# GENERIC CMPXCHG + +# SHORTEN PREEMPT DISABLED +features/all/rt/drivers-random-reduce-preempt-disabled-region.patch + +# CLOCKSOURCE +features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch +features/all/rt/clocksource-tclib-allow-higher-clockrates.patch + +# DRIVERS NET +features/all/rt/drivers-net-8139-disable-irq-nosync.patch + +# PREEMPT + +# PAGEFAULT DISABLE +features/all/rt/mm-prepare-pf-disable-discoupling.patch +features/all/rt/arch-use-pagefault-disabled.patch +features/all/rt/peter_zijlstra-frob-pagefault_disable.patch +features/all/rt/peterz-raw_pagefault_disable.patch +features/all/rt/filemap-fix-up.patch +features/all/rt/mm-remove-preempt-count-from-pf.patch + +# PM +features/all/rt/suspend-prevernt-might-sleep-splats.patch + +# MM/LISTS +features/all/rt/mm-page-alloc-use-list-last-entry.patch + +# INCLUDE MESS +features/all/rt/pid-h-include-atomic-h.patch +features/all/rt/sysctl-include-atomic-h.patch + +# NETWORKING +features/all/rt/net-flip-lock-dep-thingy.patch +features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch + +# SOFTIRQ +features/all/rt/softirq-thread-do-softirq.patch +features/all/rt/softirq-split-out-code.patch + +# X86 +features/all/rt/x86-io-apic-migra-no-unmask.patch +features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch + +# RCU + +# LOCKING INIT FIXES + +# PCI +features/all/rt/pci-access-use-__wake_up_all_locked.patch + +# WORKQUEUE + + +##################################################### +# Stuff which should go mainline, but wants some care +##################################################### + +# SEQLOCK + +# ANON RW SEMAPHORES + +# TRACING +features/all/rt/latency-hist.patch + +# HW LATENCY DETECTOR - this really wants a rewrite +features/all/rt/hwlatdetect.patch +features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch +features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch +features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch +features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch + +################################################## +# REAL RT STUFF starts here +################################################## + +# PRINTK 
+features/all/rt/printk-kill.patch +features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch + +# Enable RT CONFIG +features/all/rt/rt-preempt-base-config.patch +features/all/rt/kconfig-disable-a-few-options-rt.patch +features/all/rt/kconfig-preempt-rt-full.patch + +# WARN/BUG_ON_RT +features/all/rt/bug-rt-dependend-variants.patch + +# LOCAL_IRQ_RT/NON_RT +features/all/rt/local-irq-rt-depending-variants.patch + +# PREEMPT NORT +features/all/rt/preempt-nort-rt-variants.patch + +# ANNOTATE local_irq_disable sites +features/all/rt/ata-disable-interrupts-if-non-rt.patch +features/all/rt/ide-use-nort-local-irq-variants.patch +features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch +features/all/rt/inpt-gameport-use-local-irq-nort.patch +#acpi-use-local-irq-nort.patch +features/all/rt/user-use-local-irq-nort.patch +features/all/rt/resource-counters-use-localirq-nort.patch +features/all/rt/usb-use-_nort-in-giveback.patch +features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch + +# Sigh +features/all/rt/signal-fix-up-rcu-wreckage.patch + +# ANNOTATE BUG/WARNON +features/all/rt/net-wireless-warn-nort.patch + +# BIT SPINLOCKS - SIGH +features/all/rt/mm-cgroup-page-bit-spinlock.patch +features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch +features/all/rt/fs-jbd-replace-bh_state-lock.patch + +# GENIRQ +features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch +features/all/rt/genirq-nodebug-shirq.patch +features/all/rt/genirq-disable-irqpoll-on-rt.patch +features/all/rt/genirq-force-threading.patch +features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch + +# DRIVERS NET +features/all/rt/drivers-net-fix-livelock-issues.patch +features/all/rt/drivers-net-vortex-fix-locking-issues.patch +features/all/rt/drivers-net-gianfar-make-rt-aware.patch +features/all/rt/net-gianfar-do-not-disable-interrupts.patch +features/all/rt/net-gianfar-do-not-try-to-cleanup-TX-packets-if-they.patch + +# DRIVERS USB +# Revisit. Looks weird +features/all/rt/usb-fix-mouse-problem-copying-large-data.patch + +# LOCAL_IRQ_LOCKS +features/all/rt/local-var.patch +features/all/rt/rt-local-irq-lock.patch +features/all/rt/use-local-spin_locks-in-local_lock.patch +features/all/rt/cpu-rt-variants.patch + +# MM PAGE_ALLOC +features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch +features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch +features/all/rt/mm-page-alloc-fix.patch + +# MM SWAP +features/all/rt/mm-convert-swap-to-percpu-locked.patch + +# MM vmstat +features/all/rt/mm-make-vmstat-rt-aware.patch + +# MM memory +features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch + +# MM bounce +features/all/rt/mm-bounce-local-irq-save-nort.patch + + +# MM SLxB +features/all/rt/mm-disable-sloub-rt.patch +features/all/rt/mm-enable-slub.patch +features/all/rt/slub-enable-irqs-for-no-wait.patch +features/all/rt/slub_delay_ctor_on_rt.patch + +# Revisit for avr/frv/ia64/mn10300/sh/sparc ... 
+#mm-quicklists-percpu-locked.patch + +# MM +features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch +features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch + +# RADIX TREE +features/all/rt/radix-tree-rt-aware.patch + +# PANIC +features/all/rt/panic-disable-random-on-rt.patch + +# IPC +features/all/rt/ipc-make-rt-aware.patch +features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch + +# RELAY +features/all/rt/relay-fix-timer-madness.patch + +# NETWORKING + +# WORKQUEUE SIGH + +# TIMERS +features/all/rt/timers-prepare-for-full-preemption.patch +features/all/rt/timers-prepare-for-full-preemption-improve.patch +features/all/rt/timers-preempt-rt-support.patch +#timers-mov-printk_tick-to-soft-interrupt.patch +features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch +features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch + +# More PRINTK +#rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch + +# HRTIMERS +features/all/rt/hrtimers-prepare-full-preemption.patch +features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch +features/all/rt/timer-fd-avoid-live-lock.patch +features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch +features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch + +# POSIX-CPU-TIMERS +features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch +features/all/rt/posix-timers-shorten-cpu-timers-thread.patch +features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch + +# SCHEDULER +features/all/rt/sched-delay-put-task.patch +features/all/rt/sched-limit-nr-migrate.patch +features/all/rt/sched-mmdrop-delayed.patch +features/all/rt/sched-rt-mutex-wakeup.patch +features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch +# CHECKME sched-load-balance-break-on-rq-contention.patch +features/all/rt/sched-cond-resched.patch +features/all/rt/cond-resched-softirq-rt.patch +features/all/rt/cond-resched-lock-rt-tweak.patch +features/all/rt/sched-disable-ttwu-queue.patch +features/all/rt/sched-disable-rt-group-sched-on-rt.patch +features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch +features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch + +# STOP MACHINE +features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch +#stomp-machine-mark-stomper-thread.patch +features/all/rt/stomp-machine-raw-lock.patch +# XXX stomp-machine-deal-clever-with-stopper-lock.patch + +# MIGRATE DISABLE AND PER CPU +features/all/rt/hotplug-light-get-online-cpus.patch +features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch +features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch +features/all/rt/sched-migrate-disable.patch +features/all/rt/hotplug-use-migrate-disable.patch + +features/all/rt/ftrace-migrate-disable-tracing.patch +features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch + +features/all/rt/migrate-disable-rt-variant.patch +features/all/rt/peter_zijlstra-frob-migrate_disable.patch +features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch +features/all/rt/sched-rt-fix-migrate_enable-thinko.patch +features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch +features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch +features/all/rt/allow-preemption-in-recursive-migrate_disable-call.patch +features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch +features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch 
+features/all/rt/sched-dont-calculate-hweight-in-update_migrate_disab.patch +features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch + +# FTRACE +# XXX checkme ftrace-crap.patch +# CHECKME rt-ring-buffer-convert-reader_lock-from-raw_spin_lock-into-spin_lock.patch +# CHECKME rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch + +# NETWORKING +features/all/rt/net-netif_rx_ni-migrate-disable.patch + +# NOHZ +features/all/rt/softirq-sanitize-softirq-pending.patch + +# LOCKDEP +features/all/rt/lockdep-no-softirq-accounting-on-rt.patch + +# SOFTIRQ local lock +features/all/rt/mutex-no-spin-on-rt.patch +features/all/rt/softirq-local-lock.patch +# XXX checkme softirq-fix-unplug-deadlock.patch +features/all/rt/softirq-disable-softirq-stacks-for-rt.patch +features/all/rt/softirq-make-fifo.patch +features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch + +# LOCAL VARS and GETCPU STUFF +features/all/rt/local-vars-migrate-disable.patch + +# RAID5 +features/all/rt/md-raid5-percpu-handling-rt-aware.patch + +# FUTEX/RTMUTEX +features/all/rt/rtmutex-futex-prepare-rt.patch +features/all/rt/futex-requeue-pi-fix.patch +features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch + +# RTMUTEX +features/all/rt/rtmutex-lock-killable.patch +features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch +features/all/rt/spinlock-types-separate-raw.patch +features/all/rt/rtmutex-avoid-include-hell.patch +features/all/rt/rt-add-rt-spinlock-to-headers.patch +features/all/rt/rt-add-rt-to-mutex-headers.patch +features/all/rt/rwsem-add-rt-variant.patch +features/all/rt/rt-add-rt-locks.patch +features/all/rt/condition-migration_disable-on-lock-acquisition.patch +features/all/rt/migrate_disable-pushd-down-in-atomic_dec_and_spin_lo.patch +features/all/rt/migrate_disable-pushd-down-in-rt_spin_trylock_irqsav.patch +features/all/rt/migrate_disable-pushd-down-in-rt_write_trylock_irqsa.patch +features/all/rt/write_lock-migrate_disable-pushdown-to-rt_write_lock.patch +features/all/rt/read_lock-migrate_disable-pushdown-to-rt_read_lock.patch +features/all/rt/rt-Cleanup-of-unnecessary-do-while-0-in-read-write-_.patch +features/all/rt/percpu-rwsem-compilefix.patch +features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch +features/all/rt/rtmutex-ww-bad-return-value-in-__mutex_lock_check_stamp.patch + +# RTMUTEX Fallout +features/all/rt/tasklist-lock-fix-section-conflict.patch + +# NOHZ/RTMUTEX +features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch +features/all/rt/timers-do-not-raise-softirq-unconditionally.patch +features/all/rt/timer-Raise-softirq-if-there-s-irq_work.patch +features/all/rt/timer-rt-Always-raise-the-softirq-if-there-s-irq_wor.patch +features/all/rt/rtmutex-use-a-trylock-for-waiter-lock-in-trylock.patch + +# RCU +features/all/rt/peter_zijlstra-frob-rcu.patch +features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +features/all/rt/rcu-tiny-merge-bh.patch +features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch +features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch + +# LGLOCKS - lovely +features/all/rt/lglocks-rt.patch + +# DRIVERS SERIAL +features/all/rt/drivers-serial-cleanup-locking-for-rt.patch +features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch +features/all/rt/drivers-tty-fix-omap-lock-crap.patch +features/all/rt/drivers-tty-pl011-irq-disable-madness.patch 
+features/all/rt/rt-serial-warn-fix.patch + +# FS +features/all/rt/fs-namespace-preemption-fix.patch +features/all/rt/mm-protect-activate-switch-mm.patch +features/all/rt/fs-block-rt-support.patch +features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch +features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch +features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch + +# X86 +features/all/rt/x86-mce-timer-hrtimer.patch +features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch +features/all/rt/x86-stackprot-no-random-on-rt.patch +features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch +features/all/rt/x86-disable-debug-stack.patch +features/all/rt/Revert-x86-Disable-IST-stacks-for-debug-int-3-stack-.patch + +# CPU get light +features/all/rt/epoll-use-get-cpu-light.patch +features/all/rt/mm-vmalloc-use-get-cpu-light.patch +features/all/rt/block-mq-use-cpu_light.patch + +# CPU CHILL +features/all/rt/rt-introduce-cpu-chill.patch +features/all/rt/rt-Make-cpu_chill-use-hrtimer-instead-of-msleep.patch +features/all/rt/kernel-hrtimer-be-non-freezeable-in-cpu_chill.patch +features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch + +# BLOCK LIVELOCK PREVENTION +features/all/rt/block-use-cpu-chill.patch + +# FS LIVELOCK PREVENTION +features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch +features/all/rt/net-use-cpu-chill.patch + +# WORKQUEUE more fixes +features/all/rt/workqueue-use-rcu.patch +features/all/rt/workqueue-use-locallock.patch +features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch +features/all/rt/workqueue-distangle-from-rq-lock.patch + +# IDR +features/all/rt/idr-use-local-lock-for-protection.patch +features/all/rt/percpu_ida-use-locklocks.patch + +# DEBUGOBJECTS +features/all/rt/debugobjects-rt.patch + +# JUMPLABEL +features/all/rt/jump-label-rt.patch + +# NET +features/all/rt/skbufhead-raw-lock.patch + +# PERF +features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch +features/all/rt/irq_work-allow-certain-work-in-hard-irq-context.patch + +# CONSOLE. NEEDS more thought !!! 
+features/all/rt/printk-rt-aware.patch +features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch + +# POWERC +features/all/rt/power-use-generic-rwsem-on-rt.patch +features/all/rt/power-disable-highmem-on-rt.patch + +# ARM +features/all/rt/arm-disable-highmem-on-rt.patch +features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch +features/all/rt/arm-unwind-use_raw_lock.patch + +# MIPS +features/all/rt/mips-disable-highmem-on-rt.patch + +# NETWORK livelock fix +features/all/rt/net-tx-action-avoid-livelock-on-rt.patch + +# NETWORK DEBUGGING AID +features/all/rt/ping-sysrq.patch + +# KGDB +features/all/rt/kgb-serial-hackaround.patch + +# SYSFS - RT indicator +features/all/rt/sysfs-realtime-entry.patch + +# KMAP/HIGHMEM +features/all/rt/mm-rt-kmap-atomic-scheduling.patch +features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch +features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch +features/all/rt/arm-enable-highmem-for-rt.patch + +# IPC +features/all/rt/ipc-sem-rework-semaphore-wakeups.patch + +# SYSRQ + +# KVM require constant freq TSC (smp function call -> cpufreq) +features/all/rt/x86-kvm-require-const-tsc-for-rt.patch + +# SCSI/FCOE +features/all/rt/scsi-fcoe-rt-aware.patch + +# X86 crypto +features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch +features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch + +# Device mapper +features/all/rt/dm-make-rt-aware.patch + +# ACPI +features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch + +# CPUMASK OFFSTACK +features/all/rt/cpumask-disable-offstack-on-rt.patch + +# RANDOM +features/all/rt/random-make-it-work-on-rt.patch + +# SEQLOCKS +features/all/rt/seqlock-prevent-rt-starvation.patch +features/all/rt/seqlock-consolidate-spin_lock-unlock-waiting-with-sp.patch + +# HOTPLUG +features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch +features/all/rt/cpu-rt-rework-cpu-down.patch +features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch +features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch +features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch +features/all/rt/cpu_down_move_migrate_enable_back.patch + +# SCSCI QLA2xxx +features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch + +# NET +features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch +features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch +features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch +features/all/rt/net-ip_send_unicast_reply-add-missing-local-serializ.patch +features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch +features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch +features/all/rt/net-make-devnet_rename_seq-a-mutex.patch + +# CRYPTO +features/all/rt/peterz-srcu-crypto-chain.patch + +# LOCKDEP +features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch +features/all/rt/rt-rw-lockdep-annotations.patch + +# PERF +features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch + +# SOFTIRQ +features/all/rt/softirq-preempt-fix-3-re.patch +features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch +features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch +features/all/rt/softirq-split-handling-function.patch +features/all/rt/softirq-split-locks.patch +features/all/rt/API-cleanup-use-local_lock-not-__local_lock-for-soft.patch 
+features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch +features/all/rt/softirq-make-migrate-disable-enable-conditioned-on-softirq_n.patch + +# RCU +features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch +features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch +features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch + +# PREEMPT LAZY +features/all/rt/preempt-lazy-support.patch +features/all/rt/x86-preempt-lazy.patch +features/all/rt/arm-preempt-lazy-support.patch +features/all/rt/powerpc-preempt-lazy-support.patch + +# LEDS +features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch + +# DRIVERS +features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch +features/all/rt/mmci-remove-bogus-irq-save.patch + +# I915 +features/all/rt/i915_compile_fix.patch +features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch + +# SIMPLE WAITQUEUE +features/all/rt/wait.h-include-atomic.h.patch +features/all/rt/wait-simple-implementation.patch +features/all/rt/wait-simple-rework-for-completions.patch +features/all/rt/simple-wait-rename-and-export-the-equivalent-of-wait.patch + +features/all/rt/treercu-use-simple-waitqueue.patch +features/all/rt/rcu-more-swait-conversions.patch + +features/all/rt/completion-use-simple-wait-queues.patch +features/all/rt/fixup_opencoded_completions.patch + +# New stuff +# Revisit: We need this in other places as well +features/all/rt/move_sched_delayed_work_to_helper.patch + +# bcache disabled +features/all/rt/md-disable-bcache.patch + +# Preempt lazy seems broken on x86-64. +features/all/rt/disable-lazy-preempt-on-x86-64.patch + +# Add RT to version +features/all/rt/localversion.patch
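
The x86-mce-timer-hrtimer.patch hunks earlier in this patch convert the per-CPU MCE polling timer from a timer_list into an hrtimer, so that on PREEMPT_RT the periodic poll runs from hrtimer context and is re-armed with hrtimer_forward_now() instead of add_timer_on(); intervals kept in jiffies are converted to nanoseconds with jiffies_to_usecs() * 1000ULL. What follows is a minimal, self-contained sketch of that same conversion pattern in a hypothetical kernel module (poll_timer, poll_interval, poll_fn and the *_sketch_* names are illustrative only, not part of the patch or of mce.c):

/* Sketch only -- not part of the RT patch queue. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/jiffies.h>

static struct hrtimer poll_timer;
static unsigned long poll_interval = 5 * 60 * HZ;	/* jiffies, like check_interval in mce.c */

static enum hrtimer_restart poll_fn(struct hrtimer *t)
{
	/* ... periodic work goes here (machine_check_poll() in the real patch) ... */

	/* Re-arm relative to now; jiffies -> ns conversion as in the patch. */
	hrtimer_forward_now(t, ns_to_ktime(jiffies_to_usecs(poll_interval) * 1000ULL));
	return HRTIMER_RESTART;		/* HRTIMER_NORESTART would stop the timer */
}

static int __init poll_sketch_init(void)
{
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_fn;
	/* The patch uses HRTIMER_MODE_REL_PINNED to keep the timer on one CPU,
	 * mirroring the add_timer_on() pinning of the old timer_list code. */
	hrtimer_start_range_ns(&poll_timer,
			       ns_to_ktime(jiffies_to_usecs(poll_interval) * 1000ULL),
			       0, HRTIMER_MODE_REL_PINNED);
	return 0;
}

static void __exit poll_sketch_exit(void)
{
	/* Like mce_timer_delete_all(): must not be called from hard IRQ context. */
	hrtimer_cancel(&poll_timer);
}

module_init(poll_sketch_init);
module_exit(poll_sketch_exit);
MODULE_LICENSE("GPL");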
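
x86-preempt-lazy.patch above introduces TIF_NEED_RESCHED_LAZY, a per-thread preempt_lazy_count and a combined _TIF_NEED_RESCHED_MASK, and switches the entry_32.S/entry_64.S work loops from testing the single TIF_NEED_RESCHED bit to testing that mask. A rough C restatement of the flag test the assembly performs is given below; resched_pending() is a hypothetical helper used only for illustration and is not a kernel interface:

/* Sketch only -- C restatement of the assembly-level flag test. */
#define TIF_NEED_RESCHED	3		/* existing x86 flag */
#define TIF_NEED_RESCHED_LAZY	9		/* added by the patch */

#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

/*
 * Return-to-user work loop: previously only TIF_NEED_RESCHED was tested
 * ("bt $TIF_NEED_RESCHED"); with lazy preemption either bit leads to
 * schedule().  The in-kernel preemption paths (resume_kernel/retint_kernel)
 * are stricter and additionally require thread_info->preempt_lazy_count
 * to be zero before the lazy bit is honoured.
 */
static inline int resched_pending(unsigned long ti_flags)
{
	return (ti_flags & _TIF_NEED_RESCHED_MASK) != 0;
}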
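
x86-stackprot-no-random-on-rt.patch above keeps the stack-protector canary initialisation off the random pool when PREEMPT_RT_FULL is set, because get_random_bytes() can take sleeping locks and CPU bringup reaches this code from a context where that is not allowed; only the TSC is mixed in on RT. Below is a condensed sketch of the resulting logic. The real code lives in arch/x86/include/asm/stackprotector.h and also stores the canary in the per-CPU/TLS slot, which is omitted here; canary_seed() is a hypothetical name, and the variable is zero-initialised in this sketch for well-defined C, whereas the patch deliberately leaves it uninitialized to pick up whatever is on the stack as extra entropy:

/* Sketch only -- condensed from the stackprotector.h hunk above. */
#include <linux/types.h>
#include <linux/random.h>
#include <asm/msr.h>

static __always_inline u64 canary_seed(void)
{
	u64 canary = 0;
	u64 tsc;

#ifndef CONFIG_PREEMPT_RT_FULL
	/* !RT: the random pool may be used, as during normal boot. */
	get_random_bytes(&canary, sizeof(canary));
#endif
	/* Always mix in the TSC, the only entropy source used on RT here. */
	tsc = __native_read_tsc();
	canary += tsc + (tsc << 32UL);

	return canary;
}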