From 4b4fa14653414c24e3c27b00dc50703a83105ad4 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Mon, 15 Feb 2016 16:36:21 +0000
Subject: [PATCH] Update to 4.5-rc4

Drop many, many patches which went upstream.  Refresh others for
filename or context changes, and for removal of radeon UMS support.

[rt] Disable until it is updated for 4.5 or later

aufs: Update support patches to aufs4.x-rcN-20160215
---
 debian/changelog | 9 +-
 debian/config/defines | 2 +-
 ...bcache-add-a-cond_resched-call-to-gc.patch | 26 -
 ...e-of-register-in-udev-to-avoid-devic.patch | 46 -
 ...fill_dirty-to-always-scan-entire-dis.patch | 91 -
 ...che_dev_unlink_done-flag-when-attach.patch | 109 -
 ...che-fix-a-leak-in-bch_cached_dev_run.patch | 32 -
 ...elock-when-we-cause-a-huge-number-of.patch | 67 -
 ...-crash-on-changing-writeback_running.patch | 32 -
 ...r-reboot-notifier-if-bcache-fails-to.patch | 35 -
 .../all/disable-some-marvell-phys.patch | 32 +-
 ...-redundant-log-messages-from-drivers.patch | 169 +-
 ...fix-crash-on-detecting-device-with-i.patch | 44 -
 ...er-user-amount-of-pages-allocated-in.patch | 237 --
 ...-a-process-requires-mapped-uids-gids.patch | 12 +-
 ...uired-for-drm-and-kms-on-r600-onward.patch | 31 +-
 ...nish-a-td-if-we-get-a-short-transfer.patch | 36 -
 .../rt2x00-fix-monitor-mode-regression.patch | 149 -
 ...-fix-crashes-in-sd-and-sr-runtime-pm.patch | 82 -
 ...e-ldisc-reference-via-ioctl-tiocgetd.patch | 63 -
 ...-detecting-device-without-write_urbs.patch | 31 -
 ...ion-fix-overflow-of-interfaces-array.patch | 33 -
 ...rypto-sun4i-ss-add-missing-statesize.patch | 40 -
 ...h-emu-correctly-handle-nop-emulation.patch | 140 -
 ...915-shut-up-gen8-sde-irq-dmesg-noise.patch | 67 -
 ...-width-pitch-mismatch-on-framebuffer.patch | 60 -
 ...-the-Debian-memory-resource-controll.patch | 6 +-
 ...i-do-not-request-unreleased-firmware.patch | 4 +-
 debian/patches/debian/version.patch | 14 +-
 .../features/all/aufs4/aufs4-base.patch | 24 +-
 .../features/all/aufs4/aufs4-mmap.patch | 58 +-
 .../features/all/aufs4/aufs4-standalone.patch | 84 +-
 ...mory-cgroup-support-to-be-included-b.patch | 16 +-
 ...k-unlock-symetry-versus-pi_lock-and-.patch | 43 -
 ...n-translation-section-permission-fau.patch | 86 -
 ...intk-drop-the-logbuf_lock-more-often.patch | 77 -
 ...-mark-LAPIC-timer-handler-as-irqsafe.patch | 26 -
 ...KVM-use-simple-waitqueue-for-vcpu-wq.patch | 335 --
 ...acpi_gbl_hardware-lock-back-to-a-raw.patch | 174 -
 .../arch-arm64-Add-lazy-preempt-support.patch | 104 -
 ...reempt-add-TIF_NEED_RESCHED_LAZY-to-.patch | 77 -
 ...ove-irq-handler-when-clock-is-unused.patch | 147 -
 ...-tclib-default-to-tclib-timer-for-rt.patch | 33 -
 .../all/rt/arm-convert-boot-lock-to-raw.patch | 408 --
 .../all/rt/arm-enable-highmem-for-rt.patch | 174 -
 .../rt/arm-highmem-flush-tlb-on-unmap.patch | 28 -
 .../all/rt/arm-preempt-lazy-support.patch | 106 -
 .../all/rt/arm-unwind-use_raw_lock.patch | 84 -
 ...arm64-xen--Make-XEN-depend-on-non-rt.patch | 24 -
 .../rt/ata-disable-interrupts-if-non-rt.patch | 65 -
 ...ne-notifier-to-POST_D.patchto-POST_D.patch | 84 -
 .../all/rt/block-blk-mq-use-swait.patch | 115 -
 ...k-mq-don-t-complete-requests-via-IPI.patch | 102 -
 .../rt/block-mq-drop-per-ctx-cpu_lock.patch | 125 -
 .../rt/block-mq-drop-preempt-disable.patch | 52 -
 .../all/rt/block-mq-use-cpu_light.patch | 90 -
 ...k-shorten-interrupt-disabled-regions.patch | 97 -
 .../features/all/rt/block-use-cpu-chill.patch | 46 -
 ...-the-seq-counter-in-struct-btrfs_dev.patch | 37 -
 .../all/rt/bug-rt-dependend-variants.patch | 37 -
 ...heduling-while-atomic-in-cgroup-code.patch | 65 -
 ...roups-use-simple-wait-in-css_release.patch | 87 -
 ...source-tclib-allow-higher-clockrates.patch | 158 -
 .../completion-use-simple-wait-queues.patch | 225 --
 .../all/rt/cond-resched-lock-rt-tweak.patch | 27 -
 .../all/rt/cond-resched-softirq-rt.patch | 53 -
 ...ument-why-PREEMPT_RT-uses-a-spinlock.patch | 56 -
 ...tplug-lock-a-sleeping-spinlock-on-rt.patch | 117 -
 .../all/rt/cpu-rt-rework-cpu-down.patch | 530 ---
 ...-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 107 -
 .../cpu_down_move_migrate_enable_back.patch | 53 -
 ...rop-K8-s-driver-from-beeing-selected.patch | 33 -
 .../rt/cpumask-disable-offstack-on-rt.patch | 35 -
 ...-preempt-disabled-regions-more-algos.patch | 242 --
 .../features/all/rt/debugobjects-rt.patch | 26 -
 .../features/all/rt/dm-make-rt-aware.patch | 27 -
 ...coupled-fix-warning-cpuidle_coupled_.patch | 26 -
 ...s-media-vsp1_video-fix-compile-error.patch | 33 -
 .../drivers-net-8139-disable-irq-nosync.patch | 26 -
 .../rt/drivers-net-fix-livelock-issues.patch | 127 -
 ...rivers-net-vortex-fix-locking-issues.patch | 49 -
 ...andom-reduce-preempt-disabled-region.patch | 33 -
 .../rt/drivers-tty-fix-omap-lock-crap.patch | 43 -
 ...rivers-tty-pl011-irq-disable-madness.patch | 48 -
 ...op-trace_i915_gem_ring_dispatch-onrt.patch | 59 -
 ...on-t-disable-preemption-during-trace.patch | 99 -
 .../all/rt/epoll-use-get-cpu-light.patch | 31 -
 .../all/rt/fs-aio-simple-simple-work.patch | 107 -
 .../features/all/rt/fs-block-rt-support.patch | 23 -
 ...cache-use-cpu-chill-in-trylock-loops.patch | 86 -
 .../all/rt/fs-jbd-replace-bh_state-lock.patch | 97 -
 ...ull-your-plug-when-waiting-for-space.patch | 32 -
 .../all/rt/fs-namespace-preemption-fix.patch | 31 -
 .../rt/fs-ntfs-disable-interrupt-non-rt.patch | 60 -
 .../fs-replace-bh_uptodate_lock-for-rt.patch | 162 -
 .../rt/ftrace-migrate-disable-tracing.patch | 74 -
 .../all/rt/futex-requeue-pi-fix.patch | 114 -
 ...lt-affinity-mask-command-line-option.patch | 68 -
 .../all/rt/genirq-disable-irqpoll-on-rt.patch | 38 -
 ...voke-the-affinity-callback-via-a-wor.patch | 153 -
 .../all/rt/genirq-force-threading.patch | 49 -
 ...cpus_allowed_ptr-in-sync_unplug_thre.patch | 47 -
 .../rt/hotplug-light-get-online-cpus.patch | 205 -
 ...ync_unplug-no-27-5cn-27-in-task-name.patch | 25 -
 .../all/rt/hotplug-use-migrate-disable.patch | 40 -
 ...-schedule_work-call-to-helper-thread.patch | 118 -
 .../rt/hrtimer-enfore-64byte-alignment.patch | 28 -
 ...timer-callback-changes-for-preempt-r.patch | 336 --
 .../rt/hrtimers-prepare-full-preemption.patch | 205 -
 ...on-t-ignore-threshold-module-paramet.patch | 26 -
 ...pdate-hwlat_detector-to-add-outer-lo.patch | 126 -
 ...r-Use-thread-instead-of-stop-machine.patch | 184 -
 ...r-Use-trace_clock_local-if-available.patch | 93 -
 .../patches/features/all/rt/hwlatdetect.patch | 1348 -------
 ...-omap-drop-the-lock-hard-irq-context.patch | 34 -
 ...ng-from-i915-when-running-on-PREEMPT.patch | 30 -
 .../features/all/rt/i915_compile_fix.patch | 24 -
 .../rt/ide-use-nort-local-irq-variants.patch | 170 -
 .../idr-use-local-lock-for-protection.patch | 124 -
 .../infiniband-mellanox-ib-use-nort-irq.patch | 41 -
 .../rt/inpt-gameport-use-local-irq-nort.patch | 61 -
 .../introduce_migrate_disable_cpu_light.patch | 281 --
 ...Implement-lockless-pipelined-wakeups.patch | 228 --
 .../rt/ipc-sem-rework-semaphore-wakeups.patch | 70 -
 ...irq-processing-in-irq-thread-context.patch | 147 -
 ...rk-Move-irq-safe-work-to-irq-context.patch | 78 -
 ...-push_most_work_into_softirq_context.patch | 198 -
 .../features/all/rt/jump-label-rt.patch | 36 -
 .../rt/kconfig-disable-a-few-options-rt.patch | 34 -
 .../all/rt/kconfig-preempt-rt-full.patch | 59 -
 ...el-SRCU-provide-a-static-initializer.patch | 125 -
 ...pu-down-problem-if-kthread-s-cpu-is-.patch | 86 -
 ...restore-original-cpu-mask-oncpu-down.patch | 59 -
 ...-perf_cpu_context-s-timer-as-irqsafe.patch | 26 -
 .../all/rt/kgb-serial-hackaround.patch | 102 -
 .../features/all/rt/latency-hist.patch | 1817 ---------
 ...tency_hist-update-sched_wakeup-probe.patch | 41 -
 .../rt/latencyhist-disable-jump-labels.patch | 62 -
 ...ds-trigger-disable-CPU-trigger-on-RT.patch | 36 -
 .../patches/features/all/rt/lglocks-rt.patch | 200 -
 ..._bl.h-make-list-head-locking-RT-safe.patch | 115 -
 .../rt/local-irq-rt-depending-variants.patch | 53 -
 .../features/all/rt/localversion.patch | 14 -
 .../lockdep-no-softirq-accounting-on-rt.patch | 59 -
 ...-fix-warnings-due-to-missing-PREEMPT.patch | 142 -
 ...ardirq-context-test-for-raw-spinlock.patch | 57 -
 ...ure-Do-NOT-include-rwlock.h-directly.patch | 27 -
 .../features/all/rt/md-disable-bcache.patch | 32 -
 .../md-raid5-percpu-handling-rt-aware.patch | 62 -
 .../all/rt/mips-disable-highmem-on-rt.patch | 23 -
 .../rt/mm-bounce-local-irq-save-nort.patch | 28 -
 .../rt/mm-convert-swap-to-percpu-locked.patch | 135 -
 .../features/all/rt/mm-disable-sloub-rt.patch | 32 -
 .../features/all/rt/mm-enable-slub.patch | 428 --
 .../all/rt/mm-make-vmstat-rt-aware.patch | 89 -
 ...n-t-call-schedule_work_on-in-preempt.patch | 69 -
 .../rt/mm-memcontrol-do_not_disable_irq.patch | 140 -
 ...e-alloc-use-local-lock-on-target-cpu.patch | 28 -
 ...e_alloc-reduce-lock-sections-further.patch | 198 -
 ...page_alloc-rt-friendly-per-cpu-pages.patch | 207 -
 .../rt/mm-protect-activate-switch-mm.patch | 72 -
 .../all/rt/mm-rt-kmap-atomic-scheduling.patch | 289 --
 ...-scatterlist-dont-disable-irqs-on-RT.patch | 44 -
 .../all/rt/mm-vmalloc-use-get-cpu-light.patch | 66 -
 ...-not-protect-workingset_shadow_nodes.patch | 151 -
 .../all/rt/mmci-remove-bogus-irq-save.patch | 40 -
 .../move_sched_delayed_work_to_helper.patch | 89 -
 .../features/all/rt/mutex-no-spin-on-rt.patch | 29 -
 ...al-irq-disable-alloc-atomic-headache.patch | 59 -
 ...tplug-drain-input_pkt_queue-lockless.patch | 47 -
 ...-users-of-napi_alloc_cache-against-r.patch | 77 -
 ...ble-xt-write-recseq-begin-rt-fallout.patch | 74 -
 .../net-make-devnet_rename_seq-a-mutex.patch | 107 -
 ...recursion-to-per-task-variable-on-RT.patch | 126 -
 .../all/rt/net-prevent-abba-deadlock.patch | 112 -
 ...y-to-delegate-processing-a-softirq-t.patch | 79 -
 ...activate_many-use-msleep-1-instead-o.patch | 58 -
 .../net-tx-action-avoid-livelock-on-rt.patch | 93 -
 .../features/all/rt/net-use-cpu-chill.patch | 63 -
 .../all/rt/net-wireless-warn-nort.patch | 24 -
 .../features/all/rt/oleg-signal-rt-fix.patch | 143 -
 .../all/rt/panic-disable-random-on-rt.patch | 27 -
 ...ce-rcu-bh-qs-where-safe-from-softirq.patch | 109 -
 .../pci-access-use-__wake_up_all_locked.patch | 26 -
 .../all/rt/percpu_ida-use-locklocks.patch | 102 -
 .../perf-make-swevent-hrtimer-irqsafe.patch | 69 -
 .../all/rt/peter_zijlstra-frob-rcu.patch | 167 -
 .../all/rt/peterz-srcu-crypto-chain.patch | 183 -
 .../all/rt/pid.h-include-atomic.h.patch | 37 -
 .../patches/features/all/rt/ping-sysrq.patch | 122 -
 .../all/rt/posix-timers-no-broadcast.patch | 34 -
 ...timers-thread-posix-cpu-timers-on-rt.patch | 302 --
 .../all/rt/power-disable-highmem-on-rt.patch | 23 -
 .../rt/power-use-generic-rwsem-on-rt.patch | 27 -
 ...ble-in-kernel-MPIC-emulation-for-PRE.patch | 38 -
 .../all/rt/powerpc-preempt-lazy-support.patch | 174 -
 ...ce-init.c-adapt-to-completions-using.patch | 32 -
 .../preempt-lazy-check-preempt_schedule.patch | 74 -
 .../all/rt/preempt-lazy-support.patch | 590 ---
 .../all/rt/preempt-nort-rt-variants.patch | 48 -
 ...27-boot-param-to-help-with-debugging.patch | 32 -
 .../patches/features/all/rt/printk-kill.patch | 164 -
 .../features/all/rt/printk-rt-aware.patch | 101 -
 ...n-IRQs-in-ptrace_freeze_traced-too-e.patch | 35 -
 ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 152 -
 .../features/all/rt/radix-tree-rt-aware.patch | 74 -
 .../all/rt/random-make-it-work-on-rt.patch | 116 -
 ...nate-softirq-processing-from-rcutree.patch | 434 --
 .../rt/rcu-disable-rcu-fast-no-hz-on-rt.patch | 25 -
 .../rt/rcu-make-RCU_BOOST-default-on-RT.patch | 35 -
 ...merge-rcu-bh-into-rcu-preempt-for-rt.patch | 270 --
 .../all/rt/rcu-more-swait-conversions.patch | 213 -
 ...s-disable-irq-while-calling-rcu_pree.patch | 49 -
 ...ate_disable-race-with-cpu-hotplug-3f.patch | 35 -
 ...l-arm-coredump-fails-for-cpu-3e-3d-4.patch | 69 -
 .../all/rt/relay-fix-timer-madness.patch | 53 -
 ...function-called-from-invalid-context.patch | 96 -
 .../features/all/rt/rt-add-rt-locks.patch | 2173 -----------
 .../all/rt/rt-introduce-cpu-chill.patch | 129 -
 .../features/all/rt/rt-local-irq-lock.patch | 324 --
 .../all/rt/rt-preempt-base-config.patch | 54 -
 .../features/all/rt/rt-serial-warn-fix.patch | 38 -
 ...tmutex-Use-chainwalking-control-enum.patch | 28 -
 ...rtmutex-add-a-first-shot-of-ww_mutex.patch | 424 --
 .../all/rt/rtmutex-avoid-include-hell.patch | 24 -
 .../all/rt/rtmutex-futex-prepare-rt.patch | 239 --
 .../all/rt/rtmutex-lock-killable.patch | 52 -
 .../rt/rtmutex-trylock-is-okay-on-RT.patch | 28 -
 .../all/rt/rtmutex_dont_include_rcu.patch | 76 -
 ...t-t-disable-interrupts-in-qc_issue-h.patch | 79 -
 ...line-dl_task_timer-has-to-be-irqsafe.patch | 23 -
 .../all/rt/sched-delay-put-task.patch | 82 -
 .../sched-disable-rt-group-sched-on-rt.patch | 29 -
 .../all/rt/sched-disable-ttwu-queue.patch | 32 -
 .../all/rt/sched-limit-nr-migrate.patch | 27 -
 ...might-sleep-do-not-account-rcu-depth.patch | 48 -
 .../all/rt/sched-mmdrop-delayed.patch | 134 -
 ...provide-a-tsk_nr_cpus_allowed-helper.patch | 262 --
 .../all/rt/sched-rt-mutex-wakeup.patch | 94 -
 ...twu-ensure-success-return-is-correct.patch | 35 -
 ...us_allowed-instead-of-accessing-cpus.patch | 58 -
 ...Only-wake-up-idle-workers-if-not-blo.patch | 38 -
 .../features/all/rt/scsi-fcoe-rt-aware.patch | 115 -
 ...function-called-from-invalid-context.patch | 48 -
 .../rt/seqlock-prevent-rt-starvation.patch | 191 -
 .../all/rt/signal-fix-up-rcu-wreckage.patch | 39 -
 .../signal-revert-ptrace-preempt-magic.patch | 32 -
 ...t-tasks-to-cache-one-sigqueue-struct.patch | 199 -
 .../features/all/rt/skbufhead-raw-lock.patch | 114 -
 .../rt/slub-disable-SLUB_CPU_PARTIAL.patch | 48 -
 .../all/rt/slub-enable-irqs-for-no-wait.patch | 48 -
 ...pcm_stream_lock-irqs_disabled-splats.patch | 70 -
 ...oftirq-disable-softirq-stacks-for-rt.patch | 157 -
 .../all/rt/softirq-preempt-fix-3-re.patch | 154 -
 .../features/all/rt/softirq-split-locks.patch | 820 ----
 ...plit-timer-softirqs-out-of-ksoftirqd.patch | 208 -
 ...arc64-use-generic-rwsem-spinlocks-rt.patch | 28 -
 .../all/rt/spinlock-types-separate-raw.patch | 209 -
 ...eate-lg_global_trylock_relax-primiti.patch | 87 -
 ...e-lg_global_trylock_relax-to-dead-wi.patch | 96 -
 .../all/rt/stop-machine-raw-lock.patch | 193 -
 ...nvert-stop_machine_run-to-PREEMPT_RT.patch | 35 -
 ...vc_xprt_do_enqueue-use-get_cpu_light.patch | 63 -
 .../suspend-prevernt-might-sleep-splats.patch | 107 -
 .../all/rt/sysfs-realtime-entry.patch | 48 -
 ...-from-going-into-infinite-spin-in-rt.patch | 392 --
 .../tasklist-lock-fix-section-conflict.patch | 56 -
 ...rmal-Defer-thermal-wakups-to-threads.patch | 133 -
 .../rt/timekeeping-split-jiffies-lock.patch | 157 -
 ...-waking-softirqs-from-the-jiffy-tick.patch | 76 -
 .../all/rt/timer-fd-avoid-live-lock.patch | 31 -
 ...id-the-base-null-otptimization-on-rt.patch | 74 -
 .../all/rt/timers-preempt-rt-support.patch | 55 -
 .../timers-prepare-for-full-preemption.patch | 150 -
 ...st-Consider-new-argument-when-probin.patch | 38 -
 ...-for-preempt-off-in-preempt_schedule.patch | 47 -
 ...ove-preemption-disabling-in-netif_rx.patch | 66 -
 .../all/rt/usb-use-_nort-in-giveback.patch | 58 -
 .../all/rt/user-use-local-irq-nort.patch | 30 -
 .../rt/vtime-split-lock-and-seqcount.patch | 206 -
 .../all/rt/wait-simple-implementation.patch | 363 --
 .../all/rt/wait.h-include-atomic.h.patch | 33 -
 ...rk-around-irqsafe-timer-optimization.patch | 133 -
 ...mple-Simple-work-queue-implemenation.patch | 234 --
 .../rt/workqueue-distangle-from-rq-lock.patch | 271 --
 .../rt/workqueue-prevent-deadlock-stall.patch | 201 -
 .../all/rt/workqueue-use-locallock.patch | 145 -
 .../features/all/rt/workqueue-use-rcu.patch | 355 --
 .../rt/x86-UV-raw_spinlock-conversion.patch | 245 --
 ...ypto-reduce-preempt-disabled-regions.patch | 113 -
 ...highmem-add-a-already-used-pte-check.patch | 23 -
 .../all/rt/x86-io-apic-migra-no-unmask.patch | 28 -
 .../rt/x86-kvm-require-const-tsc-for-rt.patch | 31 -
 .../all/rt/x86-mce-timer-hrtimer.patch | 180 -
 ...-mce-use-swait-queue-for-mce-wakeups.patch | 160 -
 .../features/all/rt/x86-preempt-lazy.patch | 152 -
 ...ignal-delay-calling-signals-on-32bit.patch | 43 -
 .../rt/x86-stackprot-no-random-on-rt.patch | 47 -
 .../rt/x86-use-gen-rwsem-spinlocks-rt.patch | 29 -
 ...m-orion-always-use-multi_irq_handler.patch | 315 --
 ...-move-watchdog-setup-to-mach-orion5x.patch | 132 -
 .../arm-orion-use-sparse_irq-everywhere.patch | 392 --
 ...arm-orion5x-clean-up-mach-.h-headers.patch | 1020 -----
 .../arm-orion5x-multiplatform-support.patch | 147 -
 ...a-compat-string-for-bcm2836-machine-.patch | 30 -
 ...devicetree-for-bcm2836-and-raspberry.patch | 156 -
 ...2835-add-kconfig-support-for-bcm2836.patch | 76 -
 ...-bcm2835-add-rpi-power-domain-driver.patch | 376 --
 ...the-auxiliary-clocks-to-the-device-t.patch | 30 -
 ...ne-two-new-packets-from-the-latest-f.patch | 32 -
 ...-the-cpu-peripheral-include-out-of-c.patch | 68 -
 ...t-the-dt-for-peripherals-from-the-dt.patch | 440 ---
 ...ver-hook-for-allocating-gem-object-s.patch | 58 -
 .../arm/rpi/drm-vc4-add-a-bo-cache.patch | 509 ---
 ...pi-for-creating-gpu-shaders-in-gem-b.patch | 1165 ------
 ...nterface-for-capturing-the-gpu-state.patch | 330 --
 ...drm-vc4-add-create-and-map-bo-ioctls.patch | 201 -
 ...-vc4-add-support-for-async-pageflips.patch | 508 ---
 ...c4-add-support-for-drawing-3d-frames.patch | 3474 -----------------
 ...-enough-memory-in-vc4_save_hang_stat.patch | 27 -
 ...4-bind-and-initialize-the-v3d-engine.patch | 330 --
 ...user-returns-the-number-of-bytes-rem.patch | 87 -
 ...4-fix-a-typo-in-a-v3d-debug-register.patch | 23 -
 .../arm/rpi/drm-vc4-fix-an-error-code.patch | 28 -
 ...d-root-properties-for-raspberry-pi-2.patch | 26 -
 ...add-rpi-power-domain-driver-bindings.patch | 71 -
 ...m-bcm2835-calculate-scaler-in-config.patch | 57 -
 ...2835-fix-email-address-specification.patch | 24 -
 ...pwm-bcm2835-prevent-division-by-zero.patch | 36 -
 debian/patches/series | 50 -
 debian/patches/series-rt | 572 ---
 331 files changed, 219 insertions(+), 45598 deletions(-)
 delete mode 100644 debian/patches/bugfix/all/bcache-add-a-cond_resched-call-to-gc.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-allows-use-of-register-in-udev-to-avoid-devic.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-change-refill_dirty-to-always-scan-entire-dis.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-clear-bcache_dev_unlink_done-flag-when-attach.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-fix-a-leak-in-bch_cached_dev_run.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-fix-a-livelock-when-we-cause-a-huge-number-of.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-prevent-crash-on-changing-writeback_running.patch
 delete mode 100644 debian/patches/bugfix/all/bcache-unregister-reboot-notifier-if-bcache-fails-to.patch
 delete mode 100644 debian/patches/bugfix/all/media-usbvision-fix-crash-on-detecting-device-with-i.patch
 delete mode 100644 debian/patches/bugfix/all/pipe-limit-the-per-user-amount-of-pages-allocated-in.patch
 delete mode 100644 debian/patches/bugfix/all/revert-xhci-don-t-finish-a-td-if-we-get-a-short-transfer.patch
 delete mode 100644 debian/patches/bugfix/all/rt2x00-fix-monitor-mode-regression.patch
 delete mode 100644 debian/patches/bugfix/all/scsi-fix-crashes-in-sd-and-sr-runtime-pm.patch
 delete mode 100644 debian/patches/bugfix/all/tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch
 delete mode 100644 debian/patches/bugfix/all/usb-serial-visor-fix-crash-on-detecting-device-without-write_urbs.patch
 delete mode 100644 debian/patches/bugfix/all/usbvision-fix-overflow-of-interfaces-array.patch
 delete mode 100644 debian/patches/bugfix/arm/crypto-sun4i-ss-add-missing-statesize.patch
 delete mode 100644 debian/patches/bugfix/mips/mips-math-emu-correctly-handle-nop-emulation.patch
 delete mode 100644 debian/patches/bugfix/x86/drm-i915-shut-up-gen8-sde-irq-dmesg-noise.patch
 delete mode 100644 debian/patches/bugfix/x86/drm-vmwgfx-fix-a-width-pitch-mismatch-on-framebuffer.patch
 delete mode 100644 debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
 delete mode 100644 debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
 delete mode 100644 debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch
 delete mode 100644 debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
 delete mode 100644 debian/patches/features/all/rt/KVM-use-simple-waitqueue-for-vcpu-wq.patch
 delete mode 100644 debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
 delete mode 100644 debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch
 delete mode 100644 debian/patches/features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
 delete mode 100644 debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
 delete mode 100644 debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch
 delete mode 100644 debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch
 delete mode 100644 debian/patches/features/all/rt/arm-preempt-lazy-support.patch
 delete mode 100644 debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch
 delete mode 100644 debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch
 delete mode 100644 debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch
 delete mode 100644 debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
 delete mode 100644 debian/patches/features/all/rt/block-blk-mq-use-swait.patch
 delete mode 100644 debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch
 delete mode 100644 debian/patches/features/all/rt/block-mq-drop-per-ctx-cpu_lock.patch
 delete mode 100644 debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch
 delete mode 100644 debian/patches/features/all/rt/block-mq-use-cpu_light.patch
 delete mode 100644 debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch
 delete mode 100644 debian/patches/features/all/rt/block-use-cpu-chill.patch
 delete mode 100644 debian/patches/features/all/rt/btrfs-initialize-the-seq-counter-in-struct-btrfs_dev.patch
 delete mode 100644 debian/patches/features/all/rt/bug-rt-dependend-variants.patch
 delete mode 100644 debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch
 delete mode 100644 debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch
 delete mode 100644 debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch
 delete mode 100644 debian/patches/features/all/rt/completion-use-simple-wait-queues.patch
 delete mode 100644 debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
 delete mode 100644 debian/patches/features/all/rt/cond-resched-softirq-rt.patch
 delete mode 100644 debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
 delete mode 100644 debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch
 delete mode 100644 debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
 delete mode 100644 debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch
 delete mode 100644 debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
 delete mode 100644 debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
 delete mode 100644 debian/patches/features/all/rt/debugobjects-rt.patch
 delete mode 100644 debian/patches/features/all/rt/dm-make-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-media-vsp1_video-fix-compile-error.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch
 delete mode 100644 debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch
 delete mode 100644 debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
 delete mode 100644 debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch
 delete mode 100644 debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
 delete mode 100644 debian/patches/features/all/rt/fs-aio-simple-simple-work.patch
 delete mode 100644 debian/patches/features/all/rt/fs-block-rt-support.patch
 delete mode 100644 debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
 delete mode 100644 debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch
 delete mode 100644 debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch
 delete mode 100644 debian/patches/features/all/rt/fs-namespace-preemption-fix.patch
 delete mode 100644 debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch
 delete mode 100644 debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch
 delete mode 100644 debian/patches/features/all/rt/futex-requeue-pi-fix.patch
 delete mode 100644 debian/patches/features/all/rt/genirq-Add-default-affinity-mask-command-line-option.patch
 delete mode 100644 debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
 delete mode 100644 debian/patches/features/all/rt/genirq-force-threading.patch
 delete mode 100644 debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
 delete mode 100644 debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch
 delete mode 100644 debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
 delete mode 100644 debian/patches/features/all/rt/hotplug-use-migrate-disable.patch
 delete mode 100644 debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
 delete mode 100644 debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch
 delete mode 100644 debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
 delete mode 100644 debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch
 delete mode 100644 debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
 delete mode 100644 debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
 delete mode 100644 debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch
 delete mode 100644 debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch
 delete mode 100644 debian/patches/features/all/rt/hwlatdetect.patch
 delete mode 100644 debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch
 delete mode 100644 debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
 delete mode 100644 debian/patches/features/all/rt/i915_compile_fix.patch
 delete mode 100644 debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch
 delete mode 100644 debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch
 delete mode 100644 debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch
 delete mode 100644 debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch
 delete mode 100644 debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch
 delete mode 100644 debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch
 delete mode 100644 debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch
 delete mode 100644 debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
 delete mode 100644 debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch
 delete mode 100644 debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch
 delete mode 100644 debian/patches/features/all/rt/jump-label-rt.patch
 delete mode 100644 debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch
 delete mode 100644 debian/patches/features/all/rt/kconfig-preempt-rt-full.patch
 delete mode 100644 debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch
 delete mode 100644 debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
 delete mode 100644 debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
 delete mode 100644 debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
 delete mode 100644 debian/patches/features/all/rt/kgb-serial-hackaround.patch
 delete mode 100644 debian/patches/features/all/rt/latency-hist.patch
 delete mode 100644 debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch
 delete mode 100644 debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch
 delete mode 100644 debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
 delete mode 100644 debian/patches/features/all/rt/lglocks-rt.patch
 delete mode 100644 debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch
 delete mode 100644 debian/patches/features/all/rt/local-irq-rt-depending-variants.patch
 delete mode 100644 debian/patches/features/all/rt/localversion.patch
 delete mode 100644 debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
 delete mode 100644 debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
 delete mode 100644 debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
 delete mode 100644 debian/patches/features/all/rt/md-disable-bcache.patch
 delete mode 100644 debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch
 delete mode 100644 debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch
 delete mode 100644 debian/patches/features/all/rt/mm-disable-sloub-rt.patch
 delete mode 100644 debian/patches/features/all/rt/mm-enable-slub.patch
 delete mode 100644 debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
 delete mode 100644 debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
 delete mode 100644 debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch
 delete mode 100644 debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch
 delete mode 100644 debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
 delete mode 100644 debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch
 delete mode 100644 debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch
 delete mode 100644 debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
 delete mode 100644 debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch
 delete mode 100644 debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
 delete mode 100644 debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch
 delete mode 100644 debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch
 delete mode 100644 debian/patches/features/all/rt/mutex-no-spin-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch
 delete mode 100644 debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
 delete mode 100644 debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
 delete mode 100644 debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
 delete mode 100644 debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch
 delete mode 100644 debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
 delete mode 100644 debian/patches/features/all/rt/net-prevent-abba-deadlock.patch
 delete mode 100644 debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
 delete mode 100644 debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
 delete mode 100644 debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/net-use-cpu-chill.patch
 delete mode 100644 debian/patches/features/all/rt/net-wireless-warn-nort.patch
 delete mode 100644 debian/patches/features/all/rt/oleg-signal-rt-fix.patch
 delete mode 100644 debian/patches/features/all/rt/panic-disable-random-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
 delete mode 100644 debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch
 delete mode 100644 debian/patches/features/all/rt/percpu_ida-use-locklocks.patch
 delete mode 100644 debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch
 delete mode 100644 debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch
 delete mode 100644 debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch
 delete mode 100644 debian/patches/features/all/rt/pid.h-include-atomic.h.patch
 delete mode 100644 debian/patches/features/all/rt/ping-sysrq.patch
 delete mode 100644 debian/patches/features/all/rt/posix-timers-no-broadcast.patch
 delete mode 100644 debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/power-disable-highmem-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
 delete mode 100644 debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch
 delete mode 100644 debian/patches/features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch
 delete mode 100644 debian/patches/features/all/rt/preempt-lazy-check-preempt_schedule.patch
 delete mode 100644 debian/patches/features/all/rt/preempt-lazy-support.patch
 delete mode 100644 debian/patches/features/all/rt/preempt-nort-rt-variants.patch
 delete mode 100644 debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
 delete mode 100644 debian/patches/features/all/rt/printk-kill.patch
 delete mode 100644 debian/patches/features/all/rt/printk-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch
 delete mode 100644 debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
 delete mode 100644 debian/patches/features/all/rt/radix-tree-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/random-make-it-work-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
 delete mode 100644 debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch
 delete mode 100644 debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/rcu-more-swait-conversions.patch
 delete mode 100644 debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
 delete mode 100644 debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch
 delete mode 100644 debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
 delete mode 100644 debian/patches/features/all/rt/relay-fix-timer-madness.patch
 delete mode 100644 debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
 delete mode 100644 debian/patches/features/all/rt/rt-add-rt-locks.patch
 delete mode 100644 debian/patches/features/all/rt/rt-introduce-cpu-chill.patch
 delete mode 100644 debian/patches/features/all/rt/rt-local-irq-lock.patch
 delete mode 100644 debian/patches/features/all/rt/rt-preempt-base-config.patch
 delete mode 100644 debian/patches/features/all/rt/rt-serial-warn-fix.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-Use-chainwalking-control-enum.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-lock-killable.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
 delete mode 100644 debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
 delete mode 100644 debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
 delete mode 100644 debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
 delete mode 100644 debian/patches/features/all/rt/sched-delay-put-task.patch
 delete mode 100644 debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/sched-disable-ttwu-queue.patch
 delete mode 100644 debian/patches/features/all/rt/sched-limit-nr-migrate.patch
 delete mode 100644 debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch
 delete mode 100644 debian/patches/features/all/rt/sched-mmdrop-delayed.patch
 delete mode 100644 debian/patches/features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
 delete mode 100644 debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
 delete mode 100644 debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
 delete mode 100644 debian/patches/features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch
 delete mode 100644 debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
 delete mode 100644 debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch
 delete mode 100644 debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
 delete mode 100644 debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
 delete mode 100644 debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
 delete mode 100644 debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch
 delete mode 100644 debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
 delete mode 100644 debian/patches/features/all/rt/skbufhead-raw-lock.patch
 delete mode 100644 debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch
 delete mode 100644 debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch
 delete mode 100644 debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
 delete mode 100644 debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch
 delete mode 100644 debian/patches/features/all/rt/softirq-split-locks.patch
 delete mode 100644 debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
 delete mode 100644 debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch
 delete mode 100644 debian/patches/features/all/rt/spinlock-types-separate-raw.patch
 delete mode 100644 debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch
 delete mode 100644 debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
 delete mode 100644 debian/patches/features/all/rt/stop-machine-raw-lock.patch
 delete mode 100644 debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
 delete mode 100644 debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
 delete mode 100644 debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch
 delete mode 100644 debian/patches/features/all/rt/sysfs-realtime-entry.patch
 delete mode 100644 debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
 delete mode 100644 debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch
 delete mode 100644 debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch
 delete mode 100644 debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch
 delete mode 100644 debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
 delete mode 100644 debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch
 delete mode 100644 debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/timers-preempt-rt-support.patch
 delete mode 100644 debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch
 delete mode 100644 debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch
 delete mode 100644 debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch
 delete mode 100644 debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
 delete mode 100644 debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch
 delete mode 100644 debian/patches/features/all/rt/user-use-local-irq-nort.patch
 delete mode 100644 debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch
 delete mode 100644 debian/patches/features/all/rt/wait-simple-implementation.patch
 delete mode 100644 debian/patches/features/all/rt/wait.h-include-atomic.h.patch
 delete mode 100644 debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch
 delete mode 100644 debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch
 delete mode 100644 debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch
 delete mode 100644 debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch
 delete mode 100644 debian/patches/features/all/rt/workqueue-use-locallock.patch
 delete mode 100644 debian/patches/features/all/rt/workqueue-use-rcu.patch
 delete mode 100644 debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch
 delete mode 100644 debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch
 delete mode 100644 debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch
 delete mode 100644 debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch
 delete mode 100644 debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch
 delete mode 100644 debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch
 delete mode 100644 debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch
 delete mode 100644 debian/patches/features/all/rt/x86-preempt-lazy.patch
 delete mode 100644 debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch
 delete mode 100644 debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch
 delete mode 100644 debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch
 delete mode 100644 debian/patches/features/arm/arm-orion-always-use-multi_irq_handler.patch
 delete mode 100644 debian/patches/features/arm/arm-orion-move-watchdog-setup-to-mach-orion5x.patch
 delete mode 100644 debian/patches/features/arm/arm-orion-use-sparse_irq-everywhere.patch
 delete mode 100644 debian/patches/features/arm/arm-orion5x-clean-up-mach-.h-headers.patch
 delete mode 100644 debian/patches/features/arm/arm-orion5x-multiplatform-support.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-add-a-compat-string-for-bcm2836-machine-.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-add-devicetree-for-bcm2836-and-raspberry.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-add-kconfig-support-for-bcm2836.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-add-rpi-power-domain-driver.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-add-the-auxiliary-clocks-to-the-device-t.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-define-two-new-packets-from-the-latest-f.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-move-the-cpu-peripheral-include-out-of-c.patch
 delete mode 100644 debian/patches/features/arm/rpi/arm-bcm2835-split-the-dt-for-peripherals-from-the-dt.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-create-a-driver-hook-for-allocating-gem-object-s.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-a-bo-cache.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-an-api-for-creating-gpu-shaders-in-gem-b.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-an-interface-for-capturing-the-gpu-state.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-create-and-map-bo-ioctls.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-support-for-async-pageflips.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-add-support-for-drawing-3d-frames.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-allocate-enough-memory-in-vc4_save_hang_stat.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-bind-and-initialize-the-v3d-engine.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-copy_to_user-returns-the-number-of-bytes-rem.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-fix-a-typo-in-a-v3d-debug-register.patch
 delete mode 100644 debian/patches/features/arm/rpi/drm-vc4-fix-an-error-code.patch
 delete mode 100644 debian/patches/features/arm/rpi/dt-bindings-add-root-properties-for-raspberry-pi-2.patch
 delete mode 100644 debian/patches/features/arm/rpi/dt-bindings-add-rpi-power-domain-driver-bindings.patch
 delete mode 100644 debian/patches/features/arm/rpi/pwm-bcm2835-calculate-scaler-in-config.patch
 delete mode 100644 debian/patches/features/arm/rpi/pwm-bcm2835-fix-email-address-specification.patch
 delete mode 100644 debian/patches/features/arm/rpi/pwm-bcm2835-prevent-division-by-zero.patch
 delete mode 100644 debian/patches/series-rt

diff --git a/debian/changelog b/debian/changelog
index 8c95f2c46..38f4977e5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,7 +1,14 @@
-linux (4.4.1-1~exp2) UNRELEASED; urgency=medium
+linux (4.5~rc4-1~exp1) UNRELEASED; urgency=medium
 
+  * New upstream release candidate
+
+  [ Roger Shimizu ]
   * Enable TTY_PRINTK as module (Closes: #814540).
 
+  [ Ben Hutchings ]
+  * [rt] Disable until it is updated for 4.5 or later
+  * aufs: Update support patches to aufs4.x-rcN-20160215
+
  -- Roger Shimizu  Sun, 14 Feb 2016 00:32:40 +0900
 
 linux (4.4.1-1~exp1) experimental; urgency=medium
diff --git a/debian/config/defines b/debian/config/defines
index fb8a712be..ec784fd74 100644
--- a/debian/config/defines
+++ b/debian/config/defines
@@ -32,7 +32,7 @@ featuresets:
  rt
 
 [featureset-rt_base]
-enabled: true
+enabled: false
 
 [description]
 part-long-up: This kernel is not suitable for SMP (multi-processor,
diff --git a/debian/patches/bugfix/all/bcache-add-a-cond_resched-call-to-gc.patch b/debian/patches/bugfix/all/bcache-add-a-cond_resched-call-to-gc.patch
deleted file mode 100644
index 0580e6163..000000000
--- a/debian/patches/bugfix/all/bcache-add-a-cond_resched-call-to-gc.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Kent Overstreet
-Date: Sun, 29 Nov 2015 17:18:33 -0800
-Subject: [2/8] bcache: Add a cond_resched() call to gc
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=c5f1e5adf956e3ba82d204c7c141a75da9fa449a
-
-Signed-off-by: Takashi Iwai
-Tested-by: Eric Wheeler
-Cc: Kent Overstreet
-Cc: stable@vger.kernel.org
-Signed-off-by: Jens Axboe
----
- drivers/md/bcache/btree.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 4a1179c..22b9e34 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
- 	do {
- 		ret = btree_root(gc_root, c, &op, &writes, &stats);
- 		closure_sync(&writes);
-+		cond_resched();
- 
- 		if (ret && ret != -EAGAIN)
- 			pr_warn("gc failed!");
diff --git a/debian/patches/bugfix/all/bcache-allows-use-of-register-in-udev-to-avoid-devic.patch b/debian/patches/bugfix/all/bcache-allows-use-of-register-in-udev-to-avoid-devic.patch
deleted file mode 100644
index 782979770..000000000
--- a/debian/patches/bugfix/all/bcache-allows-use-of-register-in-udev-to-avoid-devic.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Gabriel de Perthuis
-Date: Sun, 29 Nov 2015 18:40:23 -0800
-Subject: [6/8] bcache: allows use of register in udev to avoid "device_busy"
- error.
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=d7076f21629f8f329bca4a44dc408d94670f49e2
-
-Allows to use register, not register_quiet in udev to avoid "device_busy" error.
-The initial patch proposed at https://lkml.org/lkml/2013/8/26/549 by Gabriel de Perthuis
- does not unlock the mutex and hangs the kernel.
-
-See http://thread.gmane.org/gmane.linux.kernel.bcache.devel/2594 for the discussion.
-
-Cc: Denis Bychkov
-Cc: Kent Overstreet
-Cc: Eric Wheeler
-Cc: Gabriel de Perthuis
-Cc: stable@vger.kernel.org
-
-Signed-off-by: Jens Axboe
----
- drivers/md/bcache/super.c | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 18f14a2..8d0ead9 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1938,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
- 		else
- 			err = "device busy";
- 		mutex_unlock(&bch_register_lock);
-+		if (attr == &ksysfs_register_quiet)
-+			goto out;
- 	}
- 	goto err;
- }
-@@ -1976,8 +1978,7 @@ out:
- err_close:
- 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
- err:
--	if (attr != &ksysfs_register_quiet)
--		pr_info("error opening %s: %s", path, err);
-+	pr_info("error opening %s: %s", path, err);
- 	ret = -EINVAL;
- 	goto out;
- }
diff --git a/debian/patches/bugfix/all/bcache-change-refill_dirty-to-always-scan-entire-dis.patch b/debian/patches/bugfix/all/bcache-change-refill_dirty-to-always-scan-entire-dis.patch
deleted file mode 100644
index 7dae0c8e9..000000000
--- a/debian/patches/bugfix/all/bcache-change-refill_dirty-to-always-scan-entire-dis.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From: Kent Overstreet
-Date: Sun, 29 Nov 2015 18:47:01 -0800
-Subject: [8/8] bcache: Change refill_dirty() to always scan entire disk if
- necessary
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=627ccd20b4ad3ba836472468208e2ac4dfadbf03
-
-Previously, it would only scan the entire disk if it was starting from
-the very start of the disk - i.e. if the previous scan got to the end.
-
-This was broken by refill_full_stripes(), which updates last_scanned so
-that refill_dirty was never triggering the searched_from_start path.
-
-But if we change refill_dirty() to always scan the entire disk if
-necessary, regardless of what last_scanned was, the code gets cleaner
-and we fix that bug too.
-
-Signed-off-by: Kent Overstreet
-Cc: stable@vger.kernel.org
-Signed-off-by: Jens Axboe
----
- drivers/md/bcache/writeback.c | 37 ++++++++++++++++++++++++++++++-------
- 1 file changed, 30 insertions(+), 7 deletions(-)
-
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index b23f88d..b9346cd 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
- 
- static bool dirty_pred(struct keybuf *buf, struct bkey *k)
- {
-+	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
-+
-+	BUG_ON(KEY_INODE(k) != dc->disk.id);
-+
- 	return KEY_DIRTY(k);
- }
- 
-@@ -372,11 +376,24 @@ next:
- 	}
- }
- 
-+/*
-+ * Returns true if we scanned the entire disk
-+ */
- static bool refill_dirty(struct cached_dev *dc)
- {
- 	struct keybuf *buf = &dc->writeback_keys;
-+	struct bkey start = KEY(dc->disk.id, 0, 0);
- 	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
--	bool searched_from_start = false;
-+	struct bkey start_pos;
-+
-+	/*
-+	 * make sure keybuf pos is inside the range for this disk - at bringup
-+	 * we might not be attached yet so this disk's inode nr isn't
-+	 * initialized then
-+	 */
-+	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
-+	    bkey_cmp(&buf->last_scanned, &end) > 0)
-+		buf->last_scanned = start;
- 
- 	if (dc->partial_stripes_expensive) {
- 		refill_full_stripes(dc);
-@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
- 		return false;
- 	}
- 
--	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
--		buf->last_scanned = KEY(dc->disk.id, 0, 0);
--		searched_from_start = true;
--	}
--
-+	start_pos = buf->last_scanned;
- 	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- 
--	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
-+	if (bkey_cmp(&buf->last_scanned, &end) < 0)
-+		return false;
-+
-+	/*
-+	 * If we get to the end start scanning again from the beginning, and
-+	 * only scan up to where we initially started scanning from:
-+	 */
-+	buf->last_scanned = start;
-+	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
-+
-+	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
- }
- 
- static int bch_writeback_thread(void *arg)
diff --git a/debian/patches/bugfix/all/bcache-clear-bcache_dev_unlink_done-flag-when-attach.patch b/debian/patches/bugfix/all/bcache-clear-bcache_dev_unlink_done-flag-when-attach.patch
deleted file mode 100644
index 83e0bf81f..000000000
--- a/debian/patches/bugfix/all/bcache-clear-bcache_dev_unlink_done-flag-when-attach.patch
+++ /dev/null
@@ -1,109 +0,0 @@
-From: Zheng Liu
-Date: Sun, 29 Nov 2015 17:19:32 -0800
-Subject: [3/8] bcache: clear BCACHE_DEV_UNLINK_DONE flag when attaching a
- backing device
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=fecaee6f20ee122ad75402c53d8278f9bb142ddc
-
-This bug can be reproduced by the following script:
-
-    #!/bin/bash
-
-    bcache_sysfs="/sys/fs/bcache"
-
-    function clear_cache()
-    {
-        if [ ! -e $bcache_sysfs ]; then
-            echo "no bcache sysfs"
-            exit
-        fi
-
-        cset_uuid=$(ls -l $bcache_sysfs|head -n 2|tail -n 1|awk '{print $9}')
-        sudo sh -c "echo $cset_uuid > /sys/block/sdb/sdb1/bcache/detach"
-        sleep 5
-        sudo sh -c "echo $cset_uuid > /sys/block/sdb/sdb1/bcache/attach"
-    }
-
-    for ((i=0;i<10;i++)); do
-        clear_cache
-    done
-
-The warning messages look like below:
-[ 275.948611] ------------[ cut here ]------------
-[ 275.963840] WARNING: at fs/sysfs/dir.c:512 sysfs_add_one+0xb8/0xd0() (Tainted: P W ---------------- )
-[ 275.979253] Hardware name: Tecal RH2285
-[ 275.994106] sysfs: cannot create duplicate filename '/devices/pci0000:00/0000:00:09.0/0000:08:00.0/host4/target4:2:1/4:2:1:0/block/sdb/sdb1/bcache/cache'
-[ 276.024105] Modules linked in: bcache tcp_diag inet_diag ipmi_devintf ipmi_si ipmi_msghandler
-bonding 8021q garp stp llc ipv6 ext3 jbd loop sg iomemory_vsl(P) bnx2 microcode serio_raw i2c_i801
-i2c_core iTCO_wdt iTCO_vendor_support i7core_edac edac_core shpchp ext4 jbd2 mbcache megaraid_sas
-pata_acpi ata_generic ata_piix dm_mod [last unloaded: scsi_wait_scan]
-[ 276.072643] Pid: 2765, comm: sh Tainted: P W --------------- 2.6.32 #1
-[ 276.089315] Call Trace:
-[ 276.105801] [] ? warn_slowpath_common+0x87/0xc0
-[ 276.122650] [] ? warn_slowpath_fmt+0x46/0x50
-[ 276.139361] [] ? sysfs_add_one+0xb8/0xd0
-[ 276.156012] [] ? sysfs_do_create_link+0x12b/0x170
-[ 276.172682] [] ? sysfs_create_link+0x13/0x20
-[ 276.189282] [] ? bcache_device_link+0xc1/0x110 [bcache]
-[ 276.205993] [] ? bch_cached_dev_attach+0x478/0x4f0 [bcache]
-[ 276.222794] [] ? bch_cached_dev_store+0x627/0x780 [bcache]
-[ 276.239680] [] ? alloc_pages_current+0xaa/0x110
-[ 276.256594] [] ? sysfs_write_file+0xe5/0x170
-[ 276.273364] [] ? vfs_write+0xb8/0x1a0
-[ 276.290133] [] ? sys_write+0x51/0x90
-[ 276.306368] [] ? system_call_fastpath+0x16/0x1b
-[ 276.322301] ---[ end trace 9f5d4fcdd0c3edfb ]---
-[ 276.338241] ------------[ cut here ]------------
-[ 276.354109] WARNING: at /home/wenqing.lz/bcache/bcache/super.c:720
-bcache_device_link+0xdf/0x110 [bcache]() (Tainted: P W --------------- )
-[ 276.386017] Hardware name: Tecal RH2285
-[ 276.401430] Couldn't create device <-> cache set symlinks
-[ 276.401759] Modules linked in: bcache tcp_diag inet_diag ipmi_devintf ipmi_si ipmi_msghandler
-bonding 8021q garp stp llc ipv6 ext3 jbd loop sg iomemory_vsl(P) bnx2 microcode serio_raw i2c_i801
-i2c_core iTCO_wdt iTCO_vendor_support i7core_edac edac_core shpchp ext4 jbd2 mbcache megaraid_sas
-pata_acpi ata_generic ata_piix dm_mod [last unloaded: scsi_wait_scan]
-[ 276.465477] Pid: 2765, comm: sh Tainted: P W --------------- 2.6.32 #1
-[ 276.482169] Call Trace:
-[ 276.498610] [] ? warn_slowpath_common+0x87/0xc0
-[ 276.515405] [] ? warn_slowpath_fmt+0x46/0x50
-[ 276.532059] [] ? bcache_device_link+0xdf/0x110 [bcache]
-[ 276.548808] [] ? bch_cached_dev_attach+0x478/0x4f0 [bcache]
-[ 276.565569] [] ? bch_cached_dev_store+0x627/0x780 [bcache]
-[ 276.582418] [] ? alloc_pages_current+0xaa/0x110
-[ 276.599341] [] ? sysfs_write_file+0xe5/0x170
-[ 276.616142] [] ? vfs_write+0xb8/0x1a0
-[ 276.632607] [] ? sys_write+0x51/0x90
-[ 276.648671] [] ? system_call_fastpath+0x16/0x1b
-[ 276.664756] ---[ end trace 9f5d4fcdd0c3edfc ]---
-
-We forget to clear BCACHE_DEV_UNLINK_DONE flag in bcache_device_attach()
-function when we attach a backing device first time.  After detaching this
-backing device, this flag will be true and sysfs_remove_link() isn't called in
-bcache_device_unlink().  Then when we attach this backing device again,
-sysfs_create_link() will return EEXIST error in bcache_device_link().
-
-So the fix is trival and we clear this flag in bcache_device_link().
-
-Signed-off-by: Zheng Liu
-Tested-by: Joshua Schmid
-Tested-by: Eric Wheeler
-Cc: Kent Overstreet
-Cc: stable@vger.kernel.org
-Signed-off-by: Jens Axboe
----
- drivers/md/bcache/super.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 679a093..383f060 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
- 	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
- 	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
- 	     "Couldn't create device <-> cache set symlinks");
-+
-+	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
- }
- 
- static void bcache_device_detach(struct bcache_device *d)
diff --git a/debian/patches/bugfix/all/bcache-fix-a-leak-in-bch_cached_dev_run.patch b/debian/patches/bugfix/all/bcache-fix-a-leak-in-bch_cached_dev_run.patch
deleted file mode 100644
index b287e3226..000000000
--- a/debian/patches/bugfix/all/bcache-fix-a-leak-in-bch_cached_dev_run.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Al Viro
-Date: Sun, 29 Nov 2015 17:20:59 -0800
-Subject: [4/8] bcache: fix a leak in bch_cached_dev_run()
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=4d4d8573a8451acc9f01cbea24b7e55f04a252fe
-
-Signed-off-by: Al Viro
-Tested-by: Joshua Schmid
-Tested-by: Eric Wheeler
-Cc: Kent Overstreet
-Cc: stable@vger.kernel.org
-Signed-off-by: Jens Axboe
----
- drivers/md/bcache/super.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 383f060..43e911e 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -849,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
- 		buf[SB_LABEL_SIZE] = '\0';
- 		env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
- 
--	if (atomic_xchg(&dc->running, 1))
-+	if (atomic_xchg(&dc->running, 1)) {
-+		kfree(env[1]);
-+		kfree(env[2]);
- 		return;
-+	}
- 
- 	if (!d->c &&
- 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
diff --git a/debian/patches/bugfix/all/bcache-fix-a-livelock-when-we-cause-a-huge-number-of.patch b/debian/patches/bugfix/all/bcache-fix-a-livelock-when-we-cause-a-huge-number-of.patch
deleted file mode 100644
index 88a52f7b1..000000000
--- a/debian/patches/bugfix/all/bcache-fix-a-livelock-when-we-cause-a-huge-number-of.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Zheng Liu
-Date: Sun, 29 Nov 2015 17:17:05 -0800
-Subject: [1/8] bcache: fix a livelock when we cause a huge number of cache
- misses
-Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=2ef9ccbfcb90cf84bdba320a571b18b05c41101b
-
-Subject : [PATCH v2] bcache: fix a livelock in btree lock
-Date : Wed, 25 Feb 2015 20:32:09 +0800 (02/25/2015 04:32:09 AM)
-
-This commit tries to fix a livelock in bcache.  This livelock might
-happen when we causes a huge number of cache misses simultaneously.
-
-When we get a cache miss, bcache will execute the following path.
- -->cached_dev_make_request() - ->cached_dev_read() - ->cached_lookup() - ->bch->btree_map_keys() - ->btree_root() <------------------------ - ->bch_btree_map_keys_recurse() | - ->cache_lookup_fn() | - ->cached_dev_cache_miss() | - ->bch_btree_insert_check_key() -| - [If btree->seq is not equal to seq + 1, we should return - EINTR and traverse btree again.] - -In bch_btree_insert_check_key() function we first need to check upgrade -flag (op->lock == -1), and when this flag is true we need to release -read btree->lock and try to take write btree->lock. During taking and -releasing this write lock, btree->seq will be monotone increased in -order to prevent other threads modify this in cache miss (see btree.h:74). -But if there are some cache misses caused by some requested, we could -meet a livelock because btree->seq is always changed by others. Thus no -one can make progress. - -This commit will try to take write btree->lock if it encounters a race -when we traverse btree. Although it sacrifice the scalability but we -can ensure that only one can modify the btree. - -Signed-off-by: Zheng Liu -Tested-by: Joshua Schmid -Tested-by: Eric Wheeler -Cc: Joshua Schmid -Cc: Zhu Yanhai -Cc: Kent Overstreet -Cc: stable@vger.kernel.org -Signed-off-by: Jens Axboe ---- - drivers/md/bcache/btree.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c -index 83392f8..4a1179c 100644 ---- a/drivers/md/bcache/btree.c -+++ b/drivers/md/bcache/btree.c -@@ -2162,8 +2162,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, - rw_lock(true, b, b->level); - - if (b->key.ptr[0] != btree_ptr || -- b->seq != seq + 1) -+ b->seq != seq + 1) { -+ op->lock = b->level; - goto out; -+ } - } - - SET_KEY_PTRS(check_key, 1); diff --git a/debian/patches/bugfix/all/bcache-prevent-crash-on-changing-writeback_running.patch b/debian/patches/bugfix/all/bcache-prevent-crash-on-changing-writeback_running.patch deleted file mode 100644 index 5d4736fe6..000000000 --- a/debian/patches/bugfix/all/bcache-prevent-crash-on-changing-writeback_running.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Stefan Bader -Date: Sun, 29 Nov 2015 18:44:49 -0800 -Subject: [7/8] bcache: prevent crash on changing writeback_running -Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=8d16ce540c94c9d366eb36fc91b7154d92d6397b - -Added a safeguard in the shutdown case. At least while not being -attached it is also possible to trigger a kernel bug by writing into -writeback_running. This change adds the same check before trying to -wake up the thread for that case. 
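A minimal sketch, outside the quoted patch, of the guard this fix relies on: a kthread pointer is NULL before the thread exists and ERR_PTR-encoded after a failed kthread_create(), and IS_ERR_OR_NULL() rejects both states. The helper name below is a hypothetical stand-in for bch_writeback_queue().

    #include <linux/err.h>
    #include <linux/sched.h>

    /* Wake a writeback thread only if it was actually created. */
    static inline void writeback_queue_sketch(struct task_struct *thread)
    {
            if (!IS_ERR_OR_NULL(thread))    /* rejects NULL and ERR_PTR */
                    wake_up_process(thread);
    }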
- -Signed-off-by: Stefan Bader -Cc: Kent Overstreet -Cc: stable@vger.kernel.org -Signed-off-by: Jens Axboe ---- - drivers/md/bcache/writeback.h | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h -index 0a9dab1..073a042 100644 ---- a/drivers/md/bcache/writeback.h -+++ b/drivers/md/bcache/writeback.h -@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, - - static inline void bch_writeback_queue(struct cached_dev *dc) - { -- wake_up_process(dc->writeback_thread); -+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) -+ wake_up_process(dc->writeback_thread); - } - - static inline void bch_writeback_add(struct cached_dev *dc) diff --git a/debian/patches/bugfix/all/bcache-unregister-reboot-notifier-if-bcache-fails-to.patch b/debian/patches/bugfix/all/bcache-unregister-reboot-notifier-if-bcache-fails-to.patch deleted file mode 100644 index 538f34b02..000000000 --- a/debian/patches/bugfix/all/bcache-unregister-reboot-notifier-if-bcache-fails-to.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Zheng Liu -Date: Sun, 29 Nov 2015 17:21:57 -0800 -Subject: [5/8] bcache: unregister reboot notifier if bcache fails to - unregister device -Origin: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git/commit?id=2ecf0cdb2b437402110ab57546e02abfa68a716b - -In bcache_init() function it forgot to unregister reboot notifier if -bcache fails to unregister a block device. This commit fixes this. - -Signed-off-by: Zheng Liu -Tested-by: Joshua Schmid -Tested-by: Eric Wheeler -Cc: Kent Overstreet -Cc: stable@vger.kernel.org -Signed-off-by: Jens Axboe ---- - drivers/md/bcache/super.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c -index 43e911e..18f14a2 100644 ---- a/drivers/md/bcache/super.c -+++ b/drivers/md/bcache/super.c -@@ -2071,8 +2071,10 @@ static int __init bcache_init(void) - closure_debug_init(); - - bcache_major = register_blkdev(0, "bcache"); -- if (bcache_major < 0) -+ if (bcache_major < 0) { -+ unregister_reboot_notifier(&reboot); - return bcache_major; -+ } - - if (!(bcache_wq = create_workqueue("bcache")) || - !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || diff --git a/debian/patches/bugfix/all/disable-some-marvell-phys.patch b/debian/patches/bugfix/all/disable-some-marvell-phys.patch index 5b30bbccd..f6c772777 100644 --- a/debian/patches/bugfix/all/disable-some-marvell-phys.patch +++ b/debian/patches/bugfix/all/disable-some-marvell-phys.patch @@ -16,7 +16,7 @@ correctness. --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c -@@ -591,6 +591,7 @@ static int m88e1118_config_init(struct p +@@ -681,6 +681,7 @@ static int m88e1118_config_init(struct p return phy_write(phydev, MII_BMCR, BMCR_RESET); } @@ -24,7 +24,7 @@ correctness. static int m88e1149_config_init(struct phy_device *phydev) { int err; -@@ -616,7 +617,9 @@ static int m88e1149_config_init(struct p +@@ -706,7 +707,9 @@ static int m88e1149_config_init(struct p return phy_write(phydev, MII_BMCR, BMCR_RESET); } @@ -34,7 +34,7 @@ correctness. static int m88e1145_config_init(struct phy_device *phydev) { int err; -@@ -682,6 +685,7 @@ static int m88e1145_config_init(struct p +@@ -787,6 +790,7 @@ static int m88e1145_config_init(struct p return 0; } @@ -42,41 +42,41 @@ correctness. 
/* marvell_read_status * -@@ -975,6 +979,7 @@ static struct phy_driver marvell_drivers - .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, +@@ -1180,6 +1184,7 @@ static struct phy_driver marvell_drivers + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, +#if 0 { .phy_id = MARVELL_PHY_ID_88E1145, .phy_id_mask = MARVELL_PHY_ID_MASK, -@@ -990,6 +995,8 @@ static struct phy_driver marvell_drivers - .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, +@@ -1198,6 +1203,8 @@ static struct phy_driver marvell_drivers + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, +#endif +#if 0 { .phy_id = MARVELL_PHY_ID_88E1149R, .phy_id_mask = MARVELL_PHY_ID_MASK, -@@ -1005,6 +1012,8 @@ static struct phy_driver marvell_drivers - .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, +@@ -1216,6 +1223,8 @@ static struct phy_driver marvell_drivers + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, +#endif +#if 0 { .phy_id = MARVELL_PHY_ID_88E1240, .phy_id_mask = MARVELL_PHY_ID_MASK, -@@ -1020,6 +1029,7 @@ static struct phy_driver marvell_drivers - .suspend = &genphy_suspend, - .driver = { .owner = THIS_MODULE }, +@@ -1234,6 +1243,7 @@ static struct phy_driver marvell_drivers + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, }, +#endif { .phy_id = MARVELL_PHY_ID_88E1116R, .phy_id_mask = MARVELL_PHY_ID_MASK, -@@ -1073,9 +1083,9 @@ static struct mdio_device_id __maybe_unu +@@ -1318,9 +1328,9 @@ static struct mdio_device_id __maybe_unu { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK }, diff --git a/debian/patches/bugfix/all/firmware-remove-redundant-log-messages-from-drivers.patch b/debian/patches/bugfix/all/firmware-remove-redundant-log-messages-from-drivers.patch index 123c29da6..2e2ffa42a 100644 --- a/debian/patches/bugfix/all/firmware-remove-redundant-log-messages-from-drivers.patch +++ b/debian/patches/bugfix/all/firmware-remove-redundant-log-messages-from-drivers.patch @@ -122,7 +122,7 @@ upstream submission. release_firmware(firmware); --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c -@@ -193,7 +193,6 @@ static int bcm203x_probe(struct usb_inte +@@ -191,7 +191,6 @@ static int bcm203x_probe(struct usb_inte } if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { @@ -130,7 +130,7 @@ upstream submission. usb_free_urb(data->urb); return -EIO; } -@@ -218,7 +217,6 @@ static int bcm203x_probe(struct usb_inte +@@ -216,7 +215,6 @@ static int bcm203x_probe(struct usb_inte release_firmware(firmware); if (request_firmware(&firmware, "BCM2033-FW.bin", &udev->dev) < 0) { @@ -140,7 +140,7 @@ upstream submission. return -EIO; --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c -@@ -653,10 +653,8 @@ static int bfusb_probe(struct usb_interf +@@ -652,10 +652,8 @@ static int bfusb_probe(struct usb_interf skb_queue_head_init(&data->pending_q); skb_queue_head_init(&data->completed_q); @@ -154,7 +154,7 @@ upstream submission. --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c -@@ -565,10 +565,8 @@ static int bt3c_open(struct bt3c_info *i +@@ -566,10 +566,8 @@ static int bt3c_open(struct bt3c_info *i /* Load firmware */ err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev); @@ -233,7 +233,7 @@ upstream submission. 
where = 0; --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c -@@ -1646,10 +1646,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, co +@@ -1736,10 +1736,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, co snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); ret = request_firmware(&fw, f, device->dev); @@ -300,33 +300,6 @@ upstream submission. release_firmware(rdev->pfp_fw); rdev->pfp_fw = NULL; release_firmware(rdev->me_fw); ---- a/drivers/gpu/drm/radeon/r600_cp.c -+++ b/drivers/gpu/drm/radeon/r600_cp.c -@@ -376,10 +376,6 @@ out: - platform_device_unregister(pdev); - - if (err) { -- if (err != -EINVAL) -- printk(KERN_ERR -- "r600_cp: Failed to load firmware \"%s\"\n", -- fw_name); - release_firmware(dev_priv->pfp_fw); - dev_priv->pfp_fw = NULL; - release_firmware(dev_priv->me_fw); ---- a/drivers/gpu/drm/radeon/radeon_cp.c -+++ b/drivers/gpu/drm/radeon/radeon_cp.c -@@ -530,10 +530,7 @@ static int radeon_cp_init_microcode(drm_ - - err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev); - platform_device_unregister(pdev); -- if (err) { -- printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", -- fw_name); -- } else if (dev_priv->me_fw->size % 8) { -+ if (err == 0 && dev_priv->me_fw->size % 8) { - printk(KERN_ERR - "radeon_cp: Bogus length %zu in firmware \"%s\"\n", - dev_priv->me_fw->size, fw_name); --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -406,10 +406,8 @@ int qib_sd7220_init(struct qib_devdata * @@ -343,7 +316,7 @@ upstream submission. ret = qib_ibsd_ucode_loaded(dd->pport, fw); --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c -@@ -2209,10 +2209,8 @@ static int mxt_load_fw(struct device *de +@@ -2193,10 +2193,8 @@ static int mxt_load_fw(struct device *de int ret; ret = request_firmware(&fw, fn, dev); @@ -754,7 +727,7 @@ upstream submission. - printk(KERN_ERR "dvb-ttpci: and can be downloaded from" + if (ret == -ENOENT) + printk(KERN_ERR "dvb-ttpci: firmware can be downloaded from" - " http://www.linuxtv.org/download/dvb/firmware/\n"); + " https://linuxtv.org/download/dvb/firmware/\n"); - } else - printk(KERN_ERR "dvb-ttpci: cannot request firmware" - " (error %i)\n", ret); @@ -961,7 +934,7 @@ upstream submission. printk(KERN_ERR "ERROR: Firmware size mismatch " --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c -@@ -2165,11 +2165,7 @@ void cx23885_card_setup(struct cx23885_d +@@ -2279,11 +2279,7 @@ void cx23885_card_setup(struct cx23885_d cinfo.rev, filename); ret = request_firmware(&fw, filename, &dev->pci->dev); @@ -1216,7 +1189,7 @@ upstream submission. if (bp->mips_firmware->size < sizeof(*mips_fw) || --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c -@@ -13401,11 +13401,8 @@ static int bnx2x_init_firmware(struct bn +@@ -13398,11 +13398,8 @@ static int bnx2x_init_firmware(struct bn BNX2X_DEV_INFO("Loading %s\n", fw_file_name); rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); @@ -1231,7 +1204,7 @@ upstream submission. if (rc) { --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c -@@ -11335,11 +11335,8 @@ static int tg3_request_firmware(struct t +@@ -11346,11 +11346,8 @@ static int tg3_request_firmware(struct t { const struct tg3_firmware_hdr *fw_hdr; @@ -1260,7 +1233,7 @@ upstream submission. 
*bfi_image_size = fw->size/sizeof(u32); --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c -@@ -1034,12 +1034,8 @@ int t3_get_edc_fw(struct cphy *phy, int +@@ -1036,12 +1036,8 @@ int t3_get_edc_fw(struct cphy *phy, int fw_name = get_edc_fw_name(edc_idx); if (fw_name) ret = request_firmware(&fw, fw_name, &adapter->pdev->dev); @@ -1274,7 +1247,7 @@ upstream submission. /* check size, take checksum in account */ if (fw->size > size + 4) { -@@ -1076,11 +1072,8 @@ static int upgrade_fw(struct adapter *ad +@@ -1078,11 +1074,8 @@ static int upgrade_fw(struct adapter *ad struct device *dev = &adap->pdev->dev; ret = request_firmware(&fw, FW_FNAME, dev); @@ -1287,7 +1260,7 @@ upstream submission. ret = t3_load_fw(adap, fw->data, fw->size); release_firmware(fw); -@@ -1125,11 +1118,8 @@ static int update_tpsram(struct adapter +@@ -1127,11 +1120,8 @@ static int update_tpsram(struct adapter snprintf(buf, sizeof(buf), TPSRAM_NAME, rev); ret = request_firmware(&tpsram, buf, dev); @@ -1405,8 +1378,8 @@ upstream submission. kfree(i2400m_fw); i2400m_fw = (void *) ~0; } else ---- a/drivers/net/wireless/at76c50x-usb.c -+++ b/drivers/net/wireless/at76c50x-usb.c +--- a/drivers/net/wireless/atmel/at76c50x-usb.c ++++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -1622,13 +1622,8 @@ static struct fwentry *at76_load_firmwar at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname); @@ -1444,8 +1417,8 @@ upstream submission. carl9170_usb_firmware_failed(ar); } ---- a/drivers/net/wireless/atmel.c -+++ b/drivers/net/wireless/atmel.c +--- a/drivers/net/wireless/atmel/atmel.c ++++ b/drivers/net/wireless/atmel/atmel.c @@ -3917,12 +3917,8 @@ static int reset_atmel_card(struct net_d strcpy(priv->firmware_id, "atmel_at76c502.bin"); } @@ -1460,8 +1433,8 @@ upstream submission. } else { int fw_index = 0; int success = 0; ---- a/drivers/net/wireless/b43/main.c -+++ b/drivers/net/wireless/b43/main.c +--- a/drivers/net/wireless/broadcom/b43/main.c ++++ b/drivers/net/wireless/broadcom/b43/main.c @@ -2253,19 +2253,8 @@ int b43_do_request_fw(struct b43_request } err = request_firmware(&ctx->blob, ctx->fwname, @@ -1483,8 +1456,8 @@ upstream submission. fw_ready: if (ctx->blob->size < sizeof(struct b43_fw_header)) goto err_format; ---- a/drivers/net/wireless/b43legacy/main.c -+++ b/drivers/net/wireless/b43legacy/main.c +--- a/drivers/net/wireless/broadcom/b43legacy/main.c ++++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -1554,11 +1554,8 @@ static int do_request_fw(struct b43legac } else { err = request_firmware(fw, path, dev->dev->dev); @@ -1498,8 +1471,8 @@ upstream submission. if ((*fw)->size < sizeof(struct b43legacy_fw_header)) goto err_format; hdr = (struct b43legacy_fw_header *)((*fw)->data); ---- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c -+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -378,19 +378,13 @@ static int brcms_request_fw(struct brcms sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], UCODE_LOADER_API_VER); @@ -1522,9 +1495,9 @@ upstream submission. 
wl->fw.hdr_num_entries[i] = wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); } ---- a/drivers/net/wireless/ipw2x00/ipw2100.c -+++ b/drivers/net/wireless/ipw2x00/ipw2100.c -@@ -8411,12 +8411,8 @@ static int ipw2100_get_firmware(struct i +--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +@@ -8418,12 +8418,8 @@ static int ipw2100_get_firmware(struct i rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev); @@ -1538,8 +1511,8 @@ upstream submission. IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data, fw->fw_entry->size); ---- a/drivers/net/wireless/ipw2x00/ipw2200.c -+++ b/drivers/net/wireless/ipw2x00/ipw2200.c +--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c ++++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3418,10 +3418,8 @@ static int ipw_get_fw(struct ipw_priv *p /* ask firmware_class module to get the boot firmware off disk */ @@ -1552,8 +1525,8 @@ upstream submission. if ((*raw)->size < sizeof(*fw)) { IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); ---- a/drivers/net/wireless/iwlegacy/3945-mac.c -+++ b/drivers/net/wireless/iwlegacy/3945-mac.c +--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c ++++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -1861,7 +1861,6 @@ il3945_read_ucode(struct il_priv *il) sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); @@ -1562,9 +1535,9 @@ upstream submission. if (ret == -ENOENT) continue; else ---- a/drivers/net/wireless/iwlwifi/iwl-drv.c -+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c -@@ -1201,13 +1201,8 @@ static void iwl_req_fw_callback(const st +--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +@@ -1206,13 +1206,8 @@ static void iwl_req_fw_callback(const st if (!pieces) return; @@ -1579,8 +1552,8 @@ upstream submission. IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); ---- a/drivers/net/wireless/libertas_tf/if_usb.c -+++ b/drivers/net/wireless/libertas_tf/if_usb.c +--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c ++++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -824,8 +824,6 @@ static int if_usb_prog_firmware(struct i kernel_param_lock(THIS_MODULE); ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev); @@ -1590,8 +1563,8 @@ upstream submission. kernel_param_unlock(THIS_MODULE); goto done; } ---- a/drivers/net/wireless/mwifiex/main.c -+++ b/drivers/net/wireless/mwifiex/main.c +--- a/drivers/net/wireless/marvell/mwifiex/main.c ++++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -508,11 +508,8 @@ static void mwifiex_fw_dpc(const struct bool init_failed = false; struct wireless_dev *wdev; @@ -1605,8 +1578,8 @@ upstream submission. memset(&fw, 0, sizeof(struct mwifiex_fw_image)); adapter->firmware = firmware; ---- a/drivers/net/wireless/mwl8k.c -+++ b/drivers/net/wireless/mwl8k.c +--- a/drivers/net/wireless/marvell/mwl8k.c ++++ b/drivers/net/wireless/marvell/mwl8k.c @@ -5712,16 +5712,12 @@ static int mwl8k_firmware_load_success(s static void mwl8k_fw_state_machine(const struct firmware *fw, void *context) { @@ -1650,8 +1623,8 @@ upstream submission. 
if (nowait) return rc; ---- a/drivers/net/wireless/orinoco/fw.c -+++ b/drivers/net/wireless/orinoco/fw.c +--- a/drivers/net/wireless/intersil/orinoco/fw.c ++++ b/drivers/net/wireless/intersil/orinoco/fw.c @@ -132,7 +132,6 @@ orinoco_dl_firmware(struct orinoco_priva err = request_firmware(&fw_entry, firmware, priv->dev); @@ -1684,8 +1657,8 @@ upstream submission. } else fw_entry = orinoco_cached_fw_get(priv, false); ---- a/drivers/net/wireless/orinoco/orinoco_usb.c -+++ b/drivers/net/wireless/orinoco/orinoco_usb.c +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -1669,7 +1669,6 @@ static int ezusb_probe(struct usb_interf if (ezusb_firmware_download(upriv, &firmware) < 0) goto error; @@ -1694,8 +1667,8 @@ upstream submission. goto error; } ---- a/drivers/net/wireless/p54/p54pci.c -+++ b/drivers/net/wireless/p54/p54pci.c +--- a/drivers/net/wireless/intersil/p54/p54pci.c ++++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -499,7 +499,6 @@ static void p54p_firmware_step2(const st int err; @@ -1704,8 +1677,8 @@ upstream submission. err = -ENOENT; goto out; } ---- a/drivers/net/wireless/p54/p54spi.c -+++ b/drivers/net/wireless/p54/p54spi.c +--- a/drivers/net/wireless/intersil/p54/p54spi.c ++++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -170,10 +170,8 @@ static int p54spi_request_firmware(struc /* FIXME: should driver use it's own struct device? */ ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev); @@ -1718,8 +1691,8 @@ upstream submission. ret = p54_parse_firmware(dev, priv->firmware); if (ret) { ---- a/drivers/net/wireless/p54/p54usb.c -+++ b/drivers/net/wireless/p54/p54usb.c +--- a/drivers/net/wireless/intersil/p54/p54usb.c ++++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -929,7 +929,6 @@ static void p54u_load_firmware_cb(const err = p54u_start_ops(priv); } else { @@ -1728,8 +1701,8 @@ upstream submission. } if (err) { ---- a/drivers/net/wireless/prism54/islpci_dev.c -+++ b/drivers/net/wireless/prism54/islpci_dev.c +--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c ++++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c @@ -92,12 +92,9 @@ isl_upload_firmware(islpci_private *priv const u32 *fw_ptr; @@ -1745,8 +1718,8 @@ upstream submission. /* prepare the Direct Memory Base register */ reg = ISL38XX_DEV_FIRMWARE_ADDRES; ---- a/drivers/net/wireless/rt2x00/rt2x00firmware.c -+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c @@ -49,10 +49,8 @@ static int rt2x00lib_request_firmware(st rt2x00_info(rt2x00dev, "Loading firmware file '%s'\n", fw_name); @@ -1807,7 +1780,7 @@ upstream submission. wl1251_error("nvs size is not multiple of 32 bits: %zu", --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c -@@ -762,10 +762,8 @@ static int wl12xx_fetch_firmware(struct +@@ -747,10 +747,8 @@ static int wl12xx_fetch_firmware(struct ret = request_firmware(&fw, fw_name, wl->dev); @@ -1819,8 +1792,8 @@ upstream submission. if (fw->size % 4) { wl1271_error("firmware size is not multiple of 32 bits: %zu", ---- a/drivers/net/wireless/zd1201.c -+++ b/drivers/net/wireless/zd1201.c +--- a/drivers/net/wireless/zydas/zd1201.c ++++ b/drivers/net/wireless/zydas/zd1201.c @@ -65,8 +65,6 @@ static int zd1201_fw_upload(struct usb_d err = request_firmware(&fw_entry, fwfile, &dev->dev); @@ -1830,8 +1803,8 @@ upstream submission. 
dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n"); return err; } ---- a/drivers/net/wireless/zd1211rw/zd_usb.c -+++ b/drivers/net/wireless/zd1211rw/zd_usb.c +--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c ++++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -120,16 +120,9 @@ static void int_urb_complete(struct urb static int request_fw_file( const struct firmware **fw, const char *name, struct device *device) @@ -1916,7 +1889,7 @@ upstream submission. if (err) { --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c -@@ -1762,7 +1762,6 @@ bfad_read_firmware(struct pci_dev *pdev, +@@ -1758,7 +1758,6 @@ bfad_read_firmware(struct pci_dev *pdev, const struct firmware *fw; if (request_firmware(&fw, fw_name, &pdev->dev)) { @@ -1926,9 +1899,9 @@ upstream submission. } --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c -@@ -4011,10 +4011,8 @@ static ssize_t ipr_store_update_fw(struc - len = snprintf(fname, 99, "%s", buf); - fname[len-1] = '\0'; +@@ -4009,10 +4009,8 @@ static ssize_t ipr_store_update_fw(struc + + snprintf(fname, sizeof(fname), "%s", buf); - if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { - dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); @@ -1964,7 +1937,7 @@ upstream submission. } --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c -@@ -5520,8 +5520,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha, +@@ -5540,8 +5540,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha, /* Load firmware blob. */ blob = qla2x00_request_firmware(vha); if (!blob) { @@ -1973,7 +1946,7 @@ upstream submission. ql_log(ql_log_info, vha, 0x0084, "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; -@@ -5623,8 +5621,6 @@ qla24xx_load_risc_blob(scsi_qla_host_t * +@@ -5643,8 +5641,6 @@ qla24xx_load_risc_blob(scsi_qla_host_t * /* Load firmware blob. */ blob = qla2x00_request_firmware(vha); if (!blob) { @@ -1999,7 +1972,7 @@ upstream submission. if (qla82xx_validate_firmware_blob(vha, --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c -@@ -5364,8 +5364,6 @@ qla2x00_request_firmware(scsi_qla_host_t +@@ -5529,8 +5529,6 @@ qla2x00_request_firmware(scsi_qla_host_t goto out; if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { @@ -2148,7 +2121,7 @@ upstream submission. for (i = 0; i < numsects; i++) { --- a/drivers/staging/vt6656/firmware.c +++ b/drivers/staging/vt6656/firmware.c -@@ -53,11 +53,8 @@ int vnt_download_firmware(struct vnt_pri +@@ -49,11 +49,8 @@ int vnt_download_firmware(struct vnt_pri dev_dbg(dev, "---->Download firmware\n"); rc = request_firmware(&fw, FIRMWARE_NAME, dev); @@ -2178,7 +2151,7 @@ upstream submission. positive, skip this board */ --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c -@@ -867,13 +867,8 @@ static int moxa_init_board(struct moxa_b +@@ -866,13 +866,8 @@ static int moxa_init_board(struct moxa_b } ret = request_firmware(&fw, file, dev); @@ -2195,7 +2168,7 @@ upstream submission. --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c -@@ -375,7 +375,6 @@ static void load_code(struct icom_port * +@@ -374,7 +374,6 @@ static void load_code(struct icom_port * /* Load Call Setup into Adapter */ if (request_firmware(&fw, "icom_call_setup.bin", &dev->dev) < 0) { @@ -2203,7 +2176,7 @@ upstream submission. 
status = -1; goto load_code_exit; } -@@ -395,7 +394,6 @@ static void load_code(struct icom_port * +@@ -394,7 +393,6 @@ static void load_code(struct icom_port * /* Load Resident DCE portion of Adapter */ if (request_firmware(&fw, "icom_res_dce.bin", &dev->dev) < 0) { @@ -2211,7 +2184,7 @@ upstream submission. status = -1; goto load_code_exit; } -@@ -440,7 +438,6 @@ static void load_code(struct icom_port * +@@ -439,7 +437,6 @@ static void load_code(struct icom_port * } if (request_firmware(&fw, "icom_asc.bin", &dev->dev) < 0) { @@ -2560,7 +2533,7 @@ upstream submission. filename, emu->firmware->size); --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c -@@ -1738,10 +1738,8 @@ static void azx_firmware_cb(const struct +@@ -1831,10 +1831,8 @@ static void azx_firmware_cb(const struct struct azx *chip = card->private_data; struct pci_dev *pci = chip->pci; diff --git a/debian/patches/bugfix/all/media-usbvision-fix-crash-on-detecting-device-with-i.patch b/debian/patches/bugfix/all/media-usbvision-fix-crash-on-detecting-device-with-i.patch deleted file mode 100644 index ee4365b9f..000000000 --- a/debian/patches/bugfix/all/media-usbvision-fix-crash-on-detecting-device-with-i.patch +++ /dev/null @@ -1,44 +0,0 @@ -From: Vladis Dronov -Date: Mon, 16 Nov 2015 15:55:11 -0200 -Subject: [media] usbvision: fix crash on detecting device with invalid - configuration -Origin: http://git.linuxtv.org/cgit.cgi/media_tree.git/commit?id=fa52bd506f274b7619955917abfde355e3d19ffe - -The usbvision driver crashes when a specially crafted usb device with invalid -number of interfaces or endpoints is detected. This fix adds checks that the -device has proper configuration expected by the driver. - -Reported-by: Ralf Spenneberg -Signed-off-by: Vladis Dronov -Signed-off-by: Mauro Carvalho Chehab ---- - drivers/media/usb/usbvision/usbvision-video.c | 16 +++++++++++++++- - 1 file changed, 15 insertions(+), 1 deletion(-) - ---- a/drivers/media/usb/usbvision/usbvision-video.c -+++ b/drivers/media/usb/usbvision/usbvision-video.c -@@ -1542,9 +1542,23 @@ static int usbvision_probe(struct usb_in - - if (usbvision_device_data[model].interface >= 0) - interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; -- else -+ else if (ifnum < dev->actconfig->desc.bNumInterfaces) - interface = &dev->actconfig->interface[ifnum]->altsetting[0]; -+ else { -+ dev_err(&intf->dev, "interface %d is invalid, max is %d\n", -+ ifnum, dev->actconfig->desc.bNumInterfaces - 1); -+ ret = -ENODEV; -+ goto err_usb; -+ } -+ -+ if (interface->desc.bNumEndpoints < 2) { -+ dev_err(&intf->dev, "interface %d has %d endpoints, but must" -+ " have minimum 2\n", ifnum, interface->desc.bNumEndpoints); -+ ret = -ENODEV; -+ goto err_usb; -+ } - endpoint = &interface->endpoint[1].desc; -+ - if (!usb_endpoint_xfer_isoc(endpoint)) { - dev_err(&intf->dev, "%s: interface %d. 
has non-ISO endpoint!\n", - __func__, ifnum); diff --git a/debian/patches/bugfix/all/pipe-limit-the-per-user-amount-of-pages-allocated-in.patch b/debian/patches/bugfix/all/pipe-limit-the-per-user-amount-of-pages-allocated-in.patch deleted file mode 100644 index 149818968..000000000 --- a/debian/patches/bugfix/all/pipe-limit-the-per-user-amount-of-pages-allocated-in.patch +++ /dev/null @@ -1,237 +0,0 @@ -From: Willy Tarreau -Date: Mon, 18 Jan 2016 16:36:09 +0100 -Subject: pipe: limit the per-user amount of pages allocated in pipes -Origin: https://git.kernel.org/linus/759c01142a5d0f364a462346168a56de28a80f52 - -On no-so-small systems, it is possible for a single process to cause an -OOM condition by filling large pipes with data that are never read. A -typical process filling 4000 pipes with 1 MB of data will use 4 GB of -memory. On small systems it may be tricky to set the pipe max size to -prevent this from happening. - -This patch makes it possible to enforce a per-user soft limit above -which new pipes will be limited to a single page, effectively limiting -them to 4 kB each, as well as a hard limit above which no new pipes may -be created for this user. This has the effect of protecting the system -against memory abuse without hurting other users, and still allowing -pipes to work correctly though with less data at once. - -The limit are controlled by two new sysctls : pipe-user-pages-soft, and -pipe-user-pages-hard. Both may be disabled by setting them to zero. The -default soft limit allows the default number of FDs per process (1024) -to create pipes of the default size (64kB), thus reaching a limit of 64MB -before starting to create only smaller pipes. With 256 processes limited -to 1024 FDs each, this results in 1024*64kB + (256*1024 - 1024) * 4kB = -1084 MB of memory allocated for a user. The hard limit is disabled by -default to avoid breaking existing applications that make intensive use -of pipes (eg: for splicing). - -Reported-by: socketpair@gmail.com -Reported-by: Tetsuo Handa -Mitigates: CVE-2013-4312 (Linux 2.0+) -Suggested-by: Linus Torvalds -Signed-off-by: Willy Tarreau -Signed-off-by: Al Viro ---- - Documentation/sysctl/fs.txt | 23 ++++++++++++++++++++++ - fs/pipe.c | 47 +++++++++++++++++++++++++++++++++++++++++++-- - include/linux/pipe_fs_i.h | 4 ++++ - include/linux/sched.h | 1 + - kernel/sysctl.c | 14 ++++++++++++++ - 5 files changed, 87 insertions(+), 2 deletions(-) - ---- a/Documentation/sysctl/fs.txt -+++ b/Documentation/sysctl/fs.txt -@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/ - - nr_open - - overflowuid - - overflowgid -+- pipe-user-pages-hard -+- pipe-user-pages-soft - - protected_hardlinks - - protected_symlinks - - suid_dumpable -@@ -159,6 +161,27 @@ The default is 65534. - - ============================================================== - -+pipe-user-pages-hard: -+ -+Maximum total number of pages a non-privileged user may allocate for pipes. -+Once this limit is reached, no new pipes may be allocated until usage goes -+below the limit again. When set to 0, no limit is applied, which is the default -+setting. -+ -+============================================================== -+ -+pipe-user-pages-soft: -+ -+Maximum total number of pages a non-privileged user may allocate for pipes -+before the pipe size gets limited to a single page. 
Once this limit is reached, -+new pipes will be limited to a single page in size for this user in order to -+limit total memory usage, and trying to increase them using fcntl() will be -+denied until usage goes below the limit again. The default value allows to -+allocate up to 1024 pipes at their default size. When set to 0, no limit is -+applied. -+ -+============================================================== -+ - protected_hardlinks: - - A long-standing class of security issues is the hardlink-based ---- a/fs/pipe.c -+++ b/fs/pipe.c -@@ -38,6 +38,12 @@ unsigned int pipe_max_size = 1048576; - */ - unsigned int pipe_min_size = PAGE_SIZE; - -+/* Maximum allocatable pages per user. Hard limit is unset by default, soft -+ * matches default values. -+ */ -+unsigned long pipe_user_pages_hard; -+unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR; -+ - /* - * We use a start+len construction, which provides full use of the - * allocated memory. -@@ -584,20 +590,49 @@ pipe_fasync(int fd, struct file *filp, i - return retval; - } - -+static void account_pipe_buffers(struct pipe_inode_info *pipe, -+ unsigned long old, unsigned long new) -+{ -+ atomic_long_add(new - old, &pipe->user->pipe_bufs); -+} -+ -+static bool too_many_pipe_buffers_soft(struct user_struct *user) -+{ -+ return pipe_user_pages_soft && -+ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft; -+} -+ -+static bool too_many_pipe_buffers_hard(struct user_struct *user) -+{ -+ return pipe_user_pages_hard && -+ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard; -+} -+ - struct pipe_inode_info *alloc_pipe_info(void) - { - struct pipe_inode_info *pipe; - - pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); - if (pipe) { -- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL); -+ unsigned long pipe_bufs = PIPE_DEF_BUFFERS; -+ struct user_struct *user = get_current_user(); -+ -+ if (!too_many_pipe_buffers_hard(user)) { -+ if (too_many_pipe_buffers_soft(user)) -+ pipe_bufs = 1; -+ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); -+ } -+ - if (pipe->bufs) { - init_waitqueue_head(&pipe->wait); - pipe->r_counter = pipe->w_counter = 1; -- pipe->buffers = PIPE_DEF_BUFFERS; -+ pipe->buffers = pipe_bufs; -+ pipe->user = user; -+ account_pipe_buffers(pipe, 0, pipe_bufs); - mutex_init(&pipe->mutex); - return pipe; - } -+ free_uid(user); - kfree(pipe); - } - -@@ -608,6 +643,8 @@ void free_pipe_info(struct pipe_inode_in - { - int i; - -+ account_pipe_buffers(pipe, pipe->buffers, 0); -+ free_uid(pipe->user); - for (i = 0; i < pipe->buffers; i++) { - struct pipe_buffer *buf = pipe->bufs + i; - if (buf->ops) -@@ -996,6 +1033,7 @@ static long pipe_set_size(struct pipe_in - memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); - } - -+ account_pipe_buffers(pipe, pipe->buffers, nr_pages); - pipe->curbuf = 0; - kfree(pipe->bufs); - pipe->bufs = bufs; -@@ -1067,6 +1105,11 @@ long pipe_fcntl(struct file *file, unsig - if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { - ret = -EPERM; - goto out; -+ } else if ((too_many_pipe_buffers_hard(pipe->user) || -+ too_many_pipe_buffers_soft(pipe->user)) && -+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { -+ ret = -EPERM; -+ goto out; - } - ret = pipe_set_size(pipe, nr_pages); - break; ---- a/include/linux/pipe_fs_i.h -+++ b/include/linux/pipe_fs_i.h -@@ -42,6 +42,7 @@ struct pipe_buffer { - * @fasync_readers: reader side fasync - * @fasync_writers: writer side fasync - * @bufs: the circular 
array of pipe buffers -+ * @user: the user who created this pipe - **/ - struct pipe_inode_info { - struct mutex mutex; -@@ -57,6 +58,7 @@ struct pipe_inode_info { - struct fasync_struct *fasync_readers; - struct fasync_struct *fasync_writers; - struct pipe_buffer *bufs; -+ struct user_struct *user; - }; - - /* -@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info - void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); - - extern unsigned int pipe_max_size, pipe_min_size; -+extern unsigned long pipe_user_pages_hard; -+extern unsigned long pipe_user_pages_soft; - int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); - - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -831,6 +831,7 @@ struct user_struct { - #endif - unsigned long locked_shm; /* How many pages of mlocked shm ? */ - unsigned long unix_inflight; /* How many files in flight in unix sockets */ -+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ - - #ifdef CONFIG_KEYS - struct key *uid_keyring; /* UID specific keyring */ ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -1714,6 +1714,20 @@ static struct ctl_table fs_table[] = { - .proc_handler = &pipe_proc_fn, - .extra1 = &pipe_min_size, - }, -+ { -+ .procname = "pipe-user-pages-hard", -+ .data = &pipe_user_pages_hard, -+ .maxlen = sizeof(pipe_user_pages_hard), -+ .mode = 0644, -+ .proc_handler = proc_doulongvec_minmax, -+ }, -+ { -+ .procname = "pipe-user-pages-soft", -+ .data = &pipe_user_pages_soft, -+ .maxlen = sizeof(pipe_user_pages_soft), -+ .mode = 0644, -+ .proc_handler = proc_doulongvec_minmax, -+ }, - { } - }; - diff --git a/debian/patches/bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch b/debian/patches/bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch index 1463047ed..1e5cb2876 100644 --- a/debian/patches/bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch +++ b/debian/patches/bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch @@ -34,8 +34,6 @@ Signed-off-by: Jann Horn kernel/ptrace.c | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) -diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index b760bae..260a08d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -20,6 +20,7 @@ @@ -46,7 +44,7 @@ index b760bae..260a08d 100644 #include #include #include -@@ -207,12 +208,34 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) +@@ -207,12 +208,34 @@ static int ptrace_check_attach(struct ta return ret; } @@ -84,16 +82,16 @@ index b760bae..260a08d 100644 } /* Returns 0 on success, -errno on denial. 
*/ -@@ -241,7 +264,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) - gid_eq(cred->gid, tcred->sgid) && - gid_eq(cred->gid, tcred->gid)) +@@ -264,7 +287,7 @@ static int __ptrace_may_access(struct ta + gid_eq(caller_gid, tcred->sgid) && + gid_eq(caller_gid, tcred->gid)) goto ok; - if (ptrace_has_cap(tcred->user_ns, mode)) + if (ptrace_has_cap(tcred, mode)) goto ok; rcu_read_unlock(); return -EPERM; -@@ -252,7 +275,7 @@ ok: +@@ -275,7 +298,7 @@ ok: dumpable = get_dumpable(task->mm); rcu_read_lock(); if (dumpable != SUID_DUMP_USER && diff --git a/debian/patches/bugfix/all/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch b/debian/patches/bugfix/all/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch index d55fef997..65a4005b3 100644 --- a/debian/patches/bugfix/all/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch +++ b/debian/patches/bugfix/all/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch @@ -18,12 +18,10 @@ firmware for the memory controller and other sub-blocks. radeon attempts to gracefully fall back and disable some features if the firmware is not available, but becomes unstable - the framebuffer and/or system memory may be corrupted, or the display may stay black. -This does not seem to happen if KMS is disabled, but with both KMS -and GPU acceleration disabled radeon is not doing anything useful! Therefore, perform a basic check for the existence of /lib/firmware/radeon when a device is probed, and abort if it is -missing, except for the pre-R600 KMS case. +missing, except for the pre-R600 case. --- --- a/drivers/gpu/drm/radeon/radeon_drv.c @@ -34,10 +32,10 @@ missing, except for the pre-R600 KMS case. #include "radeon_kfd.h" +#include +#include - + /* * KMS wrapper. -@@ -362,6 +365,42 @@ static struct drm_driver driver_old = { +@@ -293,6 +295,29 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static struct drm_driver kms_driver; @@ -63,24 +61,11 @@ missing, except for the pre-R600 KMS case. + return false; +#endif +} -+ -+#ifdef CONFIG_DRM_RADEON_UMS -+static int -+radeon_ums_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -+{ -+ if (!radeon_firmware_installed()) { -+ DRM_ERROR("radeon DRM requires firmware-linux-nonfree.\n"); -+ return -ENODEV; -+ } -+ -+ return 0; -+} -+#endif + static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) { struct apertures_struct *ap; -@@ -388,6 +427,12 @@ static int radeon_pci_probe(struct pci_d +@@ -319,6 +344,12 @@ static int radeon_pci_probe(struct pci_d { int ret; @@ -93,11 +78,3 @@ missing, except for the pre-R600 KMS case. 
/* Get rid of things like offb */ ret = radeon_kick_out_firmware_fb(pdev); if (ret) -@@ -610,6 +655,7 @@ static struct pci_driver *pdriver; - static struct pci_driver radeon_pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, -+ .probe = radeon_ums_pci_probe, - }; - #endif - diff --git a/debian/patches/bugfix/all/revert-xhci-don-t-finish-a-td-if-we-get-a-short-transfer.patch b/debian/patches/bugfix/all/revert-xhci-don-t-finish-a-td-if-we-get-a-short-transfer.patch deleted file mode 100644 index 4390571b4..000000000 --- a/debian/patches/bugfix/all/revert-xhci-don-t-finish-a-td-if-we-get-a-short-transfer.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Ben Hutchings -Date: Sat, 02 Jan 2016 03:03:27 +0000 -Subject: Revert "xhci: don't finish a TD if we get a short transfer event mid TD" -Bug-Debian: https://bugs.debian.org/808602 -Bug-Debian: https://bugs.debian.org/808953 - -This reverts commit e210c422b6fdd2dc123bedc588f399aefd8bf9de. It -caused serious regressions as referenced above. - ---- ---- a/drivers/usb/host/xhci-ring.c -+++ b/drivers/usb/host/xhci-ring.c -@@ -2192,10 +2192,6 @@ static int process_bulk_intr_td(struct x - } - /* Fast path - was this the last TRB in the TD for this URB? */ - } else if (event_trb == td->last_trb) { -- if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX) -- return finish_td(xhci, td, event_trb, event, ep, -- status, false); -- - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - td->urb->actual_length = - td->urb->transfer_buffer_length - -@@ -2247,12 +2243,6 @@ static int process_bulk_intr_td(struct x - td->urb->actual_length += - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); -- -- if (trb_comp_code == COMP_SHORT_TX) { -- xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n"); -- td->urb_length_set = true; -- return 0; -- } - } - - return finish_td(xhci, td, event_trb, event, ep, status, false); diff --git a/debian/patches/bugfix/all/rt2x00-fix-monitor-mode-regression.patch b/debian/patches/bugfix/all/rt2x00-fix-monitor-mode-regression.patch deleted file mode 100644 index c96ef41b4..000000000 --- a/debian/patches/bugfix/all/rt2x00-fix-monitor-mode-regression.patch +++ /dev/null @@ -1,149 +0,0 @@ -From: Eli Cooper -Date: Mon, 18 Jan 2016 19:30:19 +0800 -Subject: rt2x00: fix monitor mode regression -Origin: https://git.kernel.org/cgit/linux/kernel/git/wireless/wireless-testing.git/commit?id=262c741e0825b29447a9e53b6582afd6b14c3706 - -Since commit df1404650ccb ("mac80211: remove support for IFF_PROMISC") -monitor mode for rt2x00 has been made effectively useless because the -hardware filter is configured to drop packets whose intended recipient is -not the device, regardless of the presence of monitor mode interfaces. - -This patch fixes this regression by adding explicit monitor mode support, -and by configuring the hardware filter accordingly. 
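A minimal sketch, outside the quoted patch, of the flag bookkeeping this fix adds: a per-device bit is kept in sync with mac80211's monitor configuration, and the hardware "drop frames not addressed to me" filter is keyed off it. The bit index and flag word are hypothetical stand-ins for CONFIG_MONITORING and rt2x00dev->flags.

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MONITORING_BIT  0       /* stand-in for CONFIG_MONITORING */

    static void sync_monitor_flag(unsigned long *flags, bool monitor)
    {
            if (monitor)
                    set_bit(MONITORING_BIT, flags);
            else
                    clear_bit(MONITORING_BIT, flags);
    }

    static bool drop_not_to_me(const unsigned long *flags)
    {
            /* keep every frame while a monitor interface is active */
            return !test_bit(MONITORING_BIT, flags);
    }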
- -Signed-off-by: Eli Cooper -Acked-by: Stanislaw Gruszka -Signed-off-by: Kalle Valo -[bwh: Backported to 4.3: adjust filenames] ---- - drivers/net/wireless/rt2x00/rt2400pci.c | 4 +++- - drivers/net/wireless/rt2x00/rt2500pci.c | 4 +++- - drivers/net/wireless/rt2x00/rt2500usb.c | 4 +++- - drivers/net/wireless/rt2x00/rt2800lib.c | 3 ++- - drivers/net/wireless/rt2x00/rt2x00.h | 1 + - drivers/net/wireless/rt2x00/rt2x00config.c | 5 +++++ - drivers/net/wireless/rt2x00/rt2x00mac.c | 5 ----- - drivers/net/wireless/rt2x00/rt61pci.c | 4 +++- - drivers/net/wireless/rt2x00/rt73usb.c | 4 +++- - 9 files changed, 23 insertions(+), 11 deletions(-) - ---- a/drivers/net/wireless/rt2x00/rt2400pci.c -+++ b/drivers/net/wireless/rt2x00/rt2400pci.c -@@ -273,8 +273,10 @@ static void rt2400pci_config_filter(stru - !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, - !(filter_flags & FIF_CONTROL)); -- rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field32(®, RXCSR0_DROP_TODS, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && - !rt2x00dev->intf_ap_count); - rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); - rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); ---- a/drivers/net/wireless/rt2x00/rt2500pci.c -+++ b/drivers/net/wireless/rt2x00/rt2500pci.c -@@ -274,8 +274,10 @@ static void rt2500pci_config_filter(stru - !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, - !(filter_flags & FIF_CONTROL)); -- rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field32(®, RXCSR0_DROP_TODS, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && - !rt2x00dev->intf_ap_count); - rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); - rt2x00_set_field32(®, RXCSR0_DROP_MCAST, ---- a/drivers/net/wireless/rt2x00/rt2500usb.c -+++ b/drivers/net/wireless/rt2x00/rt2500usb.c -@@ -434,8 +434,10 @@ static void rt2500usb_config_filter(stru - !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field16(®, TXRX_CSR2_DROP_CONTROL, - !(filter_flags & FIF_CONTROL)); -- rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field16(®, TXRX_CSR2_DROP_TODS, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && - !rt2x00dev->intf_ap_count); - rt2x00_set_field16(®, TXRX_CSR2_DROP_VERSION_ERROR, 1); - rt2x00_set_field16(®, TXRX_CSR2_DROP_MULTICAST, ---- a/drivers/net/wireless/rt2x00/rt2800lib.c -+++ b/drivers/net/wireless/rt2x00/rt2800lib.c -@@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_ - !(filter_flags & FIF_FCSFAIL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PHY_ERROR, - !(filter_flags & FIF_PLCPFAIL)); -- rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_VER_ERROR, 1); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_MULTICAST, ---- a/drivers/net/wireless/rt2x00/rt2x00.h -+++ b/drivers/net/wireless/rt2x00/rt2x00.h -@@ -669,6 +669,7 @@ enum rt2x00_state_flags { - CONFIG_POWERSAVING, - CONFIG_HT_DISABLED, - CONFIG_QOS_DISABLED, -+ CONFIG_MONITORING, - - /* - * Mark we currently are sequentially reading TX_STA_FIFO register ---- 
a/drivers/net/wireless/rt2x00/rt2x00config.c -+++ b/drivers/net/wireless/rt2x00/rt2x00config.c -@@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev - else - clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); - -+ if (conf->flags & IEEE80211_CONF_MONITOR) -+ set_bit(CONFIG_MONITORING, &rt2x00dev->flags); -+ else -+ clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); -+ - rt2x00dev->curr_band = conf->chandef.chan->band; - rt2x00dev->curr_freq = conf->chandef.chan->center_freq; - rt2x00dev->tx_power = conf->power_level; ---- a/drivers/net/wireless/rt2x00/rt2x00mac.c -+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c -@@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct i - *total_flags |= FIF_PSPOLL; - } - -- /* -- * Check if there is any work left for us. -- */ -- if (rt2x00dev->packet_filter == *total_flags) -- return; - rt2x00dev->packet_filter = *total_flags; - - rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); ---- a/drivers/net/wireless/rt2x00/rt61pci.c -+++ b/drivers/net/wireless/rt2x00/rt61pci.c -@@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct - !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, - !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); -- rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && - !rt2x00dev->intf_ap_count); - rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); - rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, ---- a/drivers/net/wireless/rt2x00/rt73usb.c -+++ b/drivers/net/wireless/rt2x00/rt73usb.c -@@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct - !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, - !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); -- rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); -+ rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); - rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, -+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && - !rt2x00dev->intf_ap_count); - rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); - rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, diff --git a/debian/patches/bugfix/all/scsi-fix-crashes-in-sd-and-sr-runtime-pm.patch b/debian/patches/bugfix/all/scsi-fix-crashes-in-sd-and-sr-runtime-pm.patch deleted file mode 100644 index 780732318..000000000 --- a/debian/patches/bugfix/all/scsi-fix-crashes-in-sd-and-sr-runtime-pm.patch +++ /dev/null @@ -1,82 +0,0 @@ -From: Alan Stern -Subject: SCSI: fix crashes in sd and sr runtime PM -Date: Wed, 20 Jan 2016 11:26:01 -0500 (EST) -Origin: http://article.gmane.org/gmane.linux.scsi/109795 -Bug-Debian: https://bugs.debian.org/801925 - -Runtime suspend during driver probe and removal can cause problems. -The driver's runtime_suspend or runtime_resume callbacks may invoked -before the driver has finished binding to the device or after the -driver has unbound from the device. - -This problem shows up with the sd and sr drivers, and can cause disk -or CD/DVD drives to become unusable as a result. The fix is simple. -The drivers store a pointer to the scsi_disk or scsi_cd structure as -their private device data when probing is finished, so we simply have -to be sure to clear the private data during removal and test it during -runtime suspend/resume. - -This fixes . 
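A minimal sketch, outside the quoted patch, of the pattern this fix applies: every runtime-PM callback tolerates NULL driver data, since it may run before probe() has stored the pointer or after remove() has cleared it with dev_set_drvdata(dev, NULL). struct my_disk and do_suspend() are hypothetical stand-ins.

    #include <linux/device.h>

    struct my_disk;                         /* hypothetical driver data */
    int do_suspend(struct my_disk *disk);   /* hypothetical helper */

    static int my_runtime_suspend(struct device *dev)
    {
            struct my_disk *disk = dev_get_drvdata(dev);

            if (!disk)      /* e.g. suspend racing with remove() */
                    return 0;

            return do_suspend(disk);
    }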
- -Signed-off-by: Alan Stern -Reported-by: Paul Menzel -Reported-by: Erich Schubert -Reported-by: Alexandre Rossi -Tested-by: Paul Menzel -CC: "James E.J. Bottomley" -CC: Ben Hutchings -CC: - ---- - - -[as1795] - - - drivers/scsi/sd.c | 7 +++++-- - drivers/scsi/sr.c | 4 ++++ - 2 files changed, 9 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/sd.c -+++ b/drivers/scsi/sd.c -@@ -3142,8 +3142,8 @@ static int sd_suspend_common(struct devi - struct scsi_disk *sdkp = dev_get_drvdata(dev); - int ret = 0; - -- if (!sdkp) -- return 0; /* this can happen */ -+ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ -+ return 0; - - if (sdkp->WCE && sdkp->media_present) { - sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); -@@ -3182,6 +3182,9 @@ static int sd_resume(struct device *dev) - { - struct scsi_disk *sdkp = dev_get_drvdata(dev); - -+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ -+ return 0; -+ - if (!sdkp->device->manage_start_stop) - return 0; - ---- a/drivers/scsi/sr.c -+++ b/drivers/scsi/sr.c -@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct dev - { - struct scsi_cd *cd = dev_get_drvdata(dev); - -+ if (!cd) /* E.g.: runtime suspend following sr_remove() */ -+ return 0; -+ - if (cd->media_present) - return -EBUSY; - else -@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev) - scsi_autopm_get_device(cd->device); - - del_gendisk(cd->disk); -+ dev_set_drvdata(dev, NULL); - - mutex_lock(&sr_ref_mutex); - kref_put(&cd->kref, sr_kref_release); diff --git a/debian/patches/bugfix/all/tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch b/debian/patches/bugfix/all/tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch deleted file mode 100644 index 02fe5e74b..000000000 --- a/debian/patches/bugfix/all/tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch +++ /dev/null @@ -1,63 +0,0 @@ -From: Peter Hurley -Subject: tty: Fix unsafe ldisc reference via ioctl(TIOCGETD) -Date: Sun, 10 Jan 2016 22:40:55 -0800 -Origin: http://article.gmane.org/gmane.linux.kernel/2123249 - -ioctl(TIOCGETD) retrieves the line discipline id directly from the -ldisc because the line discipline id (c_line) in termios is untrustworthy; -userspace may have set termios via ioctl(TCSETS*) without actually -changing the line discipline via ioctl(TIOCSETD). - -However, directly accessing the current ldisc via tty->ldisc is -unsafe; the ldisc ptr dereferenced may be stale if the line discipline -is changing via ioctl(TIOCSETD) or hangup. - -Wait for the line discipline reference (just like read() or write()) -to retrieve the "current" line discipline id. - -Cc: -Signed-off-by: Peter Hurley ---- - drivers/tty/tty_io.c | 24 +++++++++++++++++++++++- - 1 file changed, 23 insertions(+), 1 deletion(-) - ---- a/drivers/tty/tty_io.c -+++ b/drivers/tty/tty_io.c -@@ -2654,6 +2654,28 @@ static int tiocsetd(struct tty_struct *t - } - - /** -+ * tiocgetd - get line discipline -+ * @tty: tty device -+ * @p: pointer to user data -+ * -+ * Retrieves the line discipline id directly from the ldisc. 
-+ * -+ * Locking: waits for ldisc reference (in case the line discipline -+ * is changing or the tty is being hungup) -+ */ -+ -+static int tiocgetd(struct tty_struct *tty, int __user *p) -+{ -+ struct tty_ldisc *ld; -+ int ret; -+ -+ ld = tty_ldisc_ref_wait(tty); -+ ret = put_user(ld->ops->num, p); -+ tty_ldisc_deref(ld); -+ return ret; -+} -+ -+/** - * send_break - performed time break - * @tty: device to break on - * @duration: timeout in mS -@@ -2879,7 +2901,7 @@ long tty_ioctl(struct file *file, unsign - case TIOCGSID: - return tiocgsid(tty, real_tty, p); - case TIOCGETD: -- return put_user(tty->ldisc->ops->num, (int __user *)p); -+ return tiocgetd(tty, p); - case TIOCSETD: - return tiocsetd(tty, p); - case TIOCVHANGUP: diff --git a/debian/patches/bugfix/all/usb-serial-visor-fix-crash-on-detecting-device-without-write_urbs.patch b/debian/patches/bugfix/all/usb-serial-visor-fix-crash-on-detecting-device-without-write_urbs.patch deleted file mode 100644 index 4b6a5d63c..000000000 --- a/debian/patches/bugfix/all/usb-serial-visor-fix-crash-on-detecting-device-without-write_urbs.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Vladis Dronov -Subject: usb: serial: visor: fix crash on detecting device without write_urbs -Date: Tue, 12 Jan 2016 15:10:50 +0100 -Origin: http://article.gmane.org/gmane.linux.usb.general/136045 -Bug: https://bugzilla.redhat.com/show_bug.cgi?id=1296466 - -The visor driver crashes in clie_5_attach() when a specially crafted USB -device without bulk-out endpoint is detected. This fix adds a check that -the device has proper configuration expected by the driver. - -Reported-by: Ralf Spenneberg -Signed-off-by: Vladis Dronov ---- - drivers/usb/serial/visor.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - ---- a/drivers/usb/serial/visor.c -+++ b/drivers/usb/serial/visor.c -@@ -597,8 +597,10 @@ static int clie_5_attach(struct usb_seri - */ - - /* some sanity check */ -- if (serial->num_ports < 2) -- return -1; -+ if (serial->num_bulk_out < 2) { -+ dev_err(&serial->interface->dev, "missing bulk out endpoints\n"); -+ return -ENODEV; -+ } - - /* port 0 now uses the modified endpoint Address */ - port = serial->port[0]; diff --git a/debian/patches/bugfix/all/usbvision-fix-overflow-of-interfaces-array.patch b/debian/patches/bugfix/all/usbvision-fix-overflow-of-interfaces-array.patch deleted file mode 100644 index 90d2bd9d8..000000000 --- a/debian/patches/bugfix/all/usbvision-fix-overflow-of-interfaces-array.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Oliver Neukum -Date: Tue, 27 Oct 2015 09:51:34 -0200 -Subject: [media] usbvision fix overflow of interfaces array -Origin: http://git.linuxtv.org/cgit.cgi/media_tree.git/commit?id=588afcc1c0e45358159090d95bf7b246fb67565f - -This fixes the crash reported in: -http://seclists.org/bugtraq/2015/Oct/35 -The interface number needs a sanity check. - -Signed-off-by: Oliver Neukum -Cc: Vladis Dronov -Signed-off-by: Hans Verkuil -Signed-off-by: Mauro Carvalho Chehab ---- - drivers/media/usb/usbvision/usbvision-video.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/drivers/media/usb/usbvision/usbvision-video.c -+++ b/drivers/media/usb/usbvision/usbvision-video.c -@@ -1533,6 +1533,13 @@ static int usbvision_probe(struct usb_in - printk(KERN_INFO "%s: %s found\n", __func__, - usbvision_device_data[model].model_string); - -+ /* -+ * this is a security check. 
-+ * an exploit using an incorrect bInterfaceNumber is known -+ */ -+ if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) -+ return -ENODEV; -+ - if (usbvision_device_data[model].interface >= 0) - interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; - else diff --git a/debian/patches/bugfix/arm/crypto-sun4i-ss-add-missing-statesize.patch b/debian/patches/bugfix/arm/crypto-sun4i-ss-add-missing-statesize.patch deleted file mode 100644 index 7aace658e..000000000 --- a/debian/patches/bugfix/arm/crypto-sun4i-ss-add-missing-statesize.patch +++ /dev/null @@ -1,40 +0,0 @@ -From: LABBE Corentin -Date: Mon, 16 Nov 2015 09:35:54 +0100 -Subject: crypto: sun4i-ss - add missing statesize -Origin: https://git.kernel.org/cgit/linux/kernel/git/herbert/cryptodev-2.6.git/commit?id=4f9ea86604e3ba64edd2817795798168fbb3c1a6 -Bug-Debian: https://bugs.debian.org/808625 - -sun4i-ss implementaton of md5/sha1 is via ahash algorithms. -Commit 8996eafdcbad ("crypto: ahash - ensure statesize is non-zero") -made impossible to load them without giving statesize. This patch -specifiy statesize for sha1 and md5. - -Fixes: 6298e948215f ("crypto: sunxi-ss - Add Allwinner Security System crypto accelerator") -Cc: # v4.3+ -Tested-by: Chen-Yu Tsai -Signed-off-by: LABBE Corentin -Signed-off-by: Herbert Xu ---- - drivers/crypto/sunxi-ss/sun4i-ss-core.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c -index eab6fe2..107cd2a 100644 ---- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c -+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c -@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { - .import = sun4i_hash_import_md5, - .halg = { - .digestsize = MD5_DIGEST_SIZE, -+ .statesize = sizeof(struct md5_state), - .base = { - .cra_name = "md5", - .cra_driver_name = "md5-sun4i-ss", -@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { - .import = sun4i_hash_import_sha1, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, -+ .statesize = sizeof(struct sha1_state), - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-sun4i-ss", diff --git a/debian/patches/bugfix/mips/mips-math-emu-correctly-handle-nop-emulation.patch b/debian/patches/bugfix/mips/mips-math-emu-correctly-handle-nop-emulation.patch deleted file mode 100644 index cb2e904bf..000000000 --- a/debian/patches/bugfix/mips/mips-math-emu-correctly-handle-nop-emulation.patch +++ /dev/null @@ -1,140 +0,0 @@ -From: "Maciej W. Rozycki" -Date: Fri, 22 Jan 2016 05:20:26 +0000 -Subject: MIPS: math-emu: Correctly handle NOP emulation -Origin: https://git.kernel.org/linus/e4553573b37c3f72533683cb5f3a1ad300b18d37 - -Fix an issue introduced with commit 9ab4471c9f1b ("MIPS: math-emu: -Correct delay-slot exception propagation") where the emulation of a NOP -instruction signals the need to terminate the emulation loop. This in -turn, if the PC has not changed from the entry to the loop, will cause -the kernel to terminate the program with SIGILL. - -Consider this program: - -static double div(double d) -{ - do - d /= 2.0; - while (d > .5); - return d; -} - -int main(int argc, char **argv) -{ - return div(argc); -} - -which gets compiled to the following binary code: - -00400490
: - 400490: 44840000 mtc1 a0,$f0 - 400494: 3c020040 lui v0,0x40 - 400498: d44207f8 ldc1 $f2,2040(v0) - 40049c: 46800021 cvt.d.w $f0,$f0 - 4004a0: 46220002 mul.d $f0,$f0,$f2 - 4004a4: 4620103c c.lt.d $f2,$f0 - 4004a8: 4501fffd bc1t 4004a0 - 4004ac: 00000000 nop - 4004b0: 4620000d trunc.w.d $f0,$f0 - 4004b4: 03e00008 jr ra - 4004b8: 44020000 mfc1 v0,$f0 - 4004bc: 00000000 nop - -Where the FPU emulator is used, depending on the number of command-line -arguments this code will either run to completion or terminate with -SIGILL. - -If no arguments are specified, then BC1T will not be taken, NOP will not -be emulated and code will complete successfully. - -If one argument is specified, then BC1T will be taken once and NOP will -be emulated. At this point the entry PC value will be 0x400498 and the -new PC value, set by `mips_dsemul' will be 0x4004a0, the target of BC1T. -The emulation loop will terminate, but SIGILL will not be issued, -because the PC has changed. The FPU emulator will be entered again and -on the second execution BC1T will not be taken, NOP will not be emulated -and code will complete successfully. - -If two or more arguments are specified, then the first execution of BC1T -will proceed as above. Upon reentering the FPU emulator the emulation -loop will continue to BC1T, at which point the branch will be taken and -NOP emulated again. At this point however the entry PC value will be -0x4004a0, the same as the target of BC1T. This will make the emulator -conclude that execution has not advanced and therefore an unsupported -FPU instruction has been encountered, and SIGILL will be sent to the -process. - -Fix the problem by extending the internal API of `mips_dsemul', making -it return -1 if no delay slot emulation frame has been made, the -instruction has been handled and execution of the emulation loop needs -to continue as if nothing happened. Remove code from `mips_dsemul' to -reproduce steps made by the emulation loop at the conclusion of each -iteration, as those will be reached normally now. Adjust call sites -accordingly. Document the API. - -Signed-off-by: Maciej W. Rozycki -Cc: Aurelien Jarno -Cc: linux-mips@linux-mips.org -Patchwork: https://patchwork.linux-mips.org/patch/12172/ -Signed-off-by: Ralf Baechle ---- - arch/mips/math-emu/cp1emu.c | 4 ++++ - arch/mips/math-emu/dsemul.c | 14 ++++++++------ - 2 files changed, 12 insertions(+), 6 deletions(-) - -diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c -index 32f0e19..cdfd44f 100644 ---- a/arch/mips/math-emu/cp1emu.c -+++ b/arch/mips/math-emu/cp1emu.c -@@ -1266,6 +1266,8 @@ branch_common: - */ - sig = mips_dsemul(xcp, ir, - contpc); -+ if (sig < 0) -+ break; - if (sig) - xcp->cp0_epc = bcpc; - /* -@@ -1319,6 +1321,8 @@ branch_common: - * instruction in the dslot - */ - sig = mips_dsemul(xcp, ir, contpc); -+ if (sig < 0) -+ break; - if (sig) - xcp->cp0_epc = bcpc; - /* SIGILL forces out of the emulation loop. */ -diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c -index cbb36c1..70e4824 100644 ---- a/arch/mips/math-emu/dsemul.c -+++ b/arch/mips/math-emu/dsemul.c -@@ -31,18 +31,20 @@ struct emuframe { - unsigned long epc; - }; - -+/* -+ * Set up an emulation frame for instruction IR, from a delay slot of -+ * a branch jumping to CPC. Return 0 if successful, -1 if no emulation -+ * required, otherwise a signal number causing a frame setup failure. 
-+ */ - int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) - { - struct emuframe __user *fr; - int err; - -+ /* NOP is easy */ - if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) || -- (ir == 0)) { -- /* NOP is easy */ -- regs->cp0_epc = cpc; -- clear_delay_slot(regs); -- return 0; -- } -+ (ir == 0)) -+ return -1; - - pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc); - --- -2.7.0.rc3 - diff --git a/debian/patches/bugfix/x86/drm-i915-shut-up-gen8-sde-irq-dmesg-noise.patch b/debian/patches/bugfix/x86/drm-i915-shut-up-gen8-sde-irq-dmesg-noise.patch deleted file mode 100644 index d28050471..000000000 --- a/debian/patches/bugfix/x86/drm-i915-shut-up-gen8-sde-irq-dmesg-noise.patch +++ /dev/null @@ -1,67 +0,0 @@ -From: Daniel Vetter -Date: Fri, 23 Oct 2015 10:56:12 +0200 -Subject: drm/i915: shut up gen8+ SDE irq dmesg noise -Origin: http://cgit.freedesktop.org/drm-intel/commit?id=97e5ed1111dcc5300a0f59a55248cd243937a8ab - -We get tons of cases where the master interrupt handler apparently set -a bit, with the SDEIIR disagreeing. No idea what's going on there, but -it's consistent on gen8+, no one seems to care about it and it's -making CI results flaky. - -Shut it up. - -No idea what's going on here, but we've had fun with PCH interrupts -before: - -commit 44498aea293b37af1d463acd9658cdce1ecdf427 -Author: Paulo Zanoni -Date: Fri Feb 22 17:05:28 2013 -0300 - - drm/i915: also disable south interrupts when handling them - -Note that there's a regression report in Bugzilla, and other -regression reports on the mailing lists keep croping up. But no ill -effects have ever been reported. But for paranoia still keep the -message at a debug level as a breadcrumb, just in case. - -This message was introduced in - -commit 38cc46d73ed99dd7002f1406002e52d7975d16cc -Author: Oscar Mateo -Date: Mon Jun 16 16:10:59 2014 +0100 - - drm/i915/bdw: Ack interrupts before handling them (GEN8) - -v2: Improve commit message a bit. - -Cc: Paulo Zanoni -Signed-off-by: Daniel Vetter -Link: http://patchwork.freedesktop.org/patch/msgid/1445590572-23631-2-git-send-email-daniel.vetter@ffwll.ch -Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=92084 -Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=80896 -Acked-by: Mika Kuoppala -Signed-off-by: Daniel Vetter -[bwh: Adjust context] ---- - drivers/gpu/drm/i915/i915_irq.c | 10 +++++++--- - 1 file changed, 7 insertions(+), 3 deletions(-) - ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int - spt_irq_handler(dev, pch_iir); - else - cpt_irq_handler(dev, pch_iir); -- } else -- DRM_ERROR("The master control interrupt lied (SDE)!\n"); -- -+ } else { -+ /* -+ * Like on previous PCH there seems to be something -+ * fishy going on with forwarding PCH interrupts. 
-+ */ -+ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); -+ } - } - - I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); diff --git a/debian/patches/bugfix/x86/drm-vmwgfx-fix-a-width-pitch-mismatch-on-framebuffer.patch b/debian/patches/bugfix/x86/drm-vmwgfx-fix-a-width-pitch-mismatch-on-framebuffer.patch deleted file mode 100644 index dd2d3da59..000000000 --- a/debian/patches/bugfix/x86/drm-vmwgfx-fix-a-width-pitch-mismatch-on-framebuffer.patch +++ /dev/null @@ -1,60 +0,0 @@ -From: Thomas Hellstrom -Date: Fri, 8 Jan 2016 20:29:40 +0100 -Subject: drm/vmwgfx: Fix a width / pitch mismatch on framebuffer updates -Origin: https://git.kernel.org/linus/a50e2bf5a0f674d62b69f51f6935a30e82bd015c - -When the framebuffer is a vmwgfx dma buffer and a proxy surface is -created, the vmw_kms_update_proxy() function requires that the proxy -surface width and the framebuffer pitch are compatible, otherwise -display corruption occurs as seen in gnome-shell/native with software -3D. Since the framebuffer pitch is determined by user-space, allocate -a proxy surface the width of which is based on the framebuffer pitch -rather than on the framebuffer width. - -Cc: -Reported-by: Raphael Hertzog -Tested-by: Mati Aharoni -Signed-off-by: Thomas Hellstrom -Reviewed-by: Brian Paul -Signed-off-by: Dave Airlie ---- - drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -@@ -725,21 +725,25 @@ static int vmw_create_dmabuf_proxy(struc - uint32_t format; - struct drm_vmw_size content_base_size; - struct vmw_resource *res; -+ unsigned int bytes_pp; - int ret; - - switch (mode_cmd->depth) { - case 32: - case 24: - format = SVGA3D_X8R8G8B8; -+ bytes_pp = 4; - break; - - case 16: - case 15: - format = SVGA3D_R5G6B5; -+ bytes_pp = 2; - break; - - case 8: - format = SVGA3D_P8; -+ bytes_pp = 1; - break; - - default: -@@ -747,7 +751,7 @@ static int vmw_create_dmabuf_proxy(struc - return -EINVAL; - } - -- content_base_size.width = mode_cmd->width; -+ content_base_size.width = mode_cmd->pitch / bytes_pp; - content_base_size.height = mode_cmd->height; - content_base_size.depth = 1; - diff --git a/debian/patches/debian/cgroups-Document-the-Debian-memory-resource-controll.patch b/debian/patches/debian/cgroups-Document-the-Debian-memory-resource-controll.patch index b6f4a49f8..2ac91ca20 100644 --- a/debian/patches/debian/cgroups-Document-the-Debian-memory-resource-controll.patch +++ b/debian/patches/debian/cgroups-Document-the-Debian-memory-resource-controll.patch @@ -3,9 +3,9 @@ Subject: cgroups: Document the Debian memory resource controller config change Forwarded: not-needed --- ---- a/Documentation/cgroups/memory.txt -+++ b/Documentation/cgroups/memory.txt -@@ -46,6 +46,10 @@ Features: +--- a/Documentation/cgroup-v1/memory.txt ++++ b/Documentation/cgroup-v1/memory.txt +@@ -52,6 +52,10 @@ Features: Kernel memory support is a work in progress, and the current version provides basically functionality. 
(See Section 2.7) diff --git a/debian/patches/debian/iwlwifi-do-not-request-unreleased-firmware.patch b/debian/patches/debian/iwlwifi-do-not-request-unreleased-firmware.patch index 7e05444c7..171e90a45 100644 --- a/debian/patches/debian/iwlwifi-do-not-request-unreleased-firmware.patch +++ b/debian/patches/debian/iwlwifi-do-not-request-unreleased-firmware.patch @@ -13,8 +13,8 @@ The installer appears to report any failed request, and it is probably not easy to detect that this particular failure is harmless. So stop requesting the unreleased firmware. ---- a/drivers/net/wireless/iwlwifi/iwl-6000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c +--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -31,7 +31,7 @@ #include "dvm/commands.h" /* needed for BT for now */ diff --git a/debian/patches/debian/version.patch b/debian/patches/debian/version.patch index da54a6900..8311a6f51 100644 --- a/debian/patches/debian/version.patch +++ b/debian/patches/debian/version.patch @@ -9,7 +9,7 @@ are set. --- a/Makefile +++ b/Makefile -@@ -980,7 +980,7 @@ endif +@@ -984,7 +984,7 @@ endif prepare2: prepare3 outputmakefile asm-generic prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ @@ -18,7 +18,7 @@ are set. $(cmd_crmodverdir) archprepare: archheaders archscripts prepare1 scripts_basic -@@ -1012,6 +1012,16 @@ define filechk_version.h +@@ -1016,6 +1016,16 @@ define filechk_version.h echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) endef @@ -35,7 +35,7 @@ are set. $(version_h): $(srctree)/Makefile FORCE $(call filechk,version.h) $(Q)rm -f $(old_version_h) -@@ -1019,6 +1029,9 @@ $(version_h): $(srctree)/Makefile FORCE +@@ -1023,6 +1033,9 @@ $(version_h): $(srctree)/Makefile FORCE include/generated/utsrelease.h: include/config/kernel.release FORCE $(call filechk,utsrelease.h) @@ -99,7 +99,7 @@ are set. #include #include -@@ -1001,8 +1002,9 @@ void show_regs(struct pt_regs * regs) +@@ -1150,8 +1151,9 @@ void show_regs(struct pt_regs * regs) printk("NIP: "REG" LR: "REG" CTR: "REG"\n", regs->nip, regs->link, regs->ctr); @@ -109,7 +109,7 @@ are set. + regs, regs->trap, print_tainted(), init_utsname()->release, + LINUX_PACKAGE_ID); printk("MSR: "REG" ", regs->msr); - printbits(regs->msr, msr_bits); + print_msr_bits(regs->msr); printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -120,8 +120,8 @@ are set. +#include #include - -@@ -3036,11 +3037,12 @@ void __init dump_stack_set_arch_desc(con + #include +@@ -3168,11 +3169,12 @@ void __init dump_stack_set_arch_desc(con */ void dump_stack_print_info(const char *log_lvl) { diff --git a/debian/patches/features/all/aufs4/aufs4-base.patch b/debian/patches/features/all/aufs4/aufs4-base.patch index 2bfba1543..d174952b9 100644 --- a/debian/patches/features/all/aufs4/aufs4-base.patch +++ b/debian/patches/features/all/aufs4/aufs4-base.patch @@ -1,7 +1,7 @@ From: J. R. 
Okajima -Date: Sat Nov 21 10:43:50 2015 +0900 +Date: Wed Feb 10 03:44:15 2016 +0900 Subject: aufs4.x-rcN base patch -Origin: https://github.com/sfjro/aufs4-standalone/tree/980697121e7bb079411664d77caa8f1e489f344c +Origin: https://github.com/sfjro/aufs4-standalone/tree/5fecf5788b797f74b83e36ca921dc1c4bfdcbad9 Bug-Debian: https://bugs.debian.org/541828 Patch headers added by debian/patches/features/all/aufs4/gen-patch @@ -9,10 +9,10 @@ Patch headers added by debian/patches/features/all/aufs4/gen-patch aufs4.x-rcN base patch diff --git a/MAINTAINERS b/MAINTAINERS -index e9caa4b..ddb7b8a 100644 +index 7f1fa4f..d910255 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -2028,6 +2028,19 @@ F: include/linux/audit.h +@@ -2075,6 +2075,19 @@ F: include/linux/audit.h F: include/uapi/linux/audit.h F: kernel/audit* @@ -62,7 +62,7 @@ index 423f4ca..abfdd2b 100644 static ssize_t loop_attr_show(struct device *dev, char *page, diff --git a/fs/dcache.c b/fs/dcache.c -index 5c33aeb..8aa7f26 100644 +index 92d5140..63b22d7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1167,7 +1167,7 @@ enum d_walk_ret { @@ -75,10 +75,10 @@ index 5c33aeb..8aa7f26 100644 void (*finish)(void *)) { diff --git a/fs/read_write.c b/fs/read_write.c -index 819ef3f..fd0414e 100644 +index 324ec27..d38892e 100644 --- a/fs/read_write.c +++ b/fs/read_write.c -@@ -494,6 +494,28 @@ ssize_t __vfs_write(struct file *file, const char __user *p, size_t count, +@@ -533,6 +533,28 @@ ssize_t __vfs_write(struct file *file, const char __user *p, size_t count, } EXPORT_SYMBOL(__vfs_write); @@ -108,10 +108,10 @@ index 819ef3f..fd0414e 100644 { mm_segment_t old_fs; diff --git a/fs/splice.c b/fs/splice.c -index 801c21c..218d188 100644 +index 82bc0d6..93aee51 100644 --- a/fs/splice.c +++ b/fs/splice.c -@@ -1102,8 +1102,8 @@ EXPORT_SYMBOL(generic_splice_sendpage); +@@ -1108,8 +1108,8 @@ EXPORT_SYMBOL(generic_splice_sendpage); /* * Attempt to initiate a splice from pipe to file. */ @@ -122,7 +122,7 @@ index 801c21c..218d188 100644 { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); -@@ -1119,9 +1119,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, +@@ -1125,9 +1125,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, /* * Attempt to initiate a splice from a file to a pipe. */ @@ -148,10 +148,10 @@ index f87d308..9a290b3 100644 static inline void fput_light(struct file *file, int fput_needed) { diff --git a/include/linux/fs.h b/include/linux/fs.h -index 3aa5142..8d48506 100644 +index ae68100..2ea096b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -1672,6 +1672,12 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, +@@ -1704,6 +1704,12 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, struct iovec *fast_pointer, struct iovec **ret_pointer); diff --git a/debian/patches/features/all/aufs4/aufs4-mmap.patch b/debian/patches/features/all/aufs4/aufs4-mmap.patch index 35be6d83c..7c62c8fb7 100644 --- a/debian/patches/features/all/aufs4/aufs4-mmap.patch +++ b/debian/patches/features/all/aufs4/aufs4-mmap.patch @@ -1,7 +1,7 @@ From: J. R. 
Okajima -Date: Sat Nov 21 10:43:50 2015 +0900 +Date: Wed Feb 10 03:44:15 2016 +0900 Subject: aufs4.x-rcN mmap patch -Origin: https://github.com/sfjro/aufs4-standalone/tree/980697121e7bb079411664d77caa8f1e489f344c +Origin: https://github.com/sfjro/aufs4-standalone/tree/5fecf5788b797f74b83e36ca921dc1c4bfdcbad9 Bug-Debian: https://bugs.debian.org/541828 Patch headers added by debian/patches/features/all/aufs4/gen-patch @@ -9,10 +9,10 @@ Patch headers added by debian/patches/features/all/aufs4/gen-patch aufs4.x-rcN mmap patch diff --git a/fs/proc/base.c b/fs/proc/base.c -index bd3e9e6..fc42216 100644 +index 4f764c2..229de5e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -1921,7 +1921,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) +@@ -1933,7 +1933,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path) down_read(&mm->mmap_sem); vma = find_exact_vma(mm, vm_start, vm_end); if (vma && vma->vm_file) { @@ -38,10 +38,10 @@ index f8595e8..cb8eda0 100644 ino = inode->i_ino; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c -index 187b3b5..e03793e 100644 +index fa95ab2..d440354 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c -@@ -281,7 +281,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) +@@ -298,7 +298,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) const char *name = NULL; if (file) { @@ -53,7 +53,7 @@ index 187b3b5..e03793e 100644 dev = inode->i_sb->s_dev; ino = inode->i_ino; pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; -@@ -1505,7 +1508,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) +@@ -1576,7 +1579,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) struct proc_maps_private *proc_priv = &numa_priv->proc_maps; struct vm_area_struct *vma = v; struct numa_maps *md = &numa_priv->md; @@ -63,10 +63,10 @@ index 187b3b5..e03793e 100644 struct mm_walk walk = { .hugetlb_entry = gather_hugetlb_stats, diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c -index e0d64c9..7aa92db 100644 +index faacb0c..17b43be 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c -@@ -160,7 +160,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, +@@ -163,7 +163,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, file = vma->vm_file; if (file) { @@ -79,10 +79,10 @@ index e0d64c9..7aa92db 100644 ino = inode->i_ino; pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; diff --git a/include/linux/mm.h b/include/linux/mm.h -index 00bad77..cc616b0 100644 +index 516e149..ddd5454 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -1183,6 +1183,28 @@ static inline int fixup_user_fault(struct task_struct *tsk, +@@ -1217,6 +1217,28 @@ static inline int fixup_user_fault(struct task_struct *tsk, } #endif @@ -112,10 +112,10 @@ index 00bad77..cc616b0 100644 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index f8d1492..c3a3760 100644 +index 624b78b..1be91c5 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h -@@ -272,6 +272,7 @@ struct vm_region { +@@ -269,6 +269,7 @@ struct vm_region { unsigned long vm_top; /* region allocated to here */ unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ struct file *vm_file; /* the backing file or NULL */ @@ -123,7 +123,7 @@ index f8d1492..c3a3760 100644 int vm_usage; /* region usage count (access 
under nommu_region_sem) */ bool vm_icache_flushed : 1; /* true if the icache has been flushed for -@@ -346,6 +347,7 @@ struct vm_area_struct { +@@ -343,6 +344,7 @@ struct vm_area_struct { unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */ struct file * vm_file; /* File we map to (can be NULL). */ @@ -132,7 +132,7 @@ index f8d1492..c3a3760 100644 #ifndef CONFIG_MMU diff --git a/kernel/fork.c b/kernel/fork.c -index f97f2c4..3ef40d6 100644 +index 2e391c7..6c4215c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -464,7 +464,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) @@ -158,10 +158,10 @@ index 2ed4319..e3a53f5 100644 obj-y += init-mm.o diff --git a/mm/filemap.c b/mm/filemap.c -index 1bb0076..8eaece8 100644 +index bc94386..25cdcef 100644 --- a/mm/filemap.c +++ b/mm/filemap.c -@@ -2128,7 +2128,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +@@ -2209,7 +2209,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) int ret = VM_FAULT_LOCKED; sb_start_pagefault(inode->i_sb); @@ -171,10 +171,10 @@ index 1bb0076..8eaece8 100644 if (page->mapping != inode->i_mapping) { unlock_page(page); diff --git a/mm/memory.c b/mm/memory.c -index deb679c..df2ce3e 100644 +index 635451a..f589a4e 100644 --- a/mm/memory.c +++ b/mm/memory.c -@@ -2035,7 +2035,7 @@ static inline int wp_page_reuse(struct mm_struct *mm, +@@ -2042,7 +2042,7 @@ static inline int wp_page_reuse(struct mm_struct *mm, } if (!page_mkwrite) @@ -184,10 +184,10 @@ index deb679c..df2ce3e 100644 return VM_FAULT_WRITE; diff --git a/mm/mmap.c b/mm/mmap.c -index 2ce04a6..f555c0a 100644 +index 2f2415a..d5943a1 100644 --- a/mm/mmap.c +++ b/mm/mmap.c -@@ -275,7 +275,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) +@@ -290,7 +290,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) @@ -196,7 +196,7 @@ index 2ce04a6..f555c0a 100644 mpol_put(vma_policy(vma)); kmem_cache_free(vm_area_cachep, vma); return next; -@@ -887,7 +887,7 @@ again: remove_next = 1 + (end > next->vm_end); +@@ -909,7 +909,7 @@ again: remove_next = 1 + (end > next->vm_end); if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); @@ -205,7 +205,7 @@ index 2ce04a6..f555c0a 100644 } if (next->anon_vma) anon_vma_merge(vma, next); -@@ -1681,8 +1681,8 @@ out: +@@ -1683,8 +1683,8 @@ out: return addr; unmap_and_free_vma: @@ -215,7 +215,7 @@ index 2ce04a6..f555c0a 100644 /* Undo any partial mapping done by a device driver. 
*/ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); -@@ -2488,7 +2488,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2479,7 +2479,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, goto out_free_mpol; if (new->vm_file) @@ -224,7 +224,7 @@ index 2ce04a6..f555c0a 100644 if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); -@@ -2507,7 +2507,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2498,7 +2498,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) @@ -233,7 +233,7 @@ index 2ce04a6..f555c0a 100644 unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); -@@ -2649,7 +2649,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, +@@ -2640,7 +2640,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; @@ -241,7 +241,7 @@ index 2ce04a6..f555c0a 100644 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. " "See Documentation/vm/remap_file_pages.txt.\n", -@@ -2693,10 +2692,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, +@@ -2684,10 +2683,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, munlock_vma_pages_range(vma, start, start + size); } @@ -254,7 +254,7 @@ index 2ce04a6..f555c0a 100644 out: up_write(&mm->mmap_sem); if (populate) -@@ -2966,7 +2965,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2958,7 +2957,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) @@ -264,7 +264,7 @@ index 2ce04a6..f555c0a 100644 new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); diff --git a/mm/nommu.c b/mm/nommu.c -index 92be862..29179f7 100644 +index fbf6f0f1..1a4a06d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -671,7 +671,7 @@ static void __put_nommu_region(struct vm_region *region) diff --git a/debian/patches/features/all/aufs4/aufs4-standalone.patch b/debian/patches/features/all/aufs4/aufs4-standalone.patch index 1f7cf9a3c..02962ce33 100644 --- a/debian/patches/features/all/aufs4/aufs4-standalone.patch +++ b/debian/patches/features/all/aufs4/aufs4-standalone.patch @@ -1,7 +1,7 @@ From: J. R. Okajima -Date: Sat Nov 21 10:43:50 2015 +0900 +Date: Wed Feb 10 03:44:15 2016 +0900 Subject: aufs4.x-rcN standalone patch -Origin: https://github.com/sfjro/aufs4-standalone/tree/980697121e7bb079411664d77caa8f1e489f344c +Origin: https://github.com/sfjro/aufs4-standalone/tree/5fecf5788b797f74b83e36ca921dc1c4bfdcbad9 Bug-Debian: https://bugs.debian.org/541828 Patch headers added by debian/patches/features/all/aufs4/gen-patch @@ -9,7 +9,7 @@ Patch headers added by debian/patches/features/all/aufs4/gen-patch aufs4.x-rcN standalone patch diff --git a/fs/dcache.c b/fs/dcache.c -index 8aa7f26..f997345 100644 +index 63b22d7..4dcd979 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1272,6 +1272,7 @@ rename_retry: @@ -21,7 +21,7 @@ index 8aa7f26..f997345 100644 /* * Search for at least 1 mount point in the dentry's subdirs. 
diff --git a/fs/exec.c b/fs/exec.c -index b06623a..b9206c5 100644 +index dcd4ac7..f4e040f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -103,6 +103,7 @@ bool path_noexec(const struct path *path) @@ -33,7 +33,7 @@ index b06623a..b9206c5 100644 #ifdef CONFIG_USELIB /* diff --git a/fs/file_table.c b/fs/file_table.c -index ad17e05..df66450 100644 +index ad17e05..38e046a 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -147,6 +147,7 @@ over: @@ -44,7 +44,23 @@ index ad17e05..df66450 100644 /** * alloc_file - allocate and initialize a 'struct file' -@@ -308,6 +309,7 @@ void put_filp(struct file *file) +@@ -258,6 +259,7 @@ void flush_delayed_fput(void) + { + delayed_fput(NULL); + } ++EXPORT_SYMBOL_GPL(flush_delayed_fput); + + static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); + +@@ -300,6 +302,7 @@ void __fput_sync(struct file *file) + } + + EXPORT_SYMBOL(fput); ++EXPORT_SYMBOL_GPL(__fput_sync); + + void put_filp(struct file *file) + { +@@ -308,6 +311,7 @@ void put_filp(struct file *file) file_free(file); } } @@ -53,7 +69,7 @@ index ad17e05..df66450 100644 void __init files_init(void) { diff --git a/fs/namespace.c b/fs/namespace.c -index 0570729..ec560d8 100644 +index 4fb1691..765ebc3 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -463,6 +463,7 @@ void __mnt_drop_write(struct vfsmount *mnt) @@ -64,7 +80,7 @@ index 0570729..ec560d8 100644 /** * mnt_drop_write - give up write access to a mount -@@ -1803,6 +1804,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, +@@ -1811,6 +1812,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, } return 0; } @@ -109,10 +125,10 @@ index d16b62c..06ca6bc 100644 int fsnotify_fasync(int fd, struct file *file, int on) { diff --git a/fs/notify/mark.c b/fs/notify/mark.c -index fc0df44..325b5c6 100644 +index cfcbf11..2c024528 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c -@@ -109,6 +109,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) +@@ -106,6 +106,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) mark->free_mark(mark); } } @@ -120,7 +136,7 @@ index fc0df44..325b5c6 100644 /* Calculate mask of events for a list of marks */ u32 fsnotify_recalc_mask(struct hlist_head *head) -@@ -208,6 +209,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, +@@ -211,6 +212,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, mutex_unlock(&group->mark_mutex); fsnotify_free_mark(mark); } @@ -128,28 +144,25 @@ index fc0df44..325b5c6 100644 void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock) { -@@ -392,6 +394,7 @@ err: - +@@ -391,6 +393,7 @@ err: + call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); return ret; } +EXPORT_SYMBOL_GPL(fsnotify_add_mark); int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, struct inode *inode, struct vfsmount *mnt, int allow_dups) -@@ -492,6 +495,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark, +@@ -491,3 +494,4 @@ void fsnotify_init_mark(struct fsnotify_mark *mark, atomic_set(&mark->refcnt, 1); mark->free_mark = free_mark; } +EXPORT_SYMBOL_GPL(fsnotify_init_mark); - - static int fsnotify_mark_destroy(void *ignored) - { diff --git a/fs/open.c b/fs/open.c -index b6f1e96..4ab0d4e 100644 +index 55bdc75..e0606c6 100644 --- a/fs/open.c +++ b/fs/open.c @@ -64,6 +64,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, - mutex_unlock(&dentry->d_inode->i_mutex); + inode_unlock(dentry->d_inode); return ret; } +EXPORT_SYMBOL_GPL(do_truncate); @@ -165,10 +178,10 @@ index 
b6f1e96..4ab0d4e 100644 static int do_dentry_open(struct file *f, struct inode *inode, diff --git a/fs/read_write.c b/fs/read_write.c -index fd0414e..8ace6ec 100644 +index d38892e..fbd7169 100644 --- a/fs/read_write.c +++ b/fs/read_write.c -@@ -504,6 +504,7 @@ vfs_readf_t vfs_readf(struct file *file) +@@ -543,6 +543,7 @@ vfs_readf_t vfs_readf(struct file *file) return new_sync_read; return ERR_PTR(-ENOSYS); } @@ -176,7 +189,7 @@ index fd0414e..8ace6ec 100644 vfs_writef_t vfs_writef(struct file *file) { -@@ -515,6 +516,7 @@ vfs_writef_t vfs_writef(struct file *file) +@@ -554,6 +555,7 @@ vfs_writef_t vfs_writef(struct file *file) return new_sync_write; return ERR_PTR(-ENOSYS); } @@ -185,10 +198,10 @@ index fd0414e..8ace6ec 100644 ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos) { diff --git a/fs/splice.c b/fs/splice.c -index 218d188..aa4f0d1 100644 +index 93aee51..5316378 100644 --- a/fs/splice.c +++ b/fs/splice.c -@@ -1115,6 +1115,7 @@ long do_splice_from(struct pipe_inode_info *pipe, struct file *out, +@@ -1121,6 +1121,7 @@ long do_splice_from(struct pipe_inode_info *pipe, struct file *out, return splice_write(pipe, out, ppos, len, flags); } @@ -196,7 +209,7 @@ index 218d188..aa4f0d1 100644 /* * Attempt to initiate a splice from a file to a pipe. -@@ -1141,6 +1142,7 @@ long do_splice_to(struct file *in, loff_t *ppos, +@@ -1147,6 +1148,7 @@ long do_splice_to(struct file *in, loff_t *ppos, return splice_read(in, ppos, pipe, len, flags); } @@ -205,7 +218,7 @@ index 218d188..aa4f0d1 100644 /** * splice_direct_to_actor - splices data directly between two non-pipes diff --git a/fs/xattr.c b/fs/xattr.c -index 9b932b9..44c457a 100644 +index 07d0e47..20c8815 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -207,6 +207,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, @@ -214,13 +227,22 @@ index 9b932b9..44c457a 100644 } +EXPORT_SYMBOL_GPL(vfs_getxattr_alloc); - /* Compare an extended attribute value with the given value */ - int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, + ssize_t + vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) +diff --git a/kernel/task_work.c b/kernel/task_work.c +index 53fa971..f80d564 100644 +--- a/kernel/task_work.c ++++ b/kernel/task_work.c +@@ -118,3 +118,4 @@ void task_work_run(void) + } while (work); + } + } ++EXPORT_SYMBOL_GPL(task_work_run); diff --git a/security/commoncap.c b/security/commoncap.c -index 1832cf7..987ff5f 100644 +index 48071ed..bf82414 100644 --- a/security/commoncap.c +++ b/security/commoncap.c -@@ -1053,12 +1053,14 @@ int cap_mmap_addr(unsigned long addr) +@@ -1058,12 +1058,14 @@ int cap_mmap_addr(unsigned long addr) } return ret; } @@ -256,7 +278,7 @@ index 03c1652..b00aa76 100644 int devcgroup_inode_mknod(int mode, dev_t dev) { diff --git a/security/security.c b/security/security.c -index 46f405c..54488b0 100644 +index e8ffd92..6f07901 100644 --- a/security/security.c +++ b/security/security.c @@ -433,6 +433,7 @@ int security_path_rmdir(struct path *dir, struct dentry *dentry) diff --git a/debian/patches/features/all/cgroups-Allow-memory-cgroup-support-to-be-included-b.patch b/debian/patches/features/all/cgroups-Allow-memory-cgroup-support-to-be-included-b.patch index aacd458be..450d13ed6 100644 --- a/debian/patches/features/all/cgroups-Allow-memory-cgroup-support-to-be-included-b.patch +++ b/debian/patches/features/all/cgroups-Allow-memory-cgroup-support-to-be-included-b.patch @@ -13,7 +13,7 @@ Signed-off-by: Ben Hutchings --- --- 
a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt -@@ -588,8 +588,8 @@ bytes respectively. Such letter suffixes +@@ -597,8 +597,8 @@ bytes respectively. Such letter suffixes ccw_timeout_log [S390] See Documentation/s390/CommonIO for details. @@ -26,9 +26,9 @@ Signed-off-by: Ben Hutchings a single hierarchy --- a/init/Kconfig +++ b/init/Kconfig -@@ -1010,6 +1010,14 @@ config MEMCG - Provides a memory resource controller that manages both anonymous - memory and page cache. (See Documentation/cgroups/memory.txt) +@@ -945,6 +945,14 @@ config MEMCG + help + Provides control over the memory footprint of tasks in a cgroup. +config MEMCG_DISABLED + bool "Memory Resource Controller disabled by default" @@ -39,11 +39,11 @@ Signed-off-by: Ben Hutchings + enabled using the kernel parameter "cgroup_enable=memory". + config MEMCG_SWAP - bool "Memory Resource Controller Swap Extension" + bool "Swap controller" depends on MEMCG && SWAP --- a/kernel/cgroup.c +++ b/kernel/cgroup.c -@@ -5216,7 +5216,11 @@ int __init cgroup_init_early(void) +@@ -5269,7 +5269,11 @@ int __init cgroup_init_early(void) return 0; } @@ -55,7 +55,7 @@ Signed-off-by: Ben Hutchings /** * cgroup_init - cgroup initialization -@@ -5691,7 +5695,7 @@ out_free: +@@ -5729,7 +5733,7 @@ out_free: kfree(pathbuf); } @@ -64,7 +64,7 @@ Signed-off-by: Ben Hutchings { struct cgroup_subsys *ss; char *token; -@@ -5705,13 +5709,27 @@ static int __init cgroup_disable(char *s +@@ -5743,13 +5747,27 @@ static int __init cgroup_disable(char *s if (strcmp(token, ss->name) && strcmp(token, ss->legacy_name)) continue; diff --git a/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch deleted file mode 100644 index 9f5e33381..000000000 --- a/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 1 Mar 2013 11:17:42 +0100 -Subject: futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In exit_pi_state_list() we have the following locking construct: - - spin_lock(&hb->lock); - raw_spin_lock_irq(&curr->pi_lock); - - ... - spin_unlock(&hb->lock); - -In !RT this works, but on RT the migrate_enable() function which is -called from spin_unlock() sees atomic context due to the held pi_lock -and just decrements the migrate_disable_atomic counter of the -task. Now the next call to migrate_disable() sees the counter being -negative and issues a warning. That check should be in -migrate_enable() already. - -Fix this by dropping pi_lock before unlocking hb->lock and reaquire -pi_lock after that again. This is safe as the loop code reevaluates -head again under the pi_lock. 
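-The resulting lock ordering can be sketched with plain mutexes standing
-in for hb->lock and pi_lock (a user-space analogy of the ordering only;
-on PREEMPT_RT the point is that the sleeping hb->lock is never released
-while the raw pi_lock is held):
-
-#include <stdio.h>
-#include <pthread.h>
-
-static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER; /* "sleeping" */
-static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER; /* "raw" */
-
-/* The retry path of exit_pi_state_list() after the fix: drop the inner
- * pi_lock first, release the outer hb->lock, then re-take pi_lock
- * before re-evaluating the list head. */
-static void retry_path(void)
-{
-	pthread_mutex_unlock(&pi_lock);
-	pthread_mutex_unlock(&hb_lock);
-	pthread_mutex_lock(&pi_lock);
-}
-
-int main(void)
-{
-	pthread_mutex_lock(&hb_lock);
-	pthread_mutex_lock(&pi_lock);
-	retry_path();			/* as in the "continue" branch */
-	pthread_mutex_unlock(&pi_lock);	/* normal exit stays balanced */
-	printf("lock/unlock symmetry preserved\n");
-	return 0;
-}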
- -Reported-by: Yong Zhang -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/futex.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -815,7 +815,9 @@ void exit_pi_state_list(struct task_stru - * task still owns the PI-state: - */ - if (head->next != next) { -+ raw_spin_unlock_irq(&curr->pi_lock); - spin_unlock(&hb->lock); -+ raw_spin_lock_irq(&curr->pi_lock); - continue; - } - diff --git a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch deleted file mode 100644 index 0f260d5ff..000000000 --- a/debian/patches/features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch +++ /dev/null @@ -1,86 +0,0 @@ -From: "Yadi.hu" -Date: Wed, 10 Dec 2014 10:32:09 +0800 -Subject: ARM: enable irq in translation/section permission fault handlers -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Probably happens on all ARM, with -CONFIG_PREEMPT_RT_FULL -CONFIG_DEBUG_ATOMIC_SLEEP - -This simple program.... - -int main() { - *((char*)0xc0001000) = 0; -}; - -[ 512.742724] BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 -[ 512.743000] in_atomic(): 0, irqs_disabled(): 128, pid: 994, name: a -[ 512.743217] INFO: lockdep is turned off. -[ 512.743360] irq event stamp: 0 -[ 512.743482] hardirqs last enabled at (0): [< (null)>] (null) -[ 512.743714] hardirqs last disabled at (0): [] copy_process+0x3b0/0x11c0 -[ 512.744013] softirqs last enabled at (0): [] copy_process+0x3b0/0x11c0 -[ 512.744303] softirqs last disabled at (0): [< (null)>] (null) -[ 512.744631] [] (unwind_backtrace+0x0/0x104) -[ 512.745001] [] (dump_stack+0x20/0x24) -[ 512.745355] [] (__might_sleep+0x1dc/0x1e0) -[ 512.745717] [] (rt_spin_lock+0x34/0x6c) -[ 512.746073] [] (do_force_sig_info+0x34/0xf0) -[ 512.746457] [] (force_sig_info+0x18/0x1c) -[ 512.746829] [] (__do_user_fault+0x9c/0xd8) -[ 512.747185] [] (do_bad_area+0x7c/0x94) -[ 512.747536] [] (do_sect_fault+0x40/0x48) -[ 512.747898] [] (do_DataAbort+0x40/0xa0) -[ 512.748181] Exception stack(0xecaa1fb0 to 0xecaa1ff8) - -Oxc0000000 belongs to kernel address space, user task can not be -allowed to access it. For above condition, correct result is that -test case should receive a “segment fault” and exits but not stacks. - -the root cause is commit 02fe2845d6a8 ("avoid enabling interrupts in -prefetch/data abort handlers"),it deletes irq enable block in Data -abort assemble code and move them into page/breakpiont/alignment fault -handlers instead. But author does not enable irq in translation/section -permission fault handlers. ARM disables irq when it enters exception/ -interrupt mode, if kernel doesn't enable irq, it would be still disabled -during translation/section permission fault. - -We see the above splat because do_force_sig_info is still called with -IRQs off, and that code eventually does a: - - spin_lock_irqsave(&t->sighand->siglock, flags); - -As this is architecture independent code, and we've not seen any other -need for other arch to have the siglock converted to raw lock, we can -conclude that we should enable irq for ARM translation/section -permission exception. 
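-The shape of the fix, modelled in user space (the pt_regs field, flag
-and handler names below are illustrative, not the ARM fault-handler
-API): re-enable "interrupts" only when the faulting context had them
-enabled, before anything that may take a sleeping lock on RT.
-
-#include <stdio.h>
-#include <stdbool.h>
-
-struct pt_regs_mock { bool irqs_enabled; };
-
-static bool cpu_irqs_on;	/* exception entry leaves this false */
-
-static void do_sect_fault_mock(struct pt_regs_mock *regs)
-{
-	/* Mirror the interrupted context: never force IRQs on inside a
-	 * genuine irqs-off region, but restore them for a user-mode
-	 * fault so siglock (a sleeping lock on RT) may be taken. */
-	if (regs->irqs_enabled)
-		cpu_irqs_on = true;
-
-	printf("delivering SIGSEGV with irqs %s\n",
-	       cpu_irqs_on ? "on" : "off");
-}
-
-int main(void)
-{
-	struct pt_regs_mock user_ctx = { .irqs_enabled = true };
-
-	cpu_irqs_on = false;		/* masked on exception entry */
-	do_sect_fault_mock(&user_ctx);
-	return 0;
-}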
- - -Signed-off-by: Yadi.hu -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mm/fault.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/arch/arm/mm/fault.c -+++ b/arch/arm/mm/fault.c -@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, - if (addr < TASK_SIZE) - return do_page_fault(addr, fsr, regs); - -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - if (user_mode(regs)) - goto bad_area; - -@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, - static int - do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - { -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - do_bad_area(addr, fsr, regs); - return 0; - } diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch deleted file mode 100644 index 89b0fbed5..000000000 --- a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 21 Mar 2013 19:01:05 +0100 -Subject: printk: Drop the logbuf_lock more often -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The lock is hold with irgs off. The latency drops 500us+ on my arm bugs -with a "full" buffer after executing "dmesg" on the shell. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 27 ++++++++++++++++++++++++++- - 1 file changed, 26 insertions(+), 1 deletion(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -1262,6 +1262,7 @@ static int syslog_print_all(char __user - { - char *text; - int len = 0; -+ int attempts = 0; - - text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); - if (!text) -@@ -1273,7 +1274,14 @@ static int syslog_print_all(char __user - u64 seq; - u32 idx; - enum log_flags prev; -- -+ int num_msg; -+try_again: -+ attempts++; -+ if (attempts > 10) { -+ len = -EBUSY; -+ goto out; -+ } -+ num_msg = 0; - if (clear_seq < log_first_seq) { - /* messages are gone, move to first available one */ - clear_seq = log_first_seq; -@@ -1294,6 +1302,14 @@ static int syslog_print_all(char __user - prev = msg->flags; - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ raw_spin_unlock_irq(&logbuf_lock); -+ raw_spin_lock_irq(&logbuf_lock); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* move first record forward until length fits into the buffer */ -@@ -1307,6 +1323,14 @@ static int syslog_print_all(char __user - prev = msg->flags; - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ raw_spin_unlock_irq(&logbuf_lock); -+ raw_spin_lock_irq(&logbuf_lock); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* last message fitting into this dump */ -@@ -1347,6 +1371,7 @@ static int syslog_print_all(char __user - clear_seq = log_next_seq; - clear_idx = log_next_idx; - } -+out: - raw_spin_unlock_irq(&logbuf_lock); - - kfree(text); diff --git a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch deleted file mode 100644 index ec9652767..000000000 --- a/debian/patches/features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Marcelo Tosatti -Date: Wed, 8 Apr 2015 20:33:25 -0300 -Subject: KVM: lapic: mark LAPIC timer handler as irqsafe -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Since lapic timer handler only wakes up a simple waitqueue, -it can be executed from hardirq context. - -Reduces average cyclictest latency by 3us. - -Signed-off-by: Marcelo Tosatti -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kvm/lapic.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -1801,6 +1801,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc - hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - apic->lapic_timer.timer.function = apic_timer_fn; -+ apic->lapic_timer.timer.irqsafe = 1; - - /* - * APIC is created enabled. This will prevent kvm_lapic_set_base from diff --git a/debian/patches/features/all/rt/KVM-use-simple-waitqueue-for-vcpu-wq.patch b/debian/patches/features/all/rt/KVM-use-simple-waitqueue-for-vcpu-wq.patch deleted file mode 100644 index f7a4bff0e..000000000 --- a/debian/patches/features/all/rt/KVM-use-simple-waitqueue-for-vcpu-wq.patch +++ /dev/null @@ -1,335 +0,0 @@ -From: Marcelo Tosatti -Date: Wed, 8 Apr 2015 20:33:24 -0300 -Subject: KVM: use simple waitqueue for vcpu->wq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The problem: - -On -RT, an emulated LAPIC timer instances has the following path: - -1) hard interrupt -2) ksoftirqd is scheduled -3) ksoftirqd wakes up vcpu thread -4) vcpu thread is scheduled - -This extra context switch introduces unnecessary latency in the -LAPIC path for a KVM guest. - -The solution: - -Allow waking up vcpu thread from hardirq context, -thus avoiding the need for ksoftirqd to be scheduled. - -Normal waitqueues make use of spinlocks, which on -RT -are sleepable locks. Therefore, waking up a waitqueue -waiter involves locking a sleeping lock, which -is not allowed from hard interrupt context. - -cyclictest command line: -# cyclictest -m -n -q -p99 -l 1000000 -h60 -D 1m - -This patch reduces the average latency in my tests from 14us to 11us. 
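-The constraint being worked around has a close user-space analogue: a
-POSIX condition variable (like a normal waitqueue) takes a lock
-internally and is not async-signal-safe, while an eventfd write (like
-the simple waitqueue's raw-lock wakeup) may be issued from a context
-that must not sleep. A runnable sketch, with a signal handler standing
-in for the hardirq:
-
-#include <stdio.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <signal.h>
-#include <sys/eventfd.h>
-
-static int efd;
-
-/* Plays the role of the LAPIC-timer hardirq: it must wake the waiter
- * without acquiring any sleeping lock. write(2) is async-signal-safe;
- * pthread_cond_signal() is not. */
-static void hardirq_like_handler(int sig)
-{
-	uint64_t one = 1;
-
-	if (write(efd, &one, sizeof(one)) < 0)
-		_exit(1);
-}
-
-int main(void)
-{
-	uint64_t val;
-
-	efd = eventfd(0, 0);
-	signal(SIGALRM, hardirq_like_handler);
-	alarm(1);			/* the "timer interrupt" */
-
-	/* The "vcpu" blocks here until the wakeup arrives. */
-	if (read(efd, &val, sizeof(val)) != sizeof(val))
-		return 1;
-	printf("vcpu woken, count=%llu\n", (unsigned long long)val);
-	return 0;
-}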
- -Signed-off-by: Marcelo Tosatti -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/kvm/arm.c | 8 ++++---- - arch/arm/kvm/psci.c | 4 ++-- - arch/powerpc/include/asm/kvm_host.h | 4 ++-- - arch/powerpc/kvm/book3s_hv.c | 23 +++++++++++------------ - arch/s390/include/asm/kvm_host.h | 2 +- - arch/s390/kvm/interrupt.c | 4 ++-- - arch/x86/kvm/lapic.c | 6 +++--- - include/linux/kvm_host.h | 4 ++-- - virt/kvm/async_pf.c | 4 ++-- - virt/kvm/kvm_main.c | 16 ++++++++-------- - 10 files changed, 37 insertions(+), 38 deletions(-) - ---- a/arch/arm/kvm/arm.c -+++ b/arch/arm/kvm/arm.c -@@ -498,18 +498,18 @@ static void kvm_arm_resume_guest(struct - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) { -- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); -+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu); - - vcpu->arch.pause = false; -- wake_up_interruptible(wq); -+ swait_wake_interruptible(wq); - } - } - - static void vcpu_sleep(struct kvm_vcpu *vcpu) - { -- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); -+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu); - -- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) && -+ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) && - (!vcpu->arch.pause))); - } - ---- a/arch/arm/kvm/psci.c -+++ b/arch/arm/kvm/psci.c -@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(st - { - struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu = NULL; -- wait_queue_head_t *wq; -+ struct swait_head *wq; - unsigned long cpu_id; - unsigned long context_id; - phys_addr_t target_pc; -@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(st - smp_mb(); /* Make sure the above is visible */ - - wq = kvm_arch_vcpu_wq(vcpu); -- wake_up_interruptible(wq); -+ swait_wake_interruptible(wq); - - return PSCI_RET_SUCCESS; - } ---- a/arch/powerpc/include/asm/kvm_host.h -+++ b/arch/powerpc/include/asm/kvm_host.h -@@ -286,7 +286,7 @@ struct kvmppc_vcore { - struct list_head runnable_threads; - struct list_head preempt_list; - spinlock_t lock; -- wait_queue_head_t wq; -+ struct swait_head wq; - spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */ - u64 stolen_tb; - u64 preempt_tb; -@@ -626,7 +626,7 @@ struct kvm_vcpu_arch { - u8 prodded; - u32 last_inst; - -- wait_queue_head_t *wqp; -+ struct swait_head *wqp; - struct kvmppc_vcore *vcore; - int ret; - int trap; ---- a/arch/powerpc/kvm/book3s_hv.c -+++ b/arch/powerpc/kvm/book3s_hv.c -@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu) - static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) - { - int cpu; -- wait_queue_head_t *wqp; -+ struct swait_head *wqp; - - wqp = kvm_arch_vcpu_wq(vcpu); -- if (waitqueue_active(wqp)) { -- wake_up_interruptible(wqp); -+ if (swaitqueue_active(wqp)) { -+ swait_wake_interruptible(wqp); - ++vcpu->stat.halt_wakeup; - } - -@@ -707,8 +707,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v - tvcpu->arch.prodded = 1; - smp_mb(); - if (vcpu->arch.ceded) { -- if (waitqueue_active(&vcpu->wq)) { -- wake_up_interruptible(&vcpu->wq); -+ if (swaitqueue_active(&vcpu->wq)) { -+ swait_wake_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } -@@ -1447,7 +1447,7 @@ static struct kvmppc_vcore *kvmppc_vcore - INIT_LIST_HEAD(&vcore->runnable_threads); - spin_lock_init(&vcore->lock); - spin_lock_init(&vcore->stoltb_lock); -- init_waitqueue_head(&vcore->wq); -+ init_swait_head(&vcore->wq); - vcore->preempt_tb = TB_NIL; - vcore->lpcr = kvm->arch.lpcr; - vcore->first_vcpuid = core * threads_per_subcore; -@@ -2519,10 +2519,9 @@ static void kvmppc_vcore_blocked(struct - { - struct 
kvm_vcpu *vcpu; - int do_sleep = 1; -+ DEFINE_SWAITER(wait); - -- DEFINE_WAIT(wait); -- -- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); -+ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE); - - /* - * Check one last time for pending exceptions and ceded state after -@@ -2536,7 +2535,7 @@ static void kvmppc_vcore_blocked(struct - } - - if (!do_sleep) { -- finish_wait(&vc->wq, &wait); -+ swait_finish(&vc->wq, &wait); - return; - } - -@@ -2544,7 +2543,7 @@ static void kvmppc_vcore_blocked(struct - trace_kvmppc_vcore_blocked(vc, 0); - spin_unlock(&vc->lock); - schedule(); -- finish_wait(&vc->wq, &wait); -+ swait_finish(&vc->wq, &wait); - spin_lock(&vc->lock); - vc->vcore_state = VCORE_INACTIVE; - trace_kvmppc_vcore_blocked(vc, 1); -@@ -2600,7 +2599,7 @@ static int kvmppc_run_vcpu(struct kvm_ru - kvmppc_start_thread(vcpu, vc); - trace_kvm_guest_enter(vcpu); - } else if (vc->vcore_state == VCORE_SLEEPING) { -- wake_up(&vc->wq); -+ swait_wake(&vc->wq); - } - - } ---- a/arch/s390/include/asm/kvm_host.h -+++ b/arch/s390/include/asm/kvm_host.h -@@ -427,7 +427,7 @@ struct kvm_s390_irq_payload { - struct kvm_s390_local_interrupt { - spinlock_t lock; - struct kvm_s390_float_interrupt *float_int; -- wait_queue_head_t *wq; -+ struct swait_head *wq; - atomic_t *cpuflags; - DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); - struct kvm_s390_irq_payload irq; ---- a/arch/s390/kvm/interrupt.c -+++ b/arch/s390/kvm/interrupt.c -@@ -868,13 +868,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu - - void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) - { -- if (waitqueue_active(&vcpu->wq)) { -+ if (swaitqueue_active(&vcpu->wq)) { - /* - * The vcpu gave up the cpu voluntarily, mark it as a good - * yield-candidate. - */ - vcpu->preempted = true; -- wake_up_interruptible(&vcpu->wq); -+ swait_wake_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_ - static void apic_timer_expired(struct kvm_lapic *apic) - { - struct kvm_vcpu *vcpu = apic->vcpu; -- wait_queue_head_t *q = &vcpu->wq; -+ struct swait_head *q = &vcpu->wq; - struct kvm_timer *ktimer = &apic->lapic_timer; - - if (atomic_read(&apic->lapic_timer.pending)) -@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kv - atomic_inc(&apic->lapic_timer.pending); - kvm_set_pending_timer(vcpu); - -- if (waitqueue_active(q)) -- wake_up_interruptible(q); -+ if (swaitqueue_active(q)) -+ swait_wake_interruptible(q); - - if (apic_lvtt_tscdeadline(apic)) - ktimer->expired_tscdeadline = ktimer->tscdeadline; ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -243,7 +243,7 @@ struct kvm_vcpu { - int fpu_active; - int guest_fpu_loaded, guest_xcr0_loaded; - unsigned char fpu_counter; -- wait_queue_head_t wq; -+ struct swait_head wq; - struct pid *pid; - int sigset_active; - sigset_t sigset; -@@ -794,7 +794,7 @@ static inline bool kvm_arch_has_assigned - } - #endif - --static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) -+static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) - { - #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.wqp; ---- a/virt/kvm/async_pf.c -+++ b/virt/kvm/async_pf.c -@@ -98,8 +98,8 @@ static void async_pf_execute(struct work - * This memory barrier pairs with prepare_to_wait's set_current_state() - */ - smp_mb(); -- if (waitqueue_active(&vcpu->wq)) -- wake_up_interruptible(&vcpu->wq); -+ if (swaitqueue_active(&vcpu->wq)) -+ swait_wake_interruptible(&vcpu->wq); - - 
mmput(mm); - kvm_put_kvm(vcpu->kvm); ---- a/virt/kvm/kvm_main.c -+++ b/virt/kvm/kvm_main.c -@@ -227,7 +227,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, - vcpu->vcpu_id = id; - vcpu->pid = NULL; - vcpu->halt_poll_ns = 0; -- init_waitqueue_head(&vcpu->wq); -+ init_swait_head(&vcpu->wq); - kvm_async_pf_vcpu_init(vcpu); - - vcpu->pre_pcpu = -1; -@@ -1999,7 +1999,7 @@ static int kvm_vcpu_check_block(struct k - void kvm_vcpu_block(struct kvm_vcpu *vcpu) - { - ktime_t start, cur; -- DEFINE_WAIT(wait); -+ DEFINE_SWAITER(wait); - bool waited = false; - u64 block_ns; - -@@ -2024,7 +2024,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp - kvm_arch_vcpu_blocking(vcpu); - - for (;;) { -- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); -+ swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); - - if (kvm_vcpu_check_block(vcpu) < 0) - break; -@@ -2033,7 +2033,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp - schedule(); - } - -- finish_wait(&vcpu->wq, &wait); -+ swait_finish(&vcpu->wq, &wait); - cur = ktime_get(); - - kvm_arch_vcpu_unblocking(vcpu); -@@ -2065,11 +2065,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu - { - int me; - int cpu = vcpu->cpu; -- wait_queue_head_t *wqp; -+ struct swait_head *wqp; - - wqp = kvm_arch_vcpu_wq(vcpu); -- if (waitqueue_active(wqp)) { -- wake_up_interruptible(wqp); -+ if (swaitqueue_active(wqp)) { -+ swait_wake_interruptible(wqp); - ++vcpu->stat.halt_wakeup; - } - -@@ -2170,7 +2170,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m - continue; - if (vcpu == me) - continue; -- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) -+ if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) - continue; - if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) - continue; diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch deleted file mode 100644 index 8a9e17bed..000000000 --- a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch +++ /dev/null @@ -1,174 +0,0 @@ -From: Steven Rostedt -Date: Wed, 13 Feb 2013 09:26:05 -0500 -Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -We hit the following bug with 3.6-rt: - -[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002 -[ 5.898991] no locks held by swapper/3/0. -[ 5.898993] Modules linked in: -[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 -[ 5.898997] Call Trace: -[ 5.899011] [] __schedule_bug+0x67/0x90 -[ 5.899028] [] __schedule+0x793/0x7a0 -[ 5.899032] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 -[ 5.899034] [] schedule+0x29/0x70 -[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002 -[ 5.899037] no locks held by swapper/7/0. -[ 5.899039] [] rt_spin_lock_slowlock+0xe5/0x2f0 -[ 5.899040] Modules linked in: -[ 5.899041] -[ 5.899045] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 -[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 -[ 5.899047] Call Trace: -[ 5.899049] [] rt_spin_lock+0x16/0x40 -[ 5.899052] [] __schedule_bug+0x67/0x90 -[ 5.899054] [] ? notifier_call_chain+0x80/0x80 -[ 5.899056] [] __schedule+0x793/0x7a0 -[ 5.899059] [] acpi_os_acquire_lock+0x1f/0x23 -[ 5.899062] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 -[ 5.899068] [] acpi_write_bit_register+0x33/0xb0 -[ 5.899071] [] schedule+0x29/0x70 -[ 5.899072] [] ? 
acpi_read_bit_register+0x33/0x51 -[ 5.899074] [] rt_spin_lock_slowlock+0xe5/0x2f0 -[ 5.899077] [] acpi_idle_enter_bm+0x8a/0x28e -[ 5.899079] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 -[ 5.899081] [] ? this_cpu_load+0x1a/0x30 -[ 5.899083] [] rt_spin_lock+0x16/0x40 -[ 5.899087] [] cpuidle_enter+0x19/0x20 -[ 5.899088] [] ? notifier_call_chain+0x80/0x80 -[ 5.899090] [] cpuidle_enter_state+0x17/0x50 -[ 5.899092] [] acpi_os_acquire_lock+0x1f/0x23 -[ 5.899094] [] cpuidle899101] [] ? - -As the acpi code disables interrupts in acpi_idle_enter_bm, and calls -code that grabs the acpi lock, it causes issues as the lock is currently -in RT a sleeping lock. - -The lock was converted from a raw to a sleeping lock due to some -previous issues, and tests that showed it didn't seem to matter. -Unfortunately, it did matter for one of our boxes. - -This patch converts the lock back to a raw lock. I've run this code on a -few of my own machines, one being my laptop that uses the acpi quite -extensively. I've been able to suspend and resume without issues. - -[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ] - -Signed-off-by: Steven Rostedt -Cc: John Kacur -Cc: Clark Williams -Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/acpi/acpica/acglobal.h | 2 +- - drivers/acpi/acpica/hwregs.c | 4 ++-- - drivers/acpi/acpica/hwxface.c | 4 ++-- - drivers/acpi/acpica/utmutex.c | 4 ++-- - include/acpi/platform/aclinux.h | 15 +++++++++++++++ - 5 files changed, 22 insertions(+), 7 deletions(-) - ---- a/drivers/acpi/acpica/acglobal.h -+++ b/drivers/acpi/acpica/acglobal.h -@@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pen - * interrupt level - */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ --ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ -+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); - - /* Mutex for _OSI support */ ---- a/drivers/acpi/acpica/hwregs.c -+++ b/drivers/acpi/acpica/hwregs.c -@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo - ACPI_BITMASK_ALL_FIXED_STATUS, - ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* Clear the fixed events in PM1 A/B */ - - status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, - ACPI_BITMASK_ALL_FIXED_STATUS); - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - - if (ACPI_FAILURE(status)) { - goto exit; ---- a/drivers/acpi/acpica/hwxface.c -+++ b/drivers/acpi/acpica/hwxface.c -@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32 - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* - * At this point, we know that the parent register is one of the -@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32 - - unlock_and_exit: - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - return_ACPI_STATUS(status); - } - ---- a/drivers/acpi/acpica/utmutex.c -+++ b/drivers/acpi/acpica/utmutex.c -@@ -88,7 +88,7 @@ acpi_status 
acpi_ut_mutex_initialize(voi - return_ACPI_STATUS (status); - } - -- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); -+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } -@@ -156,7 +156,7 @@ void acpi_ut_mutex_terminate(void) - /* Delete the spinlocks */ - - acpi_os_delete_lock(acpi_gbl_gpe_lock); -- acpi_os_delete_lock(acpi_gbl_hardware_lock); -+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); - acpi_os_delete_lock(acpi_gbl_reference_count_lock); - - /* Delete the reader/writer lock */ ---- a/include/acpi/platform/aclinux.h -+++ b/include/acpi/platform/aclinux.h -@@ -127,6 +127,7 @@ - - #define acpi_cache_t struct kmem_cache - #define acpi_spinlock spinlock_t * -+#define acpi_raw_spinlock raw_spinlock_t * - #define acpi_cpu_flags unsigned long - - /* Use native linux version of acpi_os_allocate_zeroed */ -@@ -145,6 +146,20 @@ - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock - -+#define acpi_os_create_raw_lock(__handle) \ -+({ \ -+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ -+ \ -+ if (lock) { \ -+ *(__handle) = lock; \ -+ raw_spin_lock_init(*(__handle)); \ -+ } \ -+ lock ? AE_OK : AE_NO_MEMORY; \ -+ }) -+ -+#define acpi_os_delete_raw_lock(__handle) kfree(__handle) -+ -+ - /* - * OSL interfaces used by debugger/disassembler - */ diff --git a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch deleted file mode 100644 index 23c3e0a7c..000000000 --- a/debian/patches/features/all/rt/arch-arm64-Add-lazy-preempt-support.patch +++ /dev/null @@ -1,104 +0,0 @@ -From: Anders Roxell -Date: Thu, 14 May 2015 17:52:17 +0200 -Subject: arch/arm64: Add lazy preempt support -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -arm64 is missing support for PREEMPT_RT. The main feature which is -lacking is support for lazy preemption. The arch-specific entry code, -thread information structure definitions, and associated data tables -have to be extended to provide this support. Then the Kconfig file has -to be extended to indicate the support is available, and also to -indicate that support for full RT preemption is now available. 
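
[ note: the entry.S hunk below encodes the whole decision in assembly.
  A minimal C rendering of the same irq-exit logic, for orientation
  only -- the helper name is invented and no such function exists in
  the kernel: ]

	static bool irq_exit_should_preempt(struct thread_info *ti)
	{
		if (ti->preempt_count)			/* preemption disabled */
			return false;
		if (ti->flags & _TIF_NEED_RESCHED)	/* hard request, always honour */
			return true;
		if (ti->preempt_lazy_count)		/* lazy preemption disabled */
			return false;
		/* lazy request, honoured only once lazy sections are closed */
		return ti->flags & _TIF_NEED_RESCHED_LAZY;
	}
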
- -Signed-off-by: Anders Roxell ---- - arch/arm64/Kconfig | 1 + - arch/arm64/include/asm/thread_info.h | 3 +++ - arch/arm64/kernel/asm-offsets.c | 1 + - arch/arm64/kernel/entry.S | 13 ++++++++++--- - 4 files changed, 15 insertions(+), 3 deletions(-) - ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -76,6 +76,7 @@ config ARM64 - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP - select HAVE_RCU_TABLE_FREE -+ select HAVE_PREEMPT_LAZY - select HAVE_SYSCALL_TRACEPOINTS - select IOMMU_DMA if IOMMU_SUPPORT - select IRQ_DOMAIN ---- a/arch/arm64/include/asm/thread_info.h -+++ b/arch/arm64/include/asm/thread_info.h -@@ -49,6 +49,7 @@ struct thread_info { - mm_segment_t addr_limit; /* address limit */ - struct task_struct *task; /* main task structure */ - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - int cpu; /* cpu */ - }; - -@@ -103,6 +104,7 @@ static inline struct thread_info *curren - #define TIF_NEED_RESCHED 1 - #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ - #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ -+#define TIF_NEED_RESCHED_LAZY 4 - #define TIF_NOHZ 7 - #define TIF_SYSCALL_TRACE 8 - #define TIF_SYSCALL_AUDIT 9 -@@ -118,6 +120,7 @@ static inline struct thread_info *curren - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) - #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_NOHZ (1 << TIF_NOHZ) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) ---- a/arch/arm64/kernel/asm-offsets.c -+++ b/arch/arm64/kernel/asm-offsets.c -@@ -35,6 +35,7 @@ int main(void) - BLANK(); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); -+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); ---- a/arch/arm64/kernel/entry.S -+++ b/arch/arm64/kernel/entry.S -@@ -363,11 +363,16 @@ ENDPROC(el1_sync) - #ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr w24, [tsk, #TI_PREEMPT] // get preempt count -- cbnz w24, 1f // preempt count != 0 -+ cbnz w24, 2f // preempt count != 0 - ldr x0, [tsk, #TI_FLAGS] // get flags -- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? -- bl el1_preempt -+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? -+ -+ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count -+ cbnz w24, 2f // preempt lazy count != 0 -+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? - 1: -+ bl el1_preempt -+2: - #endif - #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -@@ -381,6 +386,7 @@ ENDPROC(el1_irq) - 1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? -+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? 
- ret x24 - #endif - -@@ -625,6 +631,7 @@ ENDPROC(cpu_switch_to) - */ - work_pending: - tbnz x1, #TIF_NEED_RESCHED, work_resched -+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched - /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ - ldr x2, [sp, #S_PSTATE] - mov x0, sp // 'regs' diff --git a/debian/patches/features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch b/debian/patches/features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch deleted file mode 100644 index b45f44534..000000000 --- a/debian/patches/features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 22 Jan 2016 21:33:39 +0100 -Subject: arm+arm64: lazy-preempt: add TIF_NEED_RESCHED_LAZY to _TIF_WORK_MASK -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -_TIF_WORK_MASK is used to check for TIF_NEED_RESCHED so we need to check -for TIF_NEED_RESCHED_LAZY here, too. - -Reported-by: Grygorii Strashko -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/include/asm/thread_info.h | 7 ++++--- - arch/arm/kernel/entry-common.S | 9 +++++++-- - arch/arm64/include/asm/thread_info.h | 3 ++- - 3 files changed, 13 insertions(+), 6 deletions(-) - ---- a/arch/arm/include/asm/thread_info.h -+++ b/arch/arm/include/asm/thread_info.h -@@ -143,8 +143,8 @@ extern int vfp_restore_user_hwstate(stru - #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ - #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ - #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ --#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ --#define TIF_NEED_RESCHED_LAZY 8 -+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ -+#define TIF_NEED_RESCHED_LAZY 7 - - #define TIF_NOHZ 12 /* in adaptive nohz mode */ - #define TIF_USING_IWMMXT 17 -@@ -170,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru - * Change these and you break ASM code in entry-common.S - */ - #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ -- _TIF_NOTIFY_RESUME | _TIF_UPROBE) -+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -+ _TIF_NEED_RESCHED_LAZY) - - #endif /* __KERNEL__ */ - #endif /* __ASM_ARM_THREAD_INFO_H */ ---- a/arch/arm/kernel/entry-common.S -+++ b/arch/arm/kernel/entry-common.S -@@ -36,7 +36,9 @@ - UNWIND(.cantunwind ) - disable_irq_notrace @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne fast_work_pending -+ tst r1, #_TIF_SECCOMP - bne fast_work_pending - - /* perform architecture specific actions before user return */ -@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall) - str r0, [sp, #S_R0 + S_OFF]! 
@ save returned r0 - disable_irq_notrace @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne do_slower_path -+ tst r1, #_TIF_SECCOMP - beq no_work_pending -+do_slower_path: - UNWIND(.fnend ) - ENDPROC(ret_fast_syscall) - ---- a/arch/arm64/include/asm/thread_info.h -+++ b/arch/arm64/include/asm/thread_info.h -@@ -129,7 +129,8 @@ static inline struct thread_info *curren - #define _TIF_32BIT (1 << TIF_32BIT) - - #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ -- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) -+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ -+ _TIF_NEED_RESCHED_LAZY) - - #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch deleted file mode 100644 index 0f3ecd995..000000000 --- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch +++ /dev/null @@ -1,147 +0,0 @@ -From: Benedikt Spranger -Date: Sat, 6 Mar 2010 17:47:10 +0100 -Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Setup and remove the interrupt handler in clock event mode selection. -This avoids calling the (shared) interrupt handler when the device is -not used. - -Signed-off-by: Benedikt Spranger -Signed-off-by: Thomas Gleixner -[bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since -commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6. -Patch based on what Sami Pietikäinen suggested]. 
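
[ note: the shape of the change, reduced to a sketch -- the my_* names
  are placeholders rather than driver code; the real hunks for both
  at91 timers follow. The handler is installed only while the clock
  event device is in use, so an idle device no longer fires the shared
  interrupt: ]

	static int my_clkevt_shutdown(struct clock_event_device *dev)
	{
		struct my_timer *t = to_my_timer(dev);	/* hypothetical */

		my_hw_stop(t);				/* hypothetical hw off */
		free_irq(t->irq, t);
		return 0;
	}

	static int my_clkevt_set_periodic(struct clock_event_device *dev)
	{
		struct my_timer *t = to_my_timer(dev);
		int ret;

		ret = request_irq(t->irq, my_timer_interrupt,
				  IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
				  "my_tick", t);
		if (ret)
			return ret;
		my_hw_start_periodic(t);		/* hypothetical hw on */
		return 0;
	}

(The hunks below keep the original probe-time behaviour of panicking
when request_irq() fails.)
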
-Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/clocksource/timer-atmel-pit.c | 17 +++++++++-------- - drivers/clocksource/timer-atmel-st.c | 32 ++++++++++++++++++++++---------- - 2 files changed, 31 insertions(+), 18 deletions(-) - ---- a/drivers/clocksource/timer-atmel-pit.c -+++ b/drivers/clocksource/timer-atmel-pit.c -@@ -96,15 +96,24 @@ static int pit_clkevt_shutdown(struct cl - - /* disable irq, leaving the clocksource active */ - pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); -+ free_irq(data->irq, data); - return 0; - } - -+static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id); - /* - * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) - */ - static int pit_clkevt_set_periodic(struct clock_event_device *dev) - { - struct pit_data *data = clkevt_to_pit_data(dev); -+ int ret; -+ -+ ret = request_irq(data->irq, at91sam926x_pit_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", data); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); - - /* update clocksource counter */ - data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); -@@ -181,7 +190,6 @@ static void __init at91sam926x_pit_commo - { - unsigned long pit_rate; - unsigned bits; -- int ret; - - /* - * Use our actual MCK to figure out how many MCK/16 ticks per -@@ -206,13 +214,6 @@ static void __init at91sam926x_pit_commo - data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; - clocksource_register_hz(&data->clksrc, pit_rate); - -- /* Set up irq handler */ -- ret = request_irq(data->irq, at91sam926x_pit_interrupt, -- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -- "at91_tick", data); -- if (ret) -- panic(pr_fmt("Unable to setup IRQ\n")); -- - /* Set up and register clockevents */ - data->clkevt.name = "pit"; - data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; ---- a/drivers/clocksource/timer-atmel-st.c -+++ b/drivers/clocksource/timer-atmel-st.c -@@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_ - last_crtr = read_CRTR(); - } - -+static int atmel_st_irq; -+ - static int clkevt32k_shutdown(struct clock_event_device *evt) - { - clkdev32k_disable_and_flush_irq(); - irqmask = 0; - regmap_write(regmap_st, AT91_ST_IER, irqmask); -+ free_irq(atmel_st_irq, regmap_st); - return 0; - } - - static int clkevt32k_set_oneshot(struct clock_event_device *dev) - { -+ int ret; -+ - clkdev32k_disable_and_flush_irq(); - -+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", regmap_st); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); -+ - /* - * ALM for oneshot irqs, set by next_event() - * before 32 seconds have passed. 
-@@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct - - static int clkevt32k_set_periodic(struct clock_event_device *dev) - { -+ int ret; -+ - clkdev32k_disable_and_flush_irq(); - -+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", regmap_st); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); -+ - /* PIT for periodic irqs; fixed rate of 1/HZ */ - irqmask = AT91_ST_PITS; - regmap_write(regmap_st, AT91_ST_PIMR, timer_latch); -@@ -198,7 +217,7 @@ static void __init atmel_st_timer_init(s - { - struct clk *sclk; - unsigned int sclk_rate, val; -- int irq, ret; -+ int ret; - - regmap_st = syscon_node_to_regmap(node); - if (IS_ERR(regmap_st)) -@@ -210,17 +229,10 @@ static void __init atmel_st_timer_init(s - regmap_read(regmap_st, AT91_ST_SR, &val); - - /* Get the interrupts property */ -- irq = irq_of_parse_and_map(node, 0); -- if (!irq) -+ atmel_st_irq = irq_of_parse_and_map(node, 0); -+ if (!atmel_st_irq) - panic(pr_fmt("Unable to get IRQ from DT\n")); - -- /* Make IRQs happen for the system timer */ -- ret = request_irq(irq, at91rm9200_timer_interrupt, -- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -- "at91_tick", regmap_st); -- if (ret) -- panic(pr_fmt("Unable to setup IRQ\n")); -- - sclk = of_clk_get(node, 0); - if (IS_ERR(sclk)) - panic(pr_fmt("Unable to get slow clock\n")); diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch deleted file mode 100644 index a93a22a81..000000000 --- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Thomas Gleixner -Date: Sat, 1 May 2010 18:29:35 +0200 -Subject: ARM: at91: tclib: Default to tclib timer for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -RT is not too happy about the shared timer interrupt in AT91 -devices. Default to tclib timer for RT. - -Signed-off-by: Thomas Gleixner - ---- - drivers/misc/Kconfig | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI - config ATMEL_TCLIB - bool "Atmel AT32/AT91 Timer/Counter Library" - depends on (AVR32 || ARCH_AT91) -+ default y if PREEMPT_RT_FULL - help - Select this if you want a library to allocate the Timer/Counter - blocks found on many Atmel processors. This facilitates using -@@ -86,7 +87,7 @@ config ATMEL_TCB_CLKSRC_BLOCK - config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - bool "TC Block use 32 KiHz clock" - depends on ATMEL_TCB_CLKSRC -- default y -+ default y if !PREEMPT_RT_FULL - help - Select this to use 32 KiHz base clock rate as TC block clock - source for clock events. diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch deleted file mode 100644 index 34643c709..000000000 --- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch +++ /dev/null @@ -1,408 +0,0 @@ -From: Frank Rowand -Date: Mon, 19 Sep 2011 14:51:14 -0700 -Subject: arm: Convert arm boot_lock to raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The arm boot_lock is used by the secondary processor startup code. The locking -task is the idle thread, which has idle->sched_class == &idle_sched_class. 
-idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the -lock, the attempt to wake it when the lock becomes available will fail: - -try_to_wake_up() - ... - activate_task() - enqueue_task() - p->sched_class->enqueue_task(rq, p, flags) - -Fix by converting boot_lock to a raw spin lock. - -Signed-off-by: Frank Rowand -Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com -Signed-off-by: Thomas Gleixner ---- - arch/arm/mach-exynos/platsmp.c | 12 ++++++------ - arch/arm/mach-hisi/platmcpm.c | 22 +++++++++++----------- - arch/arm/mach-omap2/omap-smp.c | 10 +++++----- - arch/arm/mach-prima2/platsmp.c | 10 +++++----- - arch/arm/mach-qcom/platsmp.c | 10 +++++----- - arch/arm/mach-spear/platsmp.c | 10 +++++----- - arch/arm/mach-sti/platsmp.c | 10 +++++----- - arch/arm/plat-versatile/platsmp.c | 10 +++++----- - 8 files changed, 47 insertions(+), 47 deletions(-) - ---- a/arch/arm/mach-exynos/platsmp.c -+++ b/arch/arm/mach-exynos/platsmp.c -@@ -230,7 +230,7 @@ static void __iomem *scu_base_addr(void) - return (void __iomem *)(S5P_VA_SCU); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void exynos_secondary_init(unsigned int cpu) - { -@@ -243,8 +243,8 @@ static void exynos_secondary_init(unsign - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) -@@ -308,7 +308,7 @@ static int exynos_boot_secondary(unsigne - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -335,7 +335,7 @@ static int exynos_boot_secondary(unsigne - - if (timeout == 0) { - printk(KERN_ERR "cpu1 power enable failed"); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return -ETIMEDOUT; - } - } -@@ -381,7 +381,7 @@ static int exynos_boot_secondary(unsigne - * calibrations, then wait for it to finish - */ - fail: -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? ret : 0; - } ---- a/arch/arm/mach-hisi/platmcpm.c -+++ b/arch/arm/mach-hisi/platmcpm.c -@@ -61,7 +61,7 @@ - - static void __iomem *sysctrl, *fabric; - static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - static u32 fabric_phys_addr; - /* - * [0]: bootwrapper physical address -@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned - if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) - return -EINVAL; - -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - - if (hip04_cpu_table[cluster][cpu]) - goto out; -@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned - - out: - hip04_cpu_table[cluster][cpu]++; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - - return 0; - } -@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - hip04_cpu_table[cluster][cpu]--; - if (hip04_cpu_table[cluster][cpu] == 1) { - /* A power_up request went ahead of us. 
*/ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return; - } else if (hip04_cpu_table[cluster][cpu] > 1) { - pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); -@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l - } - - last_man = hip04_cluster_is_down(cluster); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - if (last_man) { - /* Since it's Cortex A15, disable L2 prefetching. */ - asm volatile( -@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l - cpu >= HIP04_MAX_CPUS_PER_CLUSTER); - - count = TIMEOUT_MSEC / POLL_MSEC; -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - for (tries = 0; tries < count; tries++) { - if (hip04_cpu_table[cluster][cpu]) - goto err; -@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l - data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); - if (data & CORE_WFI_STATUS(cpu)) - break; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - /* Wait for clean L2 when the whole cluster is down. */ - msleep(POLL_MSEC); -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - } - if (tries >= count) - goto err; -@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l - goto err; - if (hip04_cluster_is_down(cluster)) - hip04_set_snoop_filter(cluster, 0); -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 1; - err: -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 0; - } - #endif ---- a/arch/arm/mach-omap2/omap-smp.c -+++ b/arch/arm/mach-omap2/omap-smp.c -@@ -43,7 +43,7 @@ - /* SCU base address */ - static void __iomem *scu_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void __iomem *omap4_get_scu_base(void) - { -@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigne - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Update the AuxCoreBoot0 with boot state for secondary core. -@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned - * Now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return 0; - } ---- a/arch/arm/mach-prima2/platsmp.c -+++ b/arch/arm/mach-prima2/platsmp.c -@@ -22,7 +22,7 @@ - - static void __iomem *clk_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sirfsoc_secondary_init(unsigned int cpu) - { -@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsig - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static const struct of_device_id clk_ids[] = { -@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsign - /* make sure write buffer is drained */ - mb(); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsign - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } ---- a/arch/arm/mach-qcom/platsmp.c -+++ b/arch/arm/mach-qcom/platsmp.c -@@ -46,7 +46,7 @@ - - extern void secondary_startup_arm(void); - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - #ifdef CONFIG_HOTPLUG_CPU - static void qcom_cpu_die(unsigned int cpu) -@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int scss_release_secondary(unsigned int cpu) -@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Send the secondary CPU a soft interrupt, thereby causing -@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return ret; - } ---- a/arch/arm/mach-spear/platsmp.c -+++ b/arch/arm/mach-spear/platsmp.c -@@ -32,7 +32,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void __iomem *scu_base = IOMEM(VA_SCU_BASE); - -@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(uns - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsi - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsi - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } ---- a/arch/arm/mach-sti/platsmp.c -+++ b/arch/arm/mach-sti/platsmp.c -@@ -35,7 +35,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sti_secondary_init(unsigned int cpu) - { -@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned i - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned i - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } ---- a/arch/arm/plat-versatile/platsmp.c -+++ b/arch/arm/plat-versatile/platsmp.c -@@ -30,7 +30,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void versatile_secondary_init(unsigned int cpu) - { -@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * This is really belt and braces; we hold unintended secondary -@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch deleted file mode 100644 index b3c95d08a..000000000 --- a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch +++ /dev/null @@ -1,174 +0,0 @@ -Subject: arm: Enable highmem for rt -From: Thomas Gleixner -Date: Wed, 13 Feb 2013 11:03:11 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -fixup highmem for ARM. 
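
[ note: why the context switch gets involved: with the _nort variants
  below, kmap_atomic() no longer disables preemption on RT, so a task
  can be scheduled out while it still owns fixmap slots, and its
  mappings must follow it. Simplified sketch of the switch_kmaps()
  hunk; the kmap_pte/kmap_idx task_struct fields are added elsewhere
  in the RT series, not by this patch: ]

	static void sketch_switch_kmaps(struct task_struct *prev,
					struct task_struct *next)
	{
		int i;

		/* tear down the outgoing task's atomic kmaps... */
		for (i = 0; i < prev->kmap_idx; i++)
			set_fixmap_pte(fixmap_idx(i), __pte(0));

		/* ...and re-establish the incoming task's saved ones */
		for (i = 0; i < next->kmap_idx; i++)
			if (!pte_none(next->kmap_pte[i]))
				set_fixmap_pte(fixmap_idx(i),
					       next->kmap_pte[i]);
	}
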
- -Signed-off-by: Thomas Gleixner ---- - arch/arm/include/asm/switch_to.h | 8 +++++ - arch/arm/mm/highmem.c | 56 +++++++++++++++++++++++++++++++++------ - include/linux/highmem.h | 1 - 3 files changed, 57 insertions(+), 8 deletions(-) - ---- a/arch/arm/include/asm/switch_to.h -+++ b/arch/arm/include/asm/switch_to.h -@@ -3,6 +3,13 @@ - - #include - -+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - /* - * For v7 SMP cores running a preemptible kernel we may be pre-empted - * during a TLB maintenance operation, so execute an inner-shareable dsb -@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(s - #define switch_to(prev,next,last) \ - do { \ - __complete_pending_tlbi(); \ -+ switch_kmaps(prev, next); \ - last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ - } while (0) - ---- a/arch/arm/mm/highmem.c -+++ b/arch/arm/mm/highmem.c -@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsig - return *ptep; - } - -+static unsigned int fixmap_idx(int type) -+{ -+ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+} -+ - void *kmap(struct page *page) - { - might_sleep(); -@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap); - - void *kmap_atomic(struct page *page) - { -+ pte_t pte = mk_pte(page, kmap_prot); - unsigned int idx; - unsigned long vaddr; - void *kmap; - int type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); -@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page) - - type = kmap_atomic_idx_push(); - -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - /* -@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page) - * in place, so the contained TLB flush ensures the TLB is updated - * with the new mapping. 
- */ -- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -@@ -106,10 +115,13 @@ void __kunmap_atomic(void *kvaddr) - - if (kvaddr >= (void *)FIXADDR_START) { - type = kmap_atomic_idx(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - - if (cache_is_vivt()) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); - #else -@@ -122,28 +134,56 @@ void __kunmap_atomic(void *kvaddr) - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); - } - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - - void *kmap_atomic_pfn(unsigned long pfn) - { -+ pte_t pte = pfn_pte(pfn, kmap_prot); - unsigned long vaddr; - int idx, type; - struct page *page = pfn_to_page(pfn); - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - type = kmap_atomic_idx_push(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(get_fixmap_pte(vaddr))); - #endif -- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -+#if defined CONFIG_PREEMPT_RT_FULL -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ set_fixmap_pte(idx, __pte(0)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_fixmap_pte(idx, next_p->kmap_pte[i]); -+ } -+} -+#endif ---- a/include/linux/highmem.h -+++ b/include/linux/highmem.h -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include - diff --git a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch deleted file mode 100644 index 402ecba37..000000000 --- a/debian/patches/features/all/rt/arm-highmem-flush-tlb-on-unmap.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 11 Mar 2013 21:37:27 +0100 -Subject: arm/highmem: Flush tlb on unmap -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The tlb should be flushed on unmap and thus make the mapping entry -invalid. This is only done in the non-debug case which does not look -right. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mm/highmem.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/arm/mm/highmem.c -+++ b/arch/arm/mm/highmem.c -@@ -112,10 +112,10 @@ void __kunmap_atomic(void *kvaddr) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); -- set_fixmap_pte(idx, __pte(0)); - #else - (void) idx; /* to kill a warning */ - #endif -+ set_fixmap_pte(idx, __pte(0)); - kmap_atomic_idx_pop(); - } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - /* this address was obtained through kmap_high_get() */ diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch deleted file mode 100644 index b446f91da..000000000 --- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch +++ /dev/null @@ -1,106 +0,0 @@ -Subject: arm: Add support for lazy preemption -From: Thomas Gleixner -Date: Wed, 31 Oct 2012 12:04:11 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Implement the arm pieces for lazy preempt. - -Signed-off-by: Thomas Gleixner ---- - arch/arm/Kconfig | 1 + - arch/arm/include/asm/thread_info.h | 3 +++ - arch/arm/kernel/asm-offsets.c | 1 + - arch/arm/kernel/entry-armv.S | 13 +++++++++++-- - arch/arm/kernel/signal.c | 3 ++- - 5 files changed, 18 insertions(+), 3 deletions(-) - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -68,6 +68,7 @@ config ARM - select HAVE_PERF_EVENTS - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_SYSCALL_TRACEPOINTS ---- a/arch/arm/include/asm/thread_info.h -+++ b/arch/arm/include/asm/thread_info.h -@@ -49,6 +49,7 @@ struct cpu_context_save { - struct thread_info { - unsigned long flags; /* low level flags */ - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - mm_segment_t addr_limit; /* address limit */ - struct task_struct *task; /* main task structure */ - __u32 cpu; /* cpu */ -@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(stru - #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ - #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ - #define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -+#define TIF_NEED_RESCHED_LAZY 8 - - #define TIF_NOHZ 12 /* in adaptive nohz mode */ - #define TIF_USING_IWMMXT 17 -@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru - #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_UPROBE (1 << TIF_UPROBE) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) ---- a/arch/arm/kernel/asm-offsets.c -+++ b/arch/arm/kernel/asm-offsets.c -@@ -65,6 +65,7 @@ int main(void) - BLANK(); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); -+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); ---- 
a/arch/arm/kernel/entry-armv.S -+++ b/arch/arm/kernel/entry-armv.S -@@ -215,11 +215,18 @@ ENDPROC(__dabt_svc) - #ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count -- ldr r0, [tsk, #TI_FLAGS] @ get flags - teq r8, #0 @ if preempt count != 0 -+ bne 1f @ return from exeption -+ ldr r0, [tsk, #TI_FLAGS] @ get flags -+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set -+ blne svc_preempt @ preempt! -+ -+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count -+ teq r8, #0 @ if preempt lazy count != 0 - movne r0, #0 @ force flags to 0 -- tst r0, #_TIF_NEED_RESCHED -+ tst r0, #_TIF_NEED_RESCHED_LAZY - blne svc_preempt -+1: - #endif - - svc_exit r5, irq = 1 @ return from exception -@@ -234,6 +241,8 @@ ENDPROC(__irq_svc) - 1: bl preempt_schedule_irq @ irq en/disable is done inside - ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS - tst r0, #_TIF_NEED_RESCHED -+ bne 1b -+ tst r0, #_TIF_NEED_RESCHED_LAZY - reteq r8 @ go again - b 1b - #endif ---- a/arch/arm/kernel/signal.c -+++ b/arch/arm/kernel/signal.c -@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un - */ - trace_hardirqs_off(); - do { -- if (likely(thread_flags & _TIF_NEED_RESCHED)) { -+ if (likely(thread_flags & (_TIF_NEED_RESCHED | -+ _TIF_NEED_RESCHED_LAZY))) { - schedule(); - } else { - if (unlikely(!user_mode(regs))) diff --git a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch b/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch deleted file mode 100644 index 5c3911bdd..000000000 --- a/debian/patches/features/all/rt/arm-unwind-use_raw_lock.patch +++ /dev/null @@ -1,84 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 20 Sep 2013 14:31:54 +0200 -Subject: arm/unwind: use a raw_spin_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Mostly unwind is done with irqs enabled however SLUB may call it with -irqs disabled while creating a new SLUB cache. - -I had system freeze while loading a module which called -kmem_cache_create() on init. 
That means SLUB's __slab_alloc() disabled -interrupts and then - -->new_slab_objects() - ->new_slab() - ->setup_object() - ->setup_object_debug() - ->init_tracking() - ->set_track() - ->save_stack_trace() - ->save_stack_trace_tsk() - ->walk_stackframe() - ->unwind_frame() - ->unwind_find_idx() - =>spin_lock_irqsave(&unwind_lock); - - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/kernel/unwind.c | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - ---- a/arch/arm/kernel/unwind.c -+++ b/arch/arm/kernel/unwind.c -@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_u - static const struct unwind_idx *__origin_unwind_idx; - extern const struct unwind_idx __stop_unwind_idx[]; - --static DEFINE_SPINLOCK(unwind_lock); -+static DEFINE_RAW_SPINLOCK(unwind_lock); - static LIST_HEAD(unwind_tables); - - /* Convert a prel31 symbol to an absolute address */ -@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_f - /* module unwind tables */ - struct unwind_table *table; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_for_each_entry(table, &unwind_tables, list) { - if (addr >= table->begin_addr && - addr < table->end_addr) { -@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_f - break; - } - } -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - } - - pr_debug("%s: idx = %p\n", __func__, idx); -@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(un - tab->begin_addr = text_addr; - tab->end_addr = text_addr + text_size; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_add_tail(&tab->list, &unwind_tables); -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - - return tab; - } -@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_tabl - if (!tab) - return; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_del(&tab->list); -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - - kfree(tab); - } diff --git a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch b/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch deleted file mode 100644 index 7fdde7167..000000000 --- a/debian/patches/features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch +++ /dev/null @@ -1,24 +0,0 @@ -Subject: arm64/xen: Make XEN depend on !RT -From: Thomas Gleixner -Date: Mon, 12 Oct 2015 11:18:40 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -It's not ready and probably never will be, unless xen folks have a -look at it. - -Signed-off-by: Thomas Gleixner ---- - arch/arm64/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -562,7 +562,7 @@ config XEN_DOM0 - - config XEN - bool "Xen guest support on ARM64" -- depends on ARM64 && OF -+ depends on ARM64 && OF && !PREEMPT_RT_FULL - select SWIOTLB_XEN - help - Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. 
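
[ note: the unwind conversion above is one instance of a rule this
  series applies repeatedly (see also the boot_lock and
  acpi_gbl_hardware_lock patches): under PREEMPT_RT_FULL a spinlock_t
  is backed by a sleeping rtmutex, so any lock that may be taken with
  interrupts or preemption disabled must be a raw_spinlock_t. A
  freestanding sketch of the distinction, not taken from any of these
  patches: ]

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sleeps_on_rt);      /* rtmutex under PREEMPT_RT_FULL */
	static DEFINE_RAW_SPINLOCK(never_sleeps);  /* true spinlock on every config */

	static void called_with_irqs_off(void)
	{
		unsigned long flags;

		/* fine: a raw lock spins and never schedules */
		raw_spin_lock_irqsave(&never_sleeps, flags);
		raw_spin_unlock_irqrestore(&never_sleeps, flags);

		/*
		 * spin_lock(&sleeps_on_rt) here would try to schedule on
		 * RT and trigger the "BUG: scheduling while atomic"
		 * splats quoted in the changelogs above.
		 */
	}
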
diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch deleted file mode 100644 index 4ea55e626..000000000 --- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Steven Rostedt -Date: Fri, 3 Jul 2009 08:44:29 -0500 -Subject: ata: Do not disable interrupts in ide code for preempt-rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the local_irq_*_nort variants. - -Signed-off-by: Steven Rostedt -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/ata/libata-sff.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str - unsigned long flags; - unsigned int consumed; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - return consumed; - } -@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu - unsigned long flags; - - /* FIXME: use a bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu - do_write); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, -@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_ - unsigned long flags; - - /* FIXME: use bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_ - count, rw); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch deleted file mode 100644 index 2eeebdab7..000000000 --- a/debian/patches/features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch +++ /dev/null @@ -1,84 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Sat, 3 May 2014 11:00:29 +0200 -Subject: blk-mq: revert raw locks, post pone notifier to POST_DEAD -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The blk_mq_cpu_notify_lock should be raw because some CPU down levels -are called with interrupts off. The notifier itself calls currently one -function that is blk_mq_hctx_notify(). -That function acquires the ctx->lock lock which is sleeping and I would -prefer to keep it that way. That function only moves IO-requests from -the CPU that is going offline to another CPU and it is currently the -only one. Therefore I revert the list lock back to sleeping spinlocks -and let the notifier run at POST_DEAD time. 
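
[ note: the pattern in miniature, with placeholder names -- defer the
  hotplug work to CPU_POST_DEAD, where the notifier chain runs in
  preemptible process context and a sleeping lock is legal, instead of
  the earlier down levels that can run with interrupts off: ]

	static int my_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		if (action != CPU_POST_DEAD)
			return NOTIFY_OK;

		spin_lock(&my_ctx_lock);	/* sleeping lock on RT, fine here */
		my_migrate_requests_from(cpu);	/* hypothetical helper */
		spin_unlock(&my_ctx_lock);
		return NOTIFY_OK;
	}
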
- -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq-cpu.c | 17 ++++++++++------- - block/blk-mq.c | 2 +- - 2 files changed, 11 insertions(+), 8 deletions(-) - ---- a/block/blk-mq-cpu.c -+++ b/block/blk-mq-cpu.c -@@ -16,7 +16,7 @@ - #include "blk-mq.h" - - static LIST_HEAD(blk_mq_cpu_notify_list); --static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); -+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); - - static int blk_mq_main_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct - struct blk_mq_cpu_notifier *notify; - int ret = NOTIFY_OK; - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ if (action != CPU_POST_DEAD) -+ return NOTIFY_OK; -+ -+ spin_lock(&blk_mq_cpu_notify_lock); - - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { - ret = notify->notify(notify->data, action, cpu); -@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct - break; - } - -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - return ret; - } - -@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct - { - BUG_ON(!notifier->notify); - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) - { -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - list_del(¬ifier->list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -1640,7 +1640,7 @@ static int blk_mq_hctx_notify(void *data - { - struct blk_mq_hw_ctx *hctx = data; - -- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) -+ if (action == CPU_POST_DEAD) - return blk_mq_hctx_cpu_offline(hctx, cpu); - - /* diff --git a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch b/debian/patches/features/all/rt/block-blk-mq-use-swait.patch deleted file mode 100644 index 4b399c985..000000000 --- a/debian/patches/features/all/rt/block-blk-mq-use-swait.patch +++ /dev/null @@ -1,115 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 13 Feb 2015 11:01:26 +0100 -Subject: block: blk-mq: Use swait -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 -| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 -| 5 locks held by kworker/u257:6/255: -| #0: ("events_unbound"){.+.+.+}, at: [] process_one_work+0x171/0x5e0 -| #1: ((&entry->work)){+.+.+.}, at: [] process_one_work+0x171/0x5e0 -| #2: (&shost->scan_mutex){+.+.+.}, at: [] __scsi_add_device+0xa3/0x130 [scsi_mod] -| #3: (&set->tag_list_lock){+.+...}, at: [] blk_mq_init_queue+0x96a/0xa50 -| #4: (rcu_read_lock_sched){......}, at: [] percpu_ref_kill_and_confirm+0x1d/0x120 -| Preemption disabled at:[] blk_mq_freeze_queue_start+0x56/0x70 -| -| CPU: 2 PID: 255 Comm: kworker/u257:6 Not tainted 3.18.7-rt0+ #1 -| Workqueue: events_unbound async_run_entry_fn -| 0000000000000003 ffff8800bc29f998 ffffffff815b3a12 0000000000000000 -| 0000000000000000 ffff8800bc29f9b8 ffffffff8109aa16 ffff8800bc29fa28 -| ffff8800bc5d1bc8 ffff8800bc29f9e8 ffffffff815b8dd4 ffff880000000000 -| Call Trace: -| [] dump_stack+0x4f/0x7c -| [] 
__might_sleep+0x116/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] __wake_up+0x29/0x60 -| [] blk_mq_usage_counter_release+0x1e/0x20 -| [] percpu_ref_kill_and_confirm+0x106/0x120 -| [] blk_mq_freeze_queue_start+0x56/0x70 -| [] blk_mq_update_tag_set_depth+0x40/0xd0 -| [] blk_mq_init_queue+0x98c/0xa50 -| [] scsi_mq_alloc_queue+0x20/0x60 [scsi_mod] -| [] scsi_alloc_sdev+0x2f5/0x370 [scsi_mod] -| [] scsi_probe_and_add_lun+0x9e4/0xdd0 [scsi_mod] -| [] __scsi_add_device+0x126/0x130 [scsi_mod] -| [] ata_scsi_scan_host+0xaf/0x200 [libata] -| [] async_port_probe+0x46/0x60 [libata] -| [] async_run_entry_fn+0x3b/0xf0 -| [] process_one_work+0x201/0x5e0 - -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-core.c | 6 +++--- - block/blk-mq.c | 6 +++--- - include/linux/blkdev.h | 2 +- - 3 files changed, 7 insertions(+), 7 deletions(-) - ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -660,7 +660,7 @@ int blk_queue_enter(struct request_queue - if (!gfpflags_allow_blocking(gfp)) - return -EBUSY; - -- ret = wait_event_interruptible(q->mq_freeze_wq, -+ ret = swait_event_interruptible(q->mq_freeze_wq, - !atomic_read(&q->mq_freeze_depth) || - blk_queue_dying(q)); - if (blk_queue_dying(q)) -@@ -680,7 +680,7 @@ static void blk_queue_usage_counter_rele - struct request_queue *q = - container_of(ref, struct request_queue, q_usage_counter); - -- wake_up_all(&q->mq_freeze_wq); -+ swait_wake_all(&q->mq_freeze_wq); - } - - struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) -@@ -742,7 +742,7 @@ struct request_queue *blk_alloc_queue_no - q->bypass_depth = 1; - __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); - -- init_waitqueue_head(&q->mq_freeze_wq); -+ init_swait_head(&q->mq_freeze_wq); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st - - static void blk_mq_freeze_queue_wait(struct request_queue *q) - { -- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); -+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); - } - - /* -@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct reques - WARN_ON_ONCE(freeze_depth < 0); - if (!freeze_depth) { - percpu_ref_reinit(&q->q_usage_counter); -- wake_up_all(&q->mq_freeze_wq); -+ swait_wake_all(&q->mq_freeze_wq); - } - } - EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); -@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_ - * dying, we need to ensure that processes currently waiting on - * the queue are notified as well. 
- */ -- wake_up_all(&q->mq_freeze_wq); -+ swait_wake_all(&q->mq_freeze_wq); - } - - bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -456,7 +456,7 @@ struct request_queue { - struct throtl_data *td; - #endif - struct rcu_head rcu_head; -- wait_queue_head_t mq_freeze_wq; -+ struct swait_head mq_freeze_wq; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - diff --git a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch deleted file mode 100644 index 6b14f9846..000000000 --- a/debian/patches/features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch +++ /dev/null @@ -1,102 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Jan 2015 15:10:08 +0100 -Subject: block/mq: don't complete requests via IPI -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The IPI runs in hardirq context and there are sleeping locks. This patch -moves the completion into a workqueue. - -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-core.c | 3 +++ - block/blk-mq.c | 20 ++++++++++++++++++++ - include/linux/blk-mq.h | 1 + - include/linux/blkdev.h | 1 + - 4 files changed, 25 insertions(+) - ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q - - INIT_LIST_HEAD(&rq->queuelist); - INIT_LIST_HEAD(&rq->timeout_list); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - rq->cpu = -1; - rq->q = q; - rq->__sector = (sector_t) -1; ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct re - rq->resid_len = 0; - rq->sense = NULL; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - INIT_LIST_HEAD(&rq->timeout_list); - rq->timeout = 0; - -@@ -325,6 +328,17 @@ void blk_mq_end_request(struct request * - } - EXPORT_SYMBOL(blk_mq_end_request); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+void __blk_mq_complete_request_remote_work(struct work_struct *work) -+{ -+ struct request *rq = container_of(work, struct request, work); -+ -+ rq->q->softirq_done_fn(rq); -+} -+ -+#else -+ - static void __blk_mq_complete_request_remote(void *data) - { - struct request *rq = data; -@@ -332,6 +346,8 @@ static void __blk_mq_complete_request_re - rq->q->softirq_done_fn(rq); - } - -+#endif -+ - static void blk_mq_ipi_complete_request(struct request *rq) - { - struct blk_mq_ctx *ctx = rq->mq_ctx; -@@ -348,10 +364,14 @@ static void blk_mq_ipi_complete_request( - shared = cpus_share_cache(cpu, ctx->cpu); - - if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ schedule_work_on(ctx->cpu, &rq->work); -+#else - rq->csd.func = __blk_mq_complete_request_remote; - rq->csd.info = rq; - rq->csd.flags = 0; - smp_call_function_single_async(ctx->cpu, &rq->csd); -+#endif - } else { - rq->q->softirq_done_fn(rq); - } ---- a/include/linux/blk-mq.h -+++ b/include/linux/blk-mq.h -@@ -212,6 +212,7 @@ static inline u16 blk_mq_unique_tag_to_t - - struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); - struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -+void __blk_mq_complete_request_remote_work(struct work_struct *work); - - int blk_mq_request_started(struct request *rq); - void blk_mq_start_request(struct 
request *rq); ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -89,6 +89,7 @@ struct request { - struct list_head queuelist; - union { - struct call_single_data csd; -+ struct work_struct work; - unsigned long fifo_time; - }; - diff --git a/debian/patches/features/all/rt/block-mq-drop-per-ctx-cpu_lock.patch b/debian/patches/features/all/rt/block-mq-drop-per-ctx-cpu_lock.patch deleted file mode 100644 index fc569fe40..000000000 --- a/debian/patches/features/all/rt/block-mq-drop-per-ctx-cpu_lock.patch +++ /dev/null @@ -1,125 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 18 Feb 2015 18:37:26 +0100 -Subject: block/mq: drop per ctx cpu_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -While converting the get_cpu() to get_cpu_light() I added a cpu lock to -ensure the same code is not invoked twice on the same CPU. And now I run -into this: - -| kernel BUG at kernel/locking/rtmutex.c:996! -| invalid opcode: 0000 [#1] PREEMPT SMP -| CPU0: 13 PID: 75 Comm: kworker/u258:0 Tainted: G I 3.18.7-rt1.5+ #12 -| Workqueue: writeback bdi_writeback_workfn (flush-8:0) -| task: ffff88023742a620 ti: ffff88023743c000 task.ti: ffff88023743c000 -| RIP: 0010:[] [] rt_spin_lock_slowlock+0x280/0x2d0 -| Call Trace: -| [] rt_spin_lock+0x27/0x60 -taking the same lock again -| -| [] blk_mq_insert_requests+0x51/0x130 -| [] blk_mq_flush_plug_list+0x129/0x140 -| [] blk_flush_plug_list+0xd1/0x250 -| [] schedule+0x75/0xa0 -| [] do_nanosleep+0xdd/0x180 -| [] __hrtimer_nanosleep+0xd2/0x1c0 -| [] cpu_chill+0x56/0x80 -| [] try_to_grab_pending+0x1bd/0x390 -| [] cancel_delayed_work+0x21/0x170 -| [] blk_mq_stop_hw_queue+0x18/0x40 -| [] scsi_queue_rq+0x7f/0x830 [scsi_mod] -| [] __blk_mq_run_hw_queue+0x1ee/0x360 -| [] blk_mq_map_request+0x108/0x190 -take the lock ^^^ -| -| [] blk_sq_make_request+0x82/0x350 -| [] generic_make_request+0xd0/0x120 -| [] submit_bio+0x78/0x190 -| [] _submit_bh+0x117/0x180 -| [] __block_write_full_page.constprop.38+0x138/0x3f0 -| [] block_write_full_page+0xa0/0xe0 -| [] blkdev_writepage+0x13/0x20 -| [] __writepage+0x15/0x40 -| [] write_cache_pages+0x1fb/0x440 -| [] generic_writepages+0x3e/0x60 -| [] do_writepages+0x1c/0x30 -| [] __writeback_single_inode+0x33/0x140 -| [] writeback_sb_inodes+0x2bd/0x490 -| [] __writeback_inodes_wb+0x97/0xd0 -| [] wb_writeback+0x1cb/0x210 -| [] bdi_writeback_workfn+0x25b/0x380 -| [] process_one_work+0x1bb/0x490 -| [] worker_thread+0x6b/0x4f0 -| [] kthread+0xe3/0x100 -| [] ret_from_fork+0x7c/0xb0 - -After looking at this for a while it seems that it is save if blk_mq_ctx is -used multiple times, the in struct lock protects the access. 
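
Reduced to its essentials, the trace above is a same-CPU recursion on a
non-recursive lock. The sketch below is illustrative pseudo-C reconstructed
from the call chain, not actual kernel code:

    spin_lock(&ctx->cpu_lock);                    /* blk_mq_map_request() */
        blk_mq_stop_hw_queue();
            cpu_chill();                          /* sleeps on RT ... */
                schedule();
                    blk_flush_plug_list();        /* plug flushed on schedule */
                        blk_mq_insert_requests();
                            spin_lock(&ctx->cpu_lock);  /* same lock again -> BUG */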
- -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq.c | 4 ---- - block/blk-mq.h | 8 -------- - 2 files changed, 12 deletions(-) - ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -1405,9 +1405,7 @@ static blk_qc_t blk_sq_make_request(stru - blk_mq_put_ctx(data.ctx); - - if (request_count >= BLK_MAX_REQUEST_COUNT) { -- spin_unlock(&data.ctx->cpu_lock); - blk_flush_plug_list(plug, false); -- spin_lock(&data.ctx->cpu_lock); - trace_block_plug(q); - } - -@@ -1609,7 +1607,6 @@ static int blk_mq_hctx_cpu_offline(struc - blk_mq_hctx_clear_pending(hctx, ctx); - } - spin_unlock(&ctx->lock); -- __blk_mq_put_ctx(ctx); - - if (list_empty(&tmp)) - return NOTIFY_OK; -@@ -1803,7 +1800,6 @@ static void blk_mq_init_cpu_queues(struc - memset(__ctx, 0, sizeof(*__ctx)); - __ctx->cpu = i; - spin_lock_init(&__ctx->lock); -- spin_lock_init(&__ctx->cpu_lock); - INIT_LIST_HEAD(&__ctx->rq_list); - __ctx->queue = q; - ---- a/block/blk-mq.h -+++ b/block/blk-mq.h -@@ -9,7 +9,6 @@ struct blk_mq_ctx { - struct list_head rq_list; - } ____cacheline_aligned_in_smp; - -- spinlock_t cpu_lock; - unsigned int cpu; - unsigned int index_hw; - -@@ -78,7 +77,6 @@ static inline struct blk_mq_ctx *__blk_m - struct blk_mq_ctx *ctx; - - ctx = per_cpu_ptr(q->queue_ctx, cpu); -- spin_lock(&ctx->cpu_lock); - return ctx; - } - -@@ -93,14 +91,8 @@ static inline struct blk_mq_ctx *blk_mq_ - return __blk_mq_get_ctx(q, get_cpu_light()); - } - --static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx) --{ -- spin_unlock(&ctx->cpu_lock); --} -- - static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) - { -- __blk_mq_put_ctx(ctx); - put_cpu_light(); - } - diff --git a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch b/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch deleted file mode 100644 index ba05f75cc..000000000 --- a/debian/patches/features/all/rt/block-mq-drop-preempt-disable.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: block/mq: do not invoke preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -preempt_disable() and get_cpu() don't play well together with the sleeping -locks it tries to allocate later. -It seems to be enough to replace it with get_cpu_light() and migrate_disable(). 
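
For reference, the "light" accessors used throughout this series only disable
migration on RT, so a sleeping spinlock may be taken while the CPU reference
is held. A minimal sketch of their shape (roughly what the series defines;
on !RT, migrate_disable() collapses to preempt_disable(), so the light
variants keep the classic get_cpu()/put_cpu() semantics there):

    #define get_cpu_light()    ({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()    migrate_enable()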
- -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -343,7 +343,7 @@ static void blk_mq_ipi_complete_request( - return; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) - shared = cpus_share_cache(cpu, ctx->cpu); - -@@ -355,7 +355,7 @@ static void blk_mq_ipi_complete_request( - } else { - rq->q->softirq_done_fn(rq); - } -- put_cpu(); -+ put_cpu_light(); - } - - static void __blk_mq_complete_request(struct request *rq) -@@ -862,14 +862,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h - return; - - if (!async) { -- int cpu = get_cpu(); -+ int cpu = get_cpu_light(); - if (cpumask_test_cpu(cpu, hctx->cpumask)) { - __blk_mq_run_hw_queue(hctx); -- put_cpu(); -+ put_cpu_light(); - return; - } - -- put_cpu(); -+ put_cpu_light(); - } - - kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), diff --git a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch b/debian/patches/features/all/rt/block-mq-use-cpu_light.patch deleted file mode 100644 index 275fec6b5..000000000 --- a/debian/patches/features/all/rt/block-mq-use-cpu_light.patch +++ /dev/null @@ -1,90 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 9 Apr 2014 10:37:23 +0200 -Subject: block: mq: use cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -there is a might sleep splat because get_cpu() disables preemption and -later we grab a lock. As a workaround for this we use get_cpu_light() -and an additional lock to prevent taking the same ctx. - -There is a lock member in the ctx already but there some functions which do ++ -on the member and this works with irq off but on RT we would need the extra lock. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq.c | 4 ++++ - block/blk-mq.h | 17 ++++++++++++++--- - 2 files changed, 18 insertions(+), 3 deletions(-) - ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -1385,7 +1385,9 @@ static blk_qc_t blk_sq_make_request(stru - blk_mq_put_ctx(data.ctx); - - if (request_count >= BLK_MAX_REQUEST_COUNT) { -+ spin_unlock(&data.ctx->cpu_lock); - blk_flush_plug_list(plug, false); -+ spin_lock(&data.ctx->cpu_lock); - trace_block_plug(q); - } - -@@ -1587,6 +1589,7 @@ static int blk_mq_hctx_cpu_offline(struc - blk_mq_hctx_clear_pending(hctx, ctx); - } - spin_unlock(&ctx->lock); -+ __blk_mq_put_ctx(ctx); - - if (list_empty(&tmp)) - return NOTIFY_OK; -@@ -1780,6 +1783,7 @@ static void blk_mq_init_cpu_queues(struc - memset(__ctx, 0, sizeof(*__ctx)); - __ctx->cpu = i; - spin_lock_init(&__ctx->lock); -+ spin_lock_init(&__ctx->cpu_lock); - INIT_LIST_HEAD(&__ctx->rq_list); - __ctx->queue = q; - ---- a/block/blk-mq.h -+++ b/block/blk-mq.h -@@ -9,6 +9,7 @@ struct blk_mq_ctx { - struct list_head rq_list; - } ____cacheline_aligned_in_smp; - -+ spinlock_t cpu_lock; - unsigned int cpu; - unsigned int index_hw; - -@@ -74,7 +75,11 @@ struct blk_align_bitmap { - static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, - unsigned int cpu) - { -- return per_cpu_ptr(q->queue_ctx, cpu); -+ struct blk_mq_ctx *ctx; -+ -+ ctx = per_cpu_ptr(q->queue_ctx, cpu); -+ spin_lock(&ctx->cpu_lock); -+ return ctx; - } - - /* -@@ -85,12 +90,18 @@ static inline struct blk_mq_ctx *__blk_m - */ - static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) - { -- return __blk_mq_get_ctx(q, get_cpu()); -+ return __blk_mq_get_ctx(q, get_cpu_light()); -+} -+ -+static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx) -+{ -+ spin_unlock(&ctx->cpu_lock); - } - - static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) - { -- put_cpu(); -+ __blk_mq_put_ctx(ctx); -+ put_cpu_light(); - } - - struct blk_mq_alloc_data { diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch deleted file mode 100644 index 963e762d1..000000000 --- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch +++ /dev/null @@ -1,97 +0,0 @@ -Subject: block: Shorten interrupt disabled regions -From: Thomas Gleixner -Date: Wed, 22 Jun 2011 19:47:02 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Moving the blk_sched_flush_plug() call out of the interrupt/preempt -disabled region in the scheduler allows us to replace -local_irq_save/restore(flags) by local_irq_disable/enable() in -blk_flush_plug(). - -Now instead of doing this we disable interrupts explicitely when we -lock the request_queue and reenable them when we drop the lock. That -allows interrupts to be handled when the plug list contains requests -for more than one queue. - -Aside of that this change makes the scope of the irq disabled region -more obvious. The current code confused the hell out of me when -looking at: - - local_irq_save(flags); - spin_lock(q->queue_lock); - ... - queue_unplugged(q...); - scsi_request_fn(); - spin_unlock(q->queue_lock); - spin_lock(shost->host_lock); - spin_unlock_irq(shost->host_lock); - --------------------^^^ ???? 
- - spin_lock_irq(q->queue_lock); - spin_unlock(q->lock); - local_irq_restore(flags); - -Also add a comment to __blk_run_queue() documenting that -q->request_fn() can drop q->queue_lock and reenable interrupts, but -must return with q->queue_lock held and interrupts disabled. - -Signed-off-by: Thomas Gleixner -Cc: Peter Zijlstra -Cc: Tejun Heo -Cc: Jens Axboe -Cc: Linus Torvalds -Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de ---- - block/blk-core.c | 12 ++---------- - 1 file changed, 2 insertions(+), 10 deletions(-) - ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -3198,7 +3198,7 @@ static void queue_unplugged(struct reque - blk_run_queue_async(q); - else - __blk_run_queue(q); -- spin_unlock(q->queue_lock); -+ spin_unlock_irq(q->queue_lock); - } - - static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) -@@ -3246,7 +3246,6 @@ EXPORT_SYMBOL(blk_check_plugged); - void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) - { - struct request_queue *q; -- unsigned long flags; - struct request *rq; - LIST_HEAD(list); - unsigned int depth; -@@ -3266,11 +3265,6 @@ void blk_flush_plug_list(struct blk_plug - q = NULL; - depth = 0; - -- /* -- * Save and disable interrupts here, to avoid doing it for every -- * queue lock we have to take. -- */ -- local_irq_save(flags); - while (!list_empty(&list)) { - rq = list_entry_rq(list.next); - list_del_init(&rq->queuelist); -@@ -3283,7 +3277,7 @@ void blk_flush_plug_list(struct blk_plug - queue_unplugged(q, depth, from_schedule); - q = rq->q; - depth = 0; -- spin_lock(q->queue_lock); -+ spin_lock_irq(q->queue_lock); - } - - /* -@@ -3310,8 +3304,6 @@ void blk_flush_plug_list(struct blk_plug - */ - if (q) - queue_unplugged(q, depth, from_schedule); -- -- local_irq_restore(flags); - } - - void blk_finish_plug(struct blk_plug *plug) diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch deleted file mode 100644 index ec52d61a4..000000000 --- a/debian/patches/features/all/rt/block-use-cpu-chill.patch +++ /dev/null @@ -1,46 +0,0 @@ -Subject: block: Use cpu_chill() for retry loops -From: Thomas Gleixner -Date: Thu, 20 Dec 2012 18:28:26 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Retry loops on RT might loop forever when the modifying side was -preempted. Steven also observed a live lock when there was a -concurrent priority boosting going on. - -Use cpu_chill() instead of cpu_relax() to let the system -make progress. 
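
The conversion is mechanical: every busy-wait retry loop that can spin on a
preempted lock holder trades cpu_relax() for cpu_chill(). A sketch of the
resulting pattern, with trylock_inner() as an illustrative placeholder for
the inner lock attempt:

    retry:
        spin_lock_irqsave(&ioc->lock, flags);
        if (!trylock_inner()) {
            spin_unlock_irqrestore(&ioc->lock, flags);
            cpu_chill();    /* was cpu_relax(); briefly sleeps on RT */
            goto retry;
        }

On !RT the series maps cpu_chill() back to cpu_relax(), so non-RT behaviour
is unchanged.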
- -Signed-off-by: Thomas Gleixner - ---- - block/blk-ioc.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - ---- a/block/blk-ioc.c -+++ b/block/blk-ioc.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include "blk.h" - -@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s - spin_unlock(q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - spin_lock_irqsave_nested(&ioc->lock, flags, 1); - } - } -@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con - spin_unlock(icq->q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - goto retry; - } - } diff --git a/debian/patches/features/all/rt/btrfs-initialize-the-seq-counter-in-struct-btrfs_dev.patch b/debian/patches/features/all/rt/btrfs-initialize-the-seq-counter-in-struct-btrfs_dev.patch deleted file mode 100644 index 243b5bede..000000000 --- a/debian/patches/features/all/rt/btrfs-initialize-the-seq-counter-in-struct-btrfs_dev.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 15 Jan 2016 14:28:39 +0100 -Subject: btrfs: initialize the seq counter in struct btrfs_device -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -I managed to trigger this: -| INFO: trying to register non-static key. -| the code is fine but needs lockdep annotation. -| turning off the locking correctness validator. -| CPU: 1 PID: 781 Comm: systemd-gpt-aut Not tainted 4.4.0-rt2+ #14 -| Hardware name: ARM-Versatile Express -| [<80307cec>] (dump_stack) -| [<80070e98>] (__lock_acquire) -| [<8007184c>] (lock_acquire) -| [<80287800>] (btrfs_ioctl) -| [<8012a8d4>] (do_vfs_ioctl) -| [<8012ac14>] (SyS_ioctl) - -so I think that btrfs_device_data_ordered_init() is not invoked behind -a macro somewhere. - -Fixes: 7cc8e58d53cd ("Btrfs: fix unprotected device's variants on 32bits machine") -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/btrfs/volumes.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/fs/btrfs/volumes.c -+++ b/fs/btrfs/volumes.c -@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_devi - spin_lock_init(&dev->reada_lock); - atomic_set(&dev->reada_in_flight, 0); - atomic_set(&dev->dev_stats_ccnt, 0); -+ btrfs_device_data_ordered_init(dev); - INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); - INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); - diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch deleted file mode 100644 index 304b81a10..000000000 --- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code. 
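
A typical conversion enabled by these macros (sketch): an assertion that is
only valid when the code cannot run on RT goes from

    #ifndef CONFIG_PREEMPT_RT_BASE
        WARN_ON(!irqs_disabled());
    #endif

to the single self-documenting line

    WARN_ON_NONRT(!irqs_disabled());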
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - include/asm-generic/bug.h | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - ---- a/include/asm-generic/bug.h -+++ b/include/asm-generic/bug.h -@@ -206,6 +206,20 @@ extern void warn_slowpath_null(const cha - # define WARN_ON_SMP(x) ({0;}) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define BUG_ON_RT(c) BUG_ON(c) -+# define BUG_ON_NONRT(c) do { } while (0) -+# define WARN_ON_RT(condition) WARN_ON(condition) -+# define WARN_ON_NONRT(condition) do { } while (0) -+# define WARN_ON_ONCE_NONRT(condition) do { } while (0) -+#else -+# define BUG_ON_RT(c) do { } while (0) -+# define BUG_ON_NONRT(c) BUG_ON(c) -+# define WARN_ON_RT(condition) do { } while (0) -+# define WARN_ON_NONRT(condition) WARN_ON(condition) -+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) -+#endif -+ - #endif /* __ASSEMBLY__ */ - - #endif diff --git a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch deleted file mode 100644 index 061fedd6c..000000000 --- a/debian/patches/features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Mike Galbraith -Date: Sat, 21 Jun 2014 10:09:48 +0200 -Subject: memcontrol: Prevent scheduling while atomic in cgroup code -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -mm, memcg: make refill_stock() use get_cpu_light() - -Nikita reported the following memcg scheduling while atomic bug: - -Call Trace: -[e22d5a90] [c0007ea8] show_stack+0x4c/0x168 (unreliable) -[e22d5ad0] [c0618c04] __schedule_bug+0x94/0xb0 -[e22d5ae0] [c060b9ec] __schedule+0x530/0x550 -[e22d5bf0] [c060bacc] schedule+0x30/0xbc -[e22d5c00] [c060ca24] rt_spin_lock_slowlock+0x180/0x27c -[e22d5c70] [c00b39dc] res_counter_uncharge_until+0x40/0xc4 -[e22d5ca0] [c013ca88] drain_stock.isra.20+0x54/0x98 -[e22d5cc0] [c01402ac] __mem_cgroup_try_charge+0x2e8/0xbac -[e22d5d70] [c01410d4] mem_cgroup_charge_common+0x3c/0x70 -[e22d5d90] [c0117284] __do_fault+0x38c/0x510 -[e22d5df0] [c011a5f4] handle_pte_fault+0x98/0x858 -[e22d5e50] [c060ed08] do_page_fault+0x42c/0x6fc -[e22d5f40] [c000f5b4] handle_page_fault+0xc/0x80 - -What happens: - - refill_stock() - get_cpu_var() - drain_stock() - res_counter_uncharge() - res_counter_uncharge_until() - spin_lock() <== boom - -Fix it by replacing get/put_cpu_var() with get/put_cpu_light(). 
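
The failing pattern in miniature (illustrative; res_counter details elided):
get_cpu_var() disables preemption, and on RT the spinlock taken underneath
it is a sleeping rt_mutex:

    stock = &get_cpu_var(memcg_stock);  /* preemption off */
    ...
    spin_lock(&counter->lock);          /* rt_mutex on RT: may sleep -> bug */
    ...
    put_cpu_var(memcg_stock);

With get_cpu_light() only migration is pinned, so sleeping inside the
section is legal.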
- - -Reported-by: Nikita Yushchenko -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/memcontrol.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -1937,14 +1937,17 @@ static void drain_local_stock(struct wor - */ - static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) - { -- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); -+ struct memcg_stock_pcp *stock; -+ int cpu = get_cpu_light(); -+ -+ stock = &per_cpu(memcg_stock, cpu); - - if (stock->cached != memcg) { /* reset if necessary */ - drain_stock(stock); - stock->cached = memcg; - } - stock->nr_pages += nr_pages; -- put_cpu_var(memcg_stock); -+ put_cpu_light(); - } - - /* diff --git a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch deleted file mode 100644 index c781ba2a6..000000000 --- a/debian/patches/features/all/rt/cgroups-use-simple-wait-in-css_release.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 13 Feb 2015 15:52:24 +0100 -Subject: cgroups: use simple wait in css_release() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -To avoid: -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 -|in_atomic(): 1, irqs_disabled(): 0, pid: 92, name: rcuc/11 -|2 locks held by rcuc/11/92: -| #0: (rcu_callback){......}, at: [] rcu_cpu_kthread+0x3de/0x940 -| #1: (rcu_read_lock_sched){......}, at: [] percpu_ref_call_confirm_rcu+0x0/0xd0 -|Preemption disabled at:[] percpu_ref_switch_to_atomic_rcu+0x82/0xc0 -|CPU: 11 PID: 92 Comm: rcuc/11 Not tainted 3.18.7-rt0+ #1 -| ffff8802398cdf80 ffff880235f0bc28 ffffffff815b3a12 0000000000000000 -| 0000000000000000 ffff880235f0bc48 ffffffff8109aa16 0000000000000000 -| ffff8802398cdf80 ffff880235f0bc78 ffffffff815b8dd4 000000000000df80 -|Call Trace: -| [] dump_stack+0x4f/0x7c -| [] __might_sleep+0x116/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] queue_work_on+0x6d/0x1d0 -| [] css_release+0x81/0x90 -| [] percpu_ref_call_confirm_rcu+0xbe/0xd0 -| [] percpu_ref_switch_to_atomic_rcu+0x82/0xc0 -| [] rcu_cpu_kthread+0x445/0x940 -| [] smpboot_thread_fn+0x18d/0x2d0 -| [] kthread+0xe8/0x100 -| [] ret_from_fork+0x7c/0xb0 - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/cgroup-defs.h | 2 ++ - kernel/cgroup.c | 9 +++++---- - 2 files changed, 7 insertions(+), 4 deletions(-) - ---- a/include/linux/cgroup-defs.h -+++ b/include/linux/cgroup-defs.h -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_CGROUPS - -@@ -136,6 +137,7 @@ struct cgroup_subsys_state { - /* percpu_ref killing and RCU release */ - struct rcu_head rcu_head; - struct work_struct destroy_work; -+ struct swork_event destroy_swork; - }; - - /* ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -4724,10 +4724,10 @@ static void css_free_rcu_fn(struct rcu_h - queue_work(cgroup_destroy_wq, &css->destroy_work); - } - --static void css_release_work_fn(struct work_struct *work) -+static void css_release_work_fn(struct swork_event *sev) - { - struct cgroup_subsys_state *css = -- container_of(work, struct cgroup_subsys_state, destroy_work); -+ container_of(sev, struct cgroup_subsys_state, destroy_swork); - struct cgroup_subsys *ss = css->ss; - struct cgroup *cgrp = css->cgroup; - -@@ -4766,8 +4766,8 @@ static void css_release(struct percpu_re - struct cgroup_subsys_state *css = - 
container_of(ref, struct cgroup_subsys_state, refcnt); - -- INIT_WORK(&css->destroy_work, css_release_work_fn); -- queue_work(cgroup_destroy_wq, &css->destroy_work); -+ INIT_SWORK(&css->destroy_swork, css_release_work_fn); -+ swork_queue(&css->destroy_swork); - } - - static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5363,6 +5363,7 @@ static int __init cgroup_wq_init(void) - */ - cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); - BUG_ON(!cgroup_destroy_wq); -+ BUG_ON(swork_get()); - - /* - * Used to destroy pidlists and separate to serve as flush domain. diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch deleted file mode 100644 index 27602da08..000000000 --- a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch +++ /dev/null @@ -1,158 +0,0 @@ -From: Benedikt Spranger -Date: Mon, 8 Mar 2010 18:57:04 +0100 -Subject: clocksource: TCLIB: Allow higher clock rates for clock events -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -As default the TCLIB uses the 32KiHz base clock rate for clock events. -Add a compile time selection to allow higher clock resulution. - -(fixed up by Sami Pietikäinen ) - -Signed-off-by: Benedikt Spranger -Signed-off-by: Thomas Gleixner ---- - drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++++--------------- - drivers/misc/Kconfig | 12 ++++++++++-- - 2 files changed, 31 insertions(+), 17 deletions(-) - ---- a/drivers/clocksource/tcb_clksrc.c -+++ b/drivers/clocksource/tcb_clksrc.c -@@ -23,8 +23,7 @@ - * this 32 bit free-running counter. the second channel is not used. - * - * - The third channel may be used to provide a 16-bit clockevent -- * source, used in either periodic or oneshot mode. This runs -- * at 32 KiHZ, and can handle delays of up to two seconds. -+ * source, used in either periodic or oneshot mode. - * - * A boot clocksource and clockevent source are also currently needed, - * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so -@@ -74,6 +73,7 @@ static struct clocksource clksrc = { - struct tc_clkevt_device { - struct clock_event_device clkevt; - struct clk *clk; -+ u32 freq; - void __iomem *regs; - }; - -@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl - return container_of(clkevt, struct tc_clkevt_device, clkevt); - } - --/* For now, we always use the 32K clock ... this optimizes for NO_HZ, -- * because using one of the divided clocks would usually mean the -- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). -- * -- * A divided clock could be good for high resolution timers, since -- * 30.5 usec resolution can seem "low". 
-- */ - static u32 timer_clock; - - static int tc_shutdown(struct clock_event_device *d) -@@ -113,7 +106,7 @@ static int tc_set_oneshot(struct clock_e - - clk_enable(tcd->clk); - -- /* slow clock, count up to RC, then irq and stop */ -+ /* count up to RC, then irq and stop */ - __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | - ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -135,10 +128,10 @@ static int tc_set_periodic(struct clock_ - */ - clk_enable(tcd->clk); - -- /* slow clock, count up to RC, then irq and restart */ -+ /* count up to RC, then irq and restart */ - __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); -- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); -+ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); - - /* Enable clock and interrupts on RC compare */ - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -165,7 +158,11 @@ static struct tc_clkevt_device clkevt = - .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, - /* Should be lower than at91rm9200's system timer */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - .rating = 125, -+#else -+ .rating = 200, -+#endif - .set_next_event = tc_next_event, - .set_state_shutdown = tc_shutdown, - .set_state_periodic = tc_set_periodic, -@@ -187,8 +184,9 @@ static irqreturn_t ch2_irq(int irq, void - return IRQ_NONE; - } - --static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) -+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) - { -+ unsigned divisor = atmel_tc_divisors[divisor_idx]; - int ret; - struct clk *t2_clk = tc->clk[2]; - int irq = tc->irq[2]; -@@ -209,7 +207,11 @@ static int __init setup_clkevents(struct - clkevt.regs = tc->regs; - clkevt.clk = t2_clk; - -- timer_clock = clk32k_divisor_idx; -+ timer_clock = divisor_idx; -+ if (!divisor) -+ clkevt.freq = 32768; -+ else -+ clkevt.freq = clk_get_rate(t2_clk) / divisor; - - clkevt.clkevt.cpumask = cpumask_of(0); - -@@ -220,7 +222,7 @@ static int __init setup_clkevents(struct - return ret; - } - -- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); - - return ret; - } -@@ -357,7 +359,11 @@ static int __init tcb_clksrc_init(void) - goto err_disable_t1; - - /* channel 2: periodic and oneshot timer support */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - ret = setup_clkevents(tc, clk32k_divisor_idx); -+#else -+ ret = setup_clkevents(tc, best_divisor_idx); -+#endif - if (ret) - goto err_unregister_clksrc; - ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC - are combined to make a single 32-bit timer. - - When GENERIC_CLOCKEVENTS is defined, the third timer channel -- may be used as a clock event device supporting oneshot mode -- (delays of up to two seconds) based on the 32 KiHz clock. -+ may be used as a clock event device supporting oneshot mode. - - config ATMEL_TCB_CLKSRC_BLOCK - int -@@ -84,6 +83,15 @@ config ATMEL_TCB_CLKSRC_BLOCK - TC can be used for other purposes, such as PWM generation and - interval timing. - -+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK -+ bool "TC Block use 32 KiHz clock" -+ depends on ATMEL_TCB_CLKSRC -+ default y -+ help -+ Select this to use 32 KiHz base clock rate as TC block clock -+ source for clock events. 
-+ -+ - config DUMMY_IRQ - tristate "Dummy IRQ handler" - default n diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch deleted file mode 100644 index 34d9ff4bc..000000000 --- a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch +++ /dev/null @@ -1,225 +0,0 @@ -Subject: completion: Use simple wait queues -From: Thomas Gleixner -Date: Fri, 11 Jan 2013 11:23:51 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Completions have no long lasting callbacks and therefor do not need -the complex waitqueue variant. Use simple waitqueues which reduces the -contention on the waitqueue lock. - -Signed-off-by: Thomas Gleixner ---- - drivers/net/wireless/orinoco/orinoco_usb.c | 2 - - drivers/usb/gadget/function/f_fs.c | 2 - - drivers/usb/gadget/legacy/inode.c | 4 +-- - include/linux/completion.h | 9 +++----- - include/linux/uprobes.h | 1 - kernel/sched/completion.c | 32 ++++++++++++++--------------- - kernel/sched/core.c | 10 +++++++-- - 7 files changed, 33 insertions(+), 27 deletions(-) - ---- a/drivers/net/wireless/orinoco/orinoco_usb.c -+++ b/drivers/net/wireless/orinoco/orinoco_usb.c -@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez - while (!ctx->done.done && msecs--) - udelay(1000); - } else { -- wait_event_interruptible(ctx->done.wait, -+ swait_event_interruptible(ctx->done.wait, - ctx->done.done); - } - break; ---- a/drivers/usb/gadget/function/f_fs.c -+++ b/drivers/usb/gadget/function/f_fs.c -@@ -1405,7 +1405,7 @@ static void ffs_data_put(struct ffs_data - pr_info("%s(): freeing\n", __func__); - ffs_data_clear(ffs); - BUG_ON(waitqueue_active(&ffs->ev.waitq) || -- waitqueue_active(&ffs->ep0req_completion.wait)); -+ swaitqueue_active(&ffs->ep0req_completion.wait)); - kfree(ffs->dev_name); - kfree(ffs); - } ---- a/drivers/usb/gadget/legacy/inode.c -+++ b/drivers/usb/gadget/legacy/inode.c -@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf - spin_unlock_irq (&epdata->dev->lock); - - if (likely (value == 0)) { -- value = wait_event_interruptible (done.wait, done.done); -+ value = swait_event_interruptible (done.wait, done.done); - if (value != 0) { - spin_lock_irq (&epdata->dev->lock); - if (likely (epdata->ep != NULL)) { -@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf - usb_ep_dequeue (epdata->ep, epdata->req); - spin_unlock_irq (&epdata->dev->lock); - -- wait_event (done.wait, done.done); -+ swait_event (done.wait, done.done); - if (epdata->status == -ECONNRESET) - epdata->status = -EINTR; - } else { ---- a/include/linux/completion.h -+++ b/include/linux/completion.h -@@ -7,8 +7,7 @@ - * Atomic wait-for-completion handler data structures. - * See kernel/sched/completion.c for details. 
- */ -- --#include -+#include - - /* - * struct completion - structure used to maintain state for a "completion" -@@ -24,11 +23,11 @@ - */ - struct completion { - unsigned int done; -- wait_queue_head_t wait; -+ struct swait_head wait; - }; - - #define COMPLETION_INITIALIZER(work) \ -- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } - - #define COMPLETION_INITIALIZER_ONSTACK(work) \ - ({ init_completion(&work); work; }) -@@ -73,7 +72,7 @@ struct completion { - static inline void init_completion(struct completion *x) - { - x->done = 0; -- init_waitqueue_head(&x->wait); -+ init_swait_head(&x->wait); - } - - /** ---- a/include/linux/uprobes.h -+++ b/include/linux/uprobes.h -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - - struct vm_area_struct; - struct mm_struct; ---- a/kernel/sched/completion.c -+++ b/kernel/sched/completion.c -@@ -30,10 +30,10 @@ void complete(struct completion *x) - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - x->done++; -- __wake_up_locked(&x->wait, TASK_NORMAL, 1); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ __swait_wake_locked(&x->wait, TASK_NORMAL, 1); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete); - -@@ -50,10 +50,10 @@ void complete_all(struct completion *x) - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - x->done += UINT_MAX/2; -- __wake_up_locked(&x->wait, TASK_NORMAL, 0); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ __swait_wake_locked(&x->wait, TASK_NORMAL, 0); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete_all); - -@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x, - long (*action)(long), long timeout, int state) - { - if (!x->done) { -- DECLARE_WAITQUEUE(wait, current); -+ DEFINE_SWAITER(wait); - -- __add_wait_queue_tail_exclusive(&x->wait, &wait); -+ swait_prepare_locked(&x->wait, &wait); - do { - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - break; - } - __set_current_state(state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - timeout = action(timeout); -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - } while (!x->done && timeout); -- __remove_wait_queue(&x->wait, &wait); -+ swait_finish_locked(&x->wait, &wait); - if (!x->done) - return timeout; - } -@@ -89,9 +89,9 @@ static inline long __sched - { - might_sleep(); - -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - timeout = do_wait_for_common(x, action, timeout, state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - return timeout; - } - -@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct comp - if (!READ_ONCE(x->done)) - return 0; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - if (!x->done) - ret = 0; - else - x->done--; -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - return ret; - } - EXPORT_SYMBOL(try_wait_for_completion); -@@ -311,7 +311,7 @@ bool completion_done(struct completion * - * after it's acquired the lock. 
- */ - smp_rmb(); -- spin_unlock_wait(&x->wait.lock); -+ raw_spin_unlock_wait(&x->wait.lock); - return true; - } - EXPORT_SYMBOL(completion_done); ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3102,7 +3102,10 @@ void migrate_disable(void) - } - - #ifdef CONFIG_SCHED_DEBUG -- WARN_ON_ONCE(p->migrate_disable_atomic); -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } - #endif - - if (p->migrate_disable) { -@@ -3129,7 +3132,10 @@ void migrate_enable(void) - } - - #ifdef CONFIG_SCHED_DEBUG -- WARN_ON_ONCE(p->migrate_disable_atomic); -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } - #endif - WARN_ON_ONCE(p->migrate_disable <= 0); - diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch deleted file mode 100644 index a04522fc8..000000000 --- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch +++ /dev/null @@ -1,27 +0,0 @@ -Subject: sched: Use the proper LOCK_OFFSET for cond_resched() -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 22:51:33 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -RT does not increment preempt count when a 'sleeping' spinlock is -locked. Update PREEMPT_LOCK_OFFSET for that case. - -Signed-off-by: Thomas Gleixner ---- - include/linux/preempt.h | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -91,7 +91,11 @@ - /* - * The preempt_count offset after spin_lock() - */ -+#if !defined(CONFIG_PREEMPT_RT_FULL) - #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET -+#else -+#define PREEMPT_LOCK_OFFSET 0 -+#endif - - /* - * The preempt_count offset needed for things like: diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch deleted file mode 100644 index e1095dbff..000000000 --- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch +++ /dev/null @@ -1,53 +0,0 @@ -Subject: sched: Take RT softirq semantics into account in cond_resched() -From: Thomas Gleixner -Date: Thu, 14 Jul 2011 09:56:44 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The softirq semantics work different on -RT. There is no SOFTIRQ_MASK in -the preemption counter which leads to the BUG_ON() statement in -__cond_resched_softirq(). As for -RT it is enough to perform a "normal" -schedule. 
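
Concretely, the assertion that trips on -RT (visible in the hunk below) is:

    BUG_ON(!in_softirq());  /* in __cond_resched_softirq() */

in_softirq() tests SOFTIRQ_MASK in preempt_count(); since RT processes
softirqs in thread context without setting that mask, the check would fire
on every call, so mapping cond_resched_softirq() to a plain cond_resched()
is the simplest correct behaviour.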
- -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 4 ++++ - kernel/sched/core.c | 2 ++ - 2 files changed, 6 insertions(+) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -2987,12 +2987,16 @@ extern int __cond_resched_lock(spinlock_ - __cond_resched_lock(lock); \ - }) - -+#ifndef CONFIG_PREEMPT_RT_FULL - extern int __cond_resched_softirq(void); - - #define cond_resched_softirq() ({ \ - ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ - __cond_resched_softirq(); \ - }) -+#else -+# define cond_resched_softirq() cond_resched() -+#endif - - static inline void cond_resched_rcu(void) - { ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -4770,6 +4770,7 @@ int __cond_resched_lock(spinlock_t *lock - } - EXPORT_SYMBOL(__cond_resched_lock); - -+#ifndef CONFIG_PREEMPT_RT_FULL - int __sched __cond_resched_softirq(void) - { - BUG_ON(!in_softirq()); -@@ -4783,6 +4784,7 @@ int __sched __cond_resched_softirq(void) - return 0; - } - EXPORT_SYMBOL(__cond_resched_softirq); -+#endif - - /** - * yield - yield the current processor to other threads. diff --git a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch deleted file mode 100644 index 7754cd3e3..000000000 --- a/debian/patches/features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch +++ /dev/null @@ -1,56 +0,0 @@ -From: Steven Rostedt -Date: Thu, 5 Dec 2013 09:16:52 -0500 -Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The patch: - - cpu: Make hotplug.lock a "sleeping" spinlock on RT - - Tasks can block on hotplug.lock in pin_current_cpu(), but their - state might be != RUNNING. So the mutex wakeup will set the state - unconditionally to RUNNING. That might cause spurious unexpected - wakeups. We could provide a state preserving mutex_lock() function, - but this is semantically backwards. So instead we convert the - hotplug.lock() to a spinlock for RT, which has the state preserving - semantics already. - -Fixed a bug where the hotplug lock on PREEMPT_RT can be called after a -task set its state to TASK_UNINTERRUPTIBLE and before it called -schedule. If the hotplug_lock used a mutex, and there was contention, -the current task's state would be turned to TASK_RUNNABLE and the -schedule call will not sleep. This caused unexpected results. - -Although the patch had a description of the change, the code had no -comments about it. This causes confusion to those that review the code, -and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy -to see why a change was made. Even if it was in git, the code should -still have a comment for something as subtle as this. - -Document the rational for using a spinlock on PREEMPT_RT in the hotplug -lock code. - -Reported-by: Nicholas Mc Guire -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -110,6 +110,14 @@ struct hotplug_pcp { - int grab_lock; - struct completion synced; - #ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * Note, on PREEMPT_RT, the hotplug lock must save the state of -+ * the task, otherwise the mutex will cause the task to fail -+ * to sleep when required. 
(Because it's called from migrate_disable()) -+ * -+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's -+ * state. -+ */ - spinlock_t lock; - #else - struct mutex mutex; diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch deleted file mode 100644 index c39b5eeea..000000000 --- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch +++ /dev/null @@ -1,117 +0,0 @@ -Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT -From: Steven Rostedt -Date: Fri, 02 Mar 2012 10:36:57 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Tasks can block on hotplug.lock in pin_current_cpu(), but their state -might be != RUNNING. So the mutex wakeup will set the state -unconditionally to RUNNING. That might cause spurious unexpected -wakeups. We could provide a state preserving mutex_lock() function, -but this is semantically backwards. So instead we convert the -hotplug.lock() to a spinlock for RT, which has the state preserving -semantics already. - -Signed-off-by: Steven Rostedt -Cc: Carsten Emde -Cc: John Kacur -Cc: Peter Zijlstra -Cc: Clark Williams - -Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com -Signed-off-by: Thomas Gleixner ---- - kernel/cpu.c | 34 +++++++++++++++++++++++++++------- - 1 file changed, 27 insertions(+), 7 deletions(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -60,10 +60,16 @@ static int cpu_hotplug_disabled; - - static struct { - struct task_struct *active_writer; -+ - /* wait queue to wake up the active_writer */ - wait_queue_head_t wq; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* Makes the lock keep the task's state */ -+ spinlock_t lock; -+#else - /* verifies that no writer will get active while readers are active */ - struct mutex lock; -+#endif - /* - * Also blocks the new readers during - * an ongoing cpu hotplug operation. 
-@@ -76,12 +82,26 @@ static struct { - } cpu_hotplug = { - .active_writer = NULL, - .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), -+#ifdef CONFIG_PREEMPT_RT_FULL -+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), -+#else - .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), -+#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC - .dep_map = {.name = "cpu_hotplug.lock" }, - #endif - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) -+# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock) -+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) -+#else -+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) -+# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock) -+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) -+#endif -+ - /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */ - #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map) - #define cpuhp_lock_acquire_tryread() \ -@@ -118,8 +138,8 @@ void pin_current_cpu(void) - return; - } - preempt_enable(); -- mutex_lock(&cpu_hotplug.lock); -- mutex_unlock(&cpu_hotplug.lock); -+ hotplug_lock(); -+ hotplug_unlock(); - preempt_disable(); - goto retry; - } -@@ -192,9 +212,9 @@ void get_online_cpus(void) - if (cpu_hotplug.active_writer == current) - return; - cpuhp_lock_acquire_read(); -- mutex_lock(&cpu_hotplug.lock); -+ hotplug_lock(); - atomic_inc(&cpu_hotplug.refcount); -- mutex_unlock(&cpu_hotplug.lock); -+ hotplug_unlock(); - } - EXPORT_SYMBOL_GPL(get_online_cpus); - -@@ -247,11 +267,11 @@ void cpu_hotplug_begin(void) - cpuhp_lock_acquire(); - - for (;;) { -- mutex_lock(&cpu_hotplug.lock); -+ hotplug_lock(); - prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); - if (likely(!atomic_read(&cpu_hotplug.refcount))) - break; -- mutex_unlock(&cpu_hotplug.lock); -+ hotplug_unlock(); - schedule(); - } - finish_wait(&cpu_hotplug.wq, &wait); -@@ -260,7 +280,7 @@ void cpu_hotplug_begin(void) - void cpu_hotplug_done(void) - { - cpu_hotplug.active_writer = NULL; -- mutex_unlock(&cpu_hotplug.lock); -+ hotplug_unlock(); - cpuhp_lock_release(); - } - diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch deleted file mode 100644 index 775c68574..000000000 --- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch +++ /dev/null @@ -1,530 +0,0 @@ -From: Steven Rostedt -Date: Mon, 16 Jul 2012 08:07:43 +0000 -Subject: cpu/rt: Rework cpu down for PREEMPT_RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Bringing a CPU down is a pain with the PREEMPT_RT kernel because -tasks can be preempted in many more places than in non-RT. In -order to handle per_cpu variables, tasks may be pinned to a CPU -for a while, and even sleep. But these tasks need to be off the CPU -if that CPU is going down. - -Several synchronization methods have been tried, but when stressed -they failed. This is a new approach. - -A sync_tsk thread is still created and tasks may still block on a -lock when the CPU is going down, but how that works is a bit different. -When cpu_down() starts, it will create the sync_tsk and wait on it -to inform that current tasks that are pinned on the CPU are no longer -pinned. But new tasks that are about to be pinned will still be allowed -to do so at this time. - -Then the notifiers are called. Several notifiers will bring down tasks -that will enter these locations. 
Some of these tasks will take locks -of other tasks that are on the CPU. If we don't let those other tasks -continue, but make them block until CPU down is done, the tasks that -the notifiers are waiting on will never complete as they are waiting -for the locks held by the tasks that are blocked. - -Thus we still let the task pin the CPU until the notifiers are done. -After the notifiers run, we then make new tasks entering the pinned -CPU sections grab a mutex and wait. This mutex is now a per CPU mutex -in the hotplug_pcp descriptor. - -To help things along, a new function in the scheduler code is created -called migrate_me(). This function will try to migrate the current task -off the CPU this is going down if possible. When the sync_tsk is created, -all tasks will then try to migrate off the CPU going down. There are -several cases that this wont work, but it helps in most cases. - -After the notifiers are called and if a task can't migrate off but enters -the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex -until the CPU down is complete. Then the scheduler will force the migration -anyway. - -Also, I found that THREAD_BOUND need to also be accounted for in the -pinned CPU, and the migrate_disable no longer treats them special. -This helps fix issues with ksoftirqd and workqueue that unbind on CPU down. - -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner - ---- - include/linux/sched.h | 7 + - kernel/cpu.c | 240 ++++++++++++++++++++++++++++++++++++++++---------- - kernel/sched/core.c | 78 ++++++++++++++++ - 3 files changed, 281 insertions(+), 44 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -2288,6 +2288,10 @@ extern void do_set_cpus_allowed(struct t - - extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); -+int migrate_me(void); -+void tell_sched_cpu_down_begin(int cpu); -+void tell_sched_cpu_down_done(int cpu); -+ - #else - static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) -@@ -2300,6 +2304,9 @@ static inline int set_cpus_allowed_ptr(s - return -EINVAL; - return 0; - } -+static inline int migrate_me(void) { return 0; } -+static inline void tell_sched_cpu_down_begin(int cpu) { } -+static inline void tell_sched_cpu_down_done(int cpu) { } - #endif - - #ifdef CONFIG_NO_HZ_COMMON ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -60,16 +60,10 @@ static int cpu_hotplug_disabled; - - static struct { - struct task_struct *active_writer; -- - /* wait queue to wake up the active_writer */ - wait_queue_head_t wq; --#ifdef CONFIG_PREEMPT_RT_FULL -- /* Makes the lock keep the task's state */ -- spinlock_t lock; --#else - /* verifies that no writer will get active while readers are active */ - struct mutex lock; --#endif - /* - * Also blocks the new readers during - * an ongoing cpu hotplug operation. 
-@@ -81,27 +75,13 @@ static struct { - #endif - } cpu_hotplug = { - .active_writer = NULL, -- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), --#ifdef CONFIG_PREEMPT_RT_FULL -- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), --#else - .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), --#endif -+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), - #ifdef CONFIG_DEBUG_LOCK_ALLOC - .dep_map = {.name = "cpu_hotplug.lock" }, - #endif - }; - --#ifdef CONFIG_PREEMPT_RT_FULL --# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) --# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock) --# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) --#else --# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) --# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock) --# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) --#endif -- - /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */ - #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map) - #define cpuhp_lock_acquire_tryread() \ -@@ -109,12 +89,42 @@ static struct { - #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) - #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) - -+/** -+ * hotplug_pcp - per cpu hotplug descriptor -+ * @unplug: set when pin_current_cpu() needs to sync tasks -+ * @sync_tsk: the task that waits for tasks to finish pinned sections -+ * @refcount: counter of tasks in pinned sections -+ * @grab_lock: set when the tasks entering pinned sections should wait -+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished -+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) -+ * @mutex_init: zero if the mutex hasn't been initialized yet. -+ * -+ * Although @unplug and @sync_tsk may point to the same task, the @unplug -+ * is used as a flag and still exists after @sync_tsk has exited and -+ * @sync_tsk set to NULL. -+ */ - struct hotplug_pcp { - struct task_struct *unplug; -+ struct task_struct *sync_tsk; - int refcount; -+ int grab_lock; - struct completion synced; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ spinlock_t lock; -+#else -+ struct mutex mutex; -+#endif -+ int mutex_init; - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) -+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) -+#else -+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) -+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) -+#endif -+ - static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); - - /** -@@ -128,18 +138,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp - void pin_current_cpu(void) - { - struct hotplug_pcp *hp; -+ int force = 0; - - retry: - hp = this_cpu_ptr(&hotplug_pcp); - -- if (!hp->unplug || hp->refcount || preempt_count() > 1 || -+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || - hp->unplug == current) { - hp->refcount++; - return; - } -- preempt_enable(); -- hotplug_lock(); -- hotplug_unlock(); -+ if (hp->grab_lock) { -+ preempt_enable(); -+ hotplug_lock(hp); -+ hotplug_unlock(hp); -+ } else { -+ preempt_enable(); -+ /* -+ * Try to push this task off of this CPU. -+ */ -+ if (!migrate_me()) { -+ preempt_disable(); -+ hp = this_cpu_ptr(&hotplug_pcp); -+ if (!hp->grab_lock) { -+ /* -+ * Just let it continue it's already pinned -+ * or about to sleep. 
-+ */ -+ force = 1; -+ goto retry; -+ } -+ preempt_enable(); -+ } -+ } - preempt_disable(); - goto retry; - } -@@ -160,26 +191,84 @@ void unpin_current_cpu(void) - wake_up_process(hp->unplug); - } - --/* -- * FIXME: Is this really correct under all circumstances ? -- */ -+static void wait_for_pinned_cpus(struct hotplug_pcp *hp) -+{ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (hp->refcount) { -+ schedule_preempt_disabled(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+} -+ - static int sync_unplug_thread(void *data) - { - struct hotplug_pcp *hp = data; - - preempt_disable(); - hp->unplug = current; -+ wait_for_pinned_cpus(hp); -+ -+ /* -+ * This thread will synchronize the cpu_down() with threads -+ * that have pinned the CPU. When the pinned CPU count reaches -+ * zero, we inform the cpu_down code to continue to the next step. -+ */ - set_current_state(TASK_UNINTERRUPTIBLE); -- while (hp->refcount) { -- schedule_preempt_disabled(); -+ preempt_enable(); -+ complete(&hp->synced); -+ -+ /* -+ * If all succeeds, the next step will need tasks to wait till -+ * the CPU is offline before continuing. To do this, the grab_lock -+ * is set and tasks going into pin_current_cpu() will block on the -+ * mutex. But we still need to wait for those that are already in -+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() -+ * will kick this thread out. -+ */ -+ while (!hp->grab_lock && !kthread_should_stop()) { -+ schedule(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+ -+ /* Make sure grab_lock is seen before we see a stale completion */ -+ smp_mb(); -+ -+ /* -+ * Now just before cpu_down() enters stop machine, we need to make -+ * sure all tasks that are in pinned CPU sections are out, and new -+ * tasks will now grab the lock, keeping them from entering pinned -+ * CPU sections. -+ */ -+ if (!kthread_should_stop()) { -+ preempt_disable(); -+ wait_for_pinned_cpus(hp); -+ preempt_enable(); -+ complete(&hp->synced); -+ } -+ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (!kthread_should_stop()) { -+ schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - set_current_state(TASK_RUNNING); -- preempt_enable(); -- complete(&hp->synced); -+ -+ /* -+ * Force this thread off this CPU as it's going down and -+ * we don't want any more work on this CPU. -+ */ -+ current->flags &= ~PF_NO_SETAFFINITY; -+ do_set_cpus_allowed(current, cpu_present_mask); -+ migrate_me(); - return 0; - } - -+static void __cpu_unplug_sync(struct hotplug_pcp *hp) -+{ -+ wake_up_process(hp->sync_tsk); -+ wait_for_completion(&hp->synced); -+} -+ - /* - * Start the sync_unplug_thread on the target cpu and wait for it to - * complete. 
-@@ -187,23 +276,83 @@ static int sync_unplug_thread(void *data - static int cpu_unplug_begin(unsigned int cpu) - { - struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -- struct task_struct *tsk; -+ int err; -+ -+ /* Protected by cpu_hotplug.lock */ -+ if (!hp->mutex_init) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ spin_lock_init(&hp->lock); -+#else -+ mutex_init(&hp->mutex); -+#endif -+ hp->mutex_init = 1; -+ } -+ -+ /* Inform the scheduler to migrate tasks off this CPU */ -+ tell_sched_cpu_down_begin(cpu); - - init_completion(&hp->synced); -- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); -- if (IS_ERR(tsk)) -- return (PTR_ERR(tsk)); -- kthread_bind(tsk, cpu); -- wake_up_process(tsk); -- wait_for_completion(&hp->synced); -+ -+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); -+ if (IS_ERR(hp->sync_tsk)) { -+ err = PTR_ERR(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+ return err; -+ } -+ kthread_bind(hp->sync_tsk, cpu); -+ -+ /* -+ * Wait for tasks to get out of the pinned sections, -+ * it's still OK if new tasks enter. Some CPU notifiers will -+ * wait for tasks that are going to enter these sections and -+ * we must not have them block. -+ */ -+ __cpu_unplug_sync(hp); -+ - return 0; - } - -+static void cpu_unplug_sync(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ init_completion(&hp->synced); -+ /* The completion needs to be initialzied before setting grab_lock */ -+ smp_wmb(); -+ -+ /* Grab the mutex before setting grab_lock */ -+ hotplug_lock(hp); -+ hp->grab_lock = 1; -+ -+ /* -+ * The CPU notifiers have been completed. -+ * Wait for tasks to get out of pinned CPU sections and have new -+ * tasks block until the CPU is completely down. -+ */ -+ __cpu_unplug_sync(hp); -+ -+ /* All done with the sync thread */ -+ kthread_stop(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+} -+ - static void cpu_unplug_done(unsigned int cpu) - { - struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); - - hp->unplug = NULL; -+ /* Let all tasks know cpu unplug is finished before cleaning up */ -+ smp_wmb(); -+ -+ if (hp->sync_tsk) -+ kthread_stop(hp->sync_tsk); -+ -+ if (hp->grab_lock) { -+ hotplug_unlock(hp); -+ /* protected by cpu_hotplug.lock */ -+ hp->grab_lock = 0; -+ } -+ tell_sched_cpu_down_done(cpu); - } - - void get_online_cpus(void) -@@ -212,9 +361,9 @@ void get_online_cpus(void) - if (cpu_hotplug.active_writer == current) - return; - cpuhp_lock_acquire_read(); -- hotplug_lock(); -+ mutex_lock(&cpu_hotplug.lock); - atomic_inc(&cpu_hotplug.refcount); -- hotplug_unlock(); -+ mutex_unlock(&cpu_hotplug.lock); - } - EXPORT_SYMBOL_GPL(get_online_cpus); - -@@ -267,11 +416,11 @@ void cpu_hotplug_begin(void) - cpuhp_lock_acquire(); - - for (;;) { -- hotplug_lock(); -+ mutex_lock(&cpu_hotplug.lock); - prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); - if (likely(!atomic_read(&cpu_hotplug.refcount))) - break; -- hotplug_unlock(); -+ mutex_unlock(&cpu_hotplug.lock); - schedule(); - } - finish_wait(&cpu_hotplug.wq, &wait); -@@ -280,7 +429,7 @@ void cpu_hotplug_begin(void) - void cpu_hotplug_done(void) - { - cpu_hotplug.active_writer = NULL; -- hotplug_unlock(); -+ mutex_unlock(&cpu_hotplug.lock); - cpuhp_lock_release(); - } - -@@ -516,6 +665,9 @@ static int _cpu_down(unsigned int cpu, i - - smpboot_park_threads(cpu); - -+ /* Notifiers are done. Don't let any more tasks pin this CPU. */ -+ cpu_unplug_sync(cpu); -+ - /* - * Prevent irq alloc/free while the dying cpu reorganizes the - * interrupt affinities. 
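Summary of the protocol the kernel/cpu.c hunks above implement (the kernel/sched/core.c hunk below adds the migrate_me() helper it relies on); a simplified sketch, using only names introduced by this deleted patch:

    /* Reader side: a task that must stay on its CPU. */
    pin_current_cpu();      /* roughly: hp->refcount++ while no unplug runs */
    /* ... per-CPU critical section ... */
    unpin_current_cpu();    /* wakes hp->unplug once the last pin drops */

    /* Writer side, driven from _cpu_down():
     *   cpu_unplug_begin(cpu) - start sync_unplug/%d, drain existing pins
     *   cpu_unplug_sync(cpu)  - set hp->grab_lock so new pins block, drain again
     *   cpu_unplug_done(cpu)  - clear hp->unplug, drop the lock, notify sched
     */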
---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1211,6 +1211,84 @@ void do_set_cpus_allowed(struct task_str - enqueue_task(rq, p, ENQUEUE_RESTORE); - } - -+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); -+static DEFINE_MUTEX(sched_down_mutex); -+static cpumask_t sched_down_cpumask; -+ -+void tell_sched_cpu_down_begin(int cpu) -+{ -+ mutex_lock(&sched_down_mutex); -+ cpumask_set_cpu(cpu, &sched_down_cpumask); -+ mutex_unlock(&sched_down_mutex); -+} -+ -+void tell_sched_cpu_down_done(int cpu) -+{ -+ mutex_lock(&sched_down_mutex); -+ cpumask_clear_cpu(cpu, &sched_down_cpumask); -+ mutex_unlock(&sched_down_mutex); -+} -+ -+/** -+ * migrate_me - try to move the current task off this cpu -+ * -+ * Used by the pin_current_cpu() code to try to get tasks -+ * to move off the current CPU as it is going down. -+ * It will only move the task if the task isn't pinned to -+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) -+ * and the task has to be in a RUNNING state. Otherwise the -+ * movement of the task will wake it up (change its state -+ * to running) when the task did not expect it. -+ * -+ * Returns 1 if it succeeded in moving the current task -+ * 0 otherwise. -+ */ -+int migrate_me(void) -+{ -+ struct task_struct *p = current; -+ struct migration_arg arg; -+ struct cpumask *cpumask; -+ struct cpumask *mask; -+ unsigned long flags; -+ unsigned int dest_cpu; -+ struct rq *rq; -+ -+ /* -+ * We can not migrate tasks bounded to a CPU or tasks not -+ * running. The movement of the task will wake it up. -+ */ -+ if (p->flags & PF_NO_SETAFFINITY || p->state) -+ return 0; -+ -+ mutex_lock(&sched_down_mutex); -+ rq = task_rq_lock(p, &flags); -+ -+ cpumask = this_cpu_ptr(&sched_cpumasks); -+ mask = &p->cpus_allowed; -+ -+ cpumask_andnot(cpumask, mask, &sched_down_cpumask); -+ -+ if (!cpumask_weight(cpumask)) { -+ /* It's only on this CPU? */ -+ task_rq_unlock(rq, p, &flags); -+ mutex_unlock(&sched_down_mutex); -+ return 0; -+ } -+ -+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); -+ -+ arg.task = p; -+ arg.dest_cpu = dest_cpu; -+ -+ task_rq_unlock(rq, p, &flags); -+ -+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); -+ tlb_migrate_finish(p->mm); -+ mutex_unlock(&sched_down_mutex); -+ -+ return 1; -+} -+ - /* - * Change a given task's CPU affinity. Migrate the thread to a - * proper CPU and schedule it away if the CPU it's executing on diff --git a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch deleted file mode 100644 index 8d051116d..000000000 --- a/debian/patches/features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Steven Rostedt -Date: Tue, 4 Mar 2014 12:28:32 -0500 -Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -We hit another bug that was caused by switching cpu_chill() from -msleep() to hrtimer_nanosleep(). - -This time it is a livelock. The problem is that hrtimer_nanosleep() -calls schedule with the state == TASK_INTERRUPTIBLE. But these means -that if a signal is pending, the scheduler wont schedule, and will -simply change the current task state back to TASK_RUNNING. This -nullifies the whole point of cpu_chill() in the first place. 
That is, -if a task is spinning on a try_lock() and it preempted the owner of the -lock, if it has a signal pending, it will never give up the CPU to let -the owner of the lock run. - -I made a static function __hrtimer_nanosleep() that takes a fifth -parameter "state", which determines the task state of that the -nanosleep() will be in. The normal hrtimer_nanosleep() will act the -same, but cpu_chill() will call the __hrtimer_nanosleep() directly with -the TASK_UNINTERRUPTIBLE state. - -cpu_chill() only cares that the first sleep happens, and does not care -about the state of the restart schedule (in hrtimer_nanosleep_restart). - - -Reported-by: Ulrich Obergfell -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 25 ++++++++++++++++++------- - 1 file changed, 18 insertions(+), 7 deletions(-) - ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1656,12 +1656,13 @@ void hrtimer_init_sleeper(struct hrtimer - } - EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); - --static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) -+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, -+ unsigned long state) - { - hrtimer_init_sleeper(t, current); - - do { -- set_current_state(TASK_INTERRUPTIBLE); -+ set_current_state(state); - hrtimer_start_expires(&t->timer, mode); - - if (likely(t->task)) -@@ -1703,7 +1704,8 @@ long __sched hrtimer_nanosleep_restart(s - HRTIMER_MODE_ABS); - hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); - -- if (do_nanosleep(&t, HRTIMER_MODE_ABS)) -+ /* cpu_chill() does not care about restart state. */ -+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) - goto out; - - rmtp = restart->nanosleep.rmtp; -@@ -1720,8 +1722,10 @@ long __sched hrtimer_nanosleep_restart(s - return ret; - } - --long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -- const enum hrtimer_mode mode, const clockid_t clockid) -+static long -+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -+ const enum hrtimer_mode mode, const clockid_t clockid, -+ unsigned long state) - { - struct restart_block *restart; - struct hrtimer_sleeper t; -@@ -1734,7 +1738,7 @@ long hrtimer_nanosleep(struct timespec * - - hrtimer_init_on_stack(&t.timer, clockid, mode); - hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); -- if (do_nanosleep(&t, mode)) -+ if (do_nanosleep(&t, mode, state)) - goto out; - - /* Absolute timers do not update the rmtp value and restart: */ -@@ -1761,6 +1765,12 @@ long hrtimer_nanosleep(struct timespec * - return ret; - } - -+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -+ const enum hrtimer_mode mode, const clockid_t clockid) -+{ -+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); -+} -+ - SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, - struct timespec __user *, rmtp) - { -@@ -1787,7 +1797,8 @@ void cpu_chill(void) - unsigned int freeze_flag = current->flags & PF_NOFREEZE; - - current->flags |= PF_NOFREEZE; -- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); -+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, -+ TASK_UNINTERRUPTIBLE); - if (!freeze_flag) - current->flags &= ~PF_NOFREEZE; - } diff --git a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch b/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch deleted file mode 100644 index 11d31722b..000000000 --- 
a/debian/patches/features/all/rt/cpu_down_move_migrate_enable_back.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Tiejun Chen -Subject: cpu_down: move migrate_enable() back -Date: Thu, 7 Nov 2013 10:06:07 +0800 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to -use migrate_enable()/migrate_disable() to replace that combination -of preempt_enable() and preempt_disable(), but actually in -!CONFIG_PREEMPT_RT_FULL case, migrate_enable()/migrate_disable() -are still equal to preempt_enable()/preempt_disable(). So that -followed cpu_hotplug_begin()/cpu_unplug_begin(cpu) would go schedule() -to trigger schedule_debug() like this: - -_cpu_down() - | - + migrate_disable() = preempt_disable() - | - + cpu_hotplug_begin() or cpu_unplug_begin() - | - + schedule() - | - + __schedule() - | - + preempt_disable(); - | - + __schedule_bug() is true! - -So we should move migrate_enable() as the original scheme. - - -Signed-off-by: Tiejun Chen ---- - kernel/cpu.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -657,6 +657,7 @@ static int _cpu_down(unsigned int cpu, i - err = -EBUSY; - goto restore_cpus; - } -+ migrate_enable(); - - cpu_hotplug_begin(); - err = cpu_unplug_begin(cpu); -@@ -741,7 +742,6 @@ static int _cpu_down(unsigned int cpu, i - out_release: - cpu_unplug_done(cpu); - out_cancel: -- migrate_enable(); - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch deleted file mode 100644 index b5276e649..000000000 --- a/debian/patches/features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 9 Apr 2015 15:23:01 +0200 -Subject: cpufreq: drop K8's driver from beeing selected -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Ralf posted a picture of a backtrace from - -| powernowk8_target_fn() -> transition_frequency_fidvid() and then at the -| end: -| 932 policy = cpufreq_cpu_get(smp_processor_id()); -| 933 cpufreq_cpu_put(policy); - -crashing the system on -RT. I assumed that policy was a NULL pointer but -was rulled out. Since Ralf can't do any more investigations on this and -I have no machine with this, I simply switch it off. - -Reported-by: Ralf Mardorf -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/cpufreq/Kconfig.x86 | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/cpufreq/Kconfig.x86 -+++ b/drivers/cpufreq/Kconfig.x86 -@@ -123,7 +123,7 @@ config X86_POWERNOW_K7_ACPI - - config X86_POWERNOW_K8 - tristate "AMD Opteron/Athlon64 PowerNow!" -- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ -+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE - help - This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. - Support for K10 and newer processors is now in acpi-cpufreq. 
diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch deleted file mode 100644 index c7cfc2a32..000000000 --- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch +++ /dev/null @@ -1,35 +0,0 @@ -Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT -From: Thomas Gleixner -Date: Wed, 14 Dec 2011 01:03:49 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -We can't deal with the cpumask allocations which happen in atomic -context (see arch/x86/kernel/apic/io_apic.c) on RT right now. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/Kconfig | 2 +- - lib/Kconfig | 1 + - 2 files changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -851,7 +851,7 @@ config IOMMU_HELPER - config MAXSMP - bool "Enable Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && DEBUG_KERNEL -- select CPUMASK_OFFSTACK -+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL - ---help--- - Enable maximum number of CPUS and NUMA Nodes for this architecture. - If unsure, say N. ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -395,6 +395,7 @@ config CHECK_SIGNATURE - - config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS -+ depends on !PREEMPT_RT_FULL - help - Use dynamic allocation for cpumask_var_t, instead of putting - them on the stack. This is a bit more expensive, but avoids diff --git a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch deleted file mode 100644 index 1e78d9e7c..000000000 --- a/debian/patches/features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ /dev/null @@ -1,242 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 21 Feb 2014 17:24:04 +0100 -Subject: crypto: Reduce preempt disabled regions, more algos -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Don Estabrook reported -| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() -| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200() -| kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() - -and his backtrace showed some crypto functions which looked fine. - -The problem is the following sequence: - -glue_xts_crypt_128bit() -{ - blkcipher_walk_virt(); /* normal migrate_disable() */ - - glue_fpu_begin(); /* get atomic */ - - while (nbytes) { - __glue_xts_crypt_128bit(); - blkcipher_walk_done(); /* with nbytes = 0, migrate_enable() - * while we are atomic */ - }; - glue_fpu_end() /* no longer atomic */ -} - -and this is why the counter get out of sync and the warning is printed. -The other problem is that we are non-preemptible between -glue_fpu_begin() and glue_fpu_end() and the latency grows. To fix this, -I shorten the FPU off region and ensure blkcipher_walk_done() is called -with preemption enabled. This might hurt the performance because we now -enable/disable the FPU state more often but we gain lower latency and -the bug is gone. 
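The resulting loop shape, common to all the hunks below (simplified sketch of the cast5 CBC-decrypt case):

    while ((nbytes = walk.nbytes)) {
        fpu_enabled = cast5_fpu_begin(false, nbytes);   /* enter the atomic FPU region */
        nbytes = __cbc_decrypt(desc, &walk);
        cast5_fpu_end(fpu_enabled);                     /* atomic region ends here ... */
        err = blkcipher_walk_done(desc, &walk, nbytes); /* ... so this may sleep/migrate */
    }

The FPU state is toggled once per walk step instead of once per request, which costs some throughput but keeps every sleeping call outside the preempt-disabled region.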
- - -Reported-by: Don Estabrook -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/crypto/cast5_avx_glue.c | 21 +++++++++------------ - arch/x86/crypto/glue_helper.c | 31 +++++++++++++++---------------- - 2 files changed, 24 insertions(+), 28 deletions(-) - ---- a/arch/x86/crypto/cast5_avx_glue.c -+++ b/arch/x86/crypto/cast5_avx_glue.c -@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fp - static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - bool enc) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes; -@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_de - u8 *wsrc = walk->src.virt.addr; - u8 *wdst = walk->dst.virt.addr; - -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { -@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_de - } while (nbytes >= bsize); - - done: -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct - static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_ - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes)) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __cbc_decrypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct b - static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_de - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __ctr_crypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- cast5_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - ctr_crypt_final(desc, &walk); - err = blkcipher_walk_done(desc, &walk, 0); ---- a/arch/x86/crypto/glue_helper.c -+++ b/arch/x86/crypto/glue_helper.c -@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const - void *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = 128 / 8; - unsigned int nbytes, i, func_bytes; -- bool fpu_enabled = false; -+ bool fpu_enabled; - int err; - - err = blkcipher_walk_virt(desc, walk); -@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const - u8 *wdst = walk->dst.virt.addr; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; -@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const - } - - done: -+ glue_fpu_end(fpu_enabled); - err = 
blkcipher_walk_done(desc, walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - -@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct - - while ((nbytes = walk.nbytes)) { - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); -@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct c - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct c - - while ((nbytes = walk.nbytes) >= bsize) { - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - glue_ctr_crypt_final_128bit( - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); -@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct c - void *tweak_ctx, void *crypt_ctx) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct c - - /* set minimum length to bsize, for tweak_fn */ - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, -+ desc, false, - nbytes < bsize ? bsize : nbytes); -- - /* calculate first value of T */ - tweak_fn(tweak_ctx, walk.iv, walk.iv); -+ glue_fpu_end(fpu_enabled); - - while (nbytes) { -+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -+ desc, false, nbytes); - nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); - -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - nbytes = walk.nbytes; - } -- -- glue_fpu_end(fpu_enabled); -- - return err; - } - EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch deleted file mode 100644 index 75000bfb2..000000000 --- a/debian/patches/features/all/rt/debugobjects-rt.patch +++ /dev/null @@ -1,26 +0,0 @@ -Subject: debugobjects: Make RT aware -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:41:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Avoid filling the pool / allocating memory with irqs off(). 
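In context, the change below boils down to (sketch):

    /* RT: only refill the object pool from preemptible, irqs-on context;
     * otherwise skip the refill and run on the existing pool. */
    #ifdef CONFIG_PREEMPT_RT_FULL
        if (preempt_count() == 0 && !irqs_disabled())
    #endif
            fill_pool();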
- -Signed-off-by: Thomas Gleixner ---- - lib/debugobjects.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/lib/debugobjects.c -+++ b/lib/debugobjects.c -@@ -309,7 +309,10 @@ static void - struct debug_obj *obj; - unsigned long flags; - -- fill_pool(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (preempt_count() == 0 && !irqs_disabled()) -+#endif -+ fill_pool(); - - db = get_bucket((unsigned long) addr); - diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch deleted file mode 100644 index 5ad1d0e9a..000000000 --- a/debian/patches/features/all/rt/dm-make-rt-aware.patch +++ /dev/null @@ -1,27 +0,0 @@ -Subject: dm: Make rt aware -From: Thomas Gleixner -Date: Mon, 14 Nov 2011 23:06:09 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has -interrupts legitimately enabled here as we cant deadlock against the -irq thread due to the "sleeping spinlocks" conversion. - -Reported-by: Luis Claudio R. Goncalves - -Signed-off-by: Thomas Gleixner ---- - drivers/md/dm.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -2126,7 +2126,7 @@ static void dm_request_fn(struct request - /* Establish tio->ti before queuing work (map_tio_request) */ - tio->ti = ti; - queue_kthread_work(&md->kworker, &tio->work); -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - } - - goto out; diff --git a/debian/patches/features/all/rt/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch b/debian/patches/features/all/rt/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch deleted file mode 100644 index e125a7744..000000000 --- a/debian/patches/features/all/rt/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Anders Roxell -Date: Fri, 15 Jan 2016 20:21:12 +0100 -Subject: drivers/cpuidle: coupled: fix warning cpuidle_coupled_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Used multi_v7_defconfig+PREEMPT_RT_FULL=y and this caused a compilation -warning without this fix: -../drivers/cpuidle/coupled.c:122:21: warning: 'cpuidle_coupled_lock' -defined but not used [-Wunused-variable] - -Signed-off-by: Anders Roxell -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/cpuidle/coupled.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/cpuidle/coupled.c -+++ b/drivers/cpuidle/coupled.c -@@ -119,7 +119,6 @@ struct cpuidle_coupled { - - #define CPUIDLE_COUPLED_NOT_IDLE (-1) - --static DEFINE_MUTEX(cpuidle_coupled_lock); - static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); - - /* diff --git a/debian/patches/features/all/rt/drivers-media-vsp1_video-fix-compile-error.patch b/debian/patches/features/all/rt/drivers-media-vsp1_video-fix-compile-error.patch deleted file mode 100644 index 654eee795..000000000 --- a/debian/patches/features/all/rt/drivers-media-vsp1_video-fix-compile-error.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Anders Roxell -Date: Fri, 15 Jan 2016 01:09:43 +0100 -Subject: drivers/media: vsp1_video: fix compile error -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This was found with the -RT patch enabled, but the fix should apply to -non-RT also. 
- -Compilation error without this fix: -../drivers/media/platform/vsp1/vsp1_video.c: In function -'vsp1_pipeline_stopped': -../drivers/media/platform/vsp1/vsp1_video.c:524:2: error: expected -expression before 'do' - spin_unlock_irqrestore(&pipe->irqlock, flags); - ^ - -Signed-off-by: Anders Roxell -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/media/platform/vsp1/vsp1_video.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/media/platform/vsp1/vsp1_video.c -+++ b/drivers/media/platform/vsp1/vsp1_video.c -@@ -520,7 +520,7 @@ static bool vsp1_pipeline_stopped(struct - bool stopped; - - spin_lock_irqsave(&pipe->irqlock, flags); -- stopped = pipe->state == VSP1_PIPELINE_STOPPED, -+ stopped = pipe->state == VSP1_PIPELINE_STOPPED; - spin_unlock_irqrestore(&pipe->irqlock, flags); - - return stopped; diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch deleted file mode 100644 index 38804ec22..000000000 --- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:24 -0500 -Subject: drivers/net: Use disable_irq_nosync() in 8139too -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use disable_irq_nosync() instead of disable_irq() as this might be -called in atomic context with netpoll. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/net/ethernet/realtek/8139too.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/realtek/8139too.c -+++ b/drivers/net/ethernet/realtek/8139too.c -@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(stru - struct rtl8139_private *tp = netdev_priv(dev); - const int irq = tp->pci_dev->irq; - -- disable_irq(irq); -+ disable_irq_nosync(irq); - rtl8139_interrupt(irq, dev); - enable_irq(irq); - } diff --git a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch deleted file mode 100644 index eea735032..000000000 --- a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch +++ /dev/null @@ -1,127 +0,0 @@ -From: Thomas Gleixner -Date: Sat, 20 Jun 2009 11:36:54 +0200 -Subject: drivers/net: fix livelock issues -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro -optimization. The reason is that the softirq thread is rescheduling -itself on that return value. Depending on priorities it starts to -monoplize the CPU and livelock on UP systems. - -Remove it. 
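Each driver below gets the same mechanical conversion; representative before/after (sketch based on the atl1c hunk):

    /* Before: trylock and punt. On RT the softirq thread reschedules itself
     * on NETDEV_TX_LOCKED and can monopolize the CPU, livelocking UP boxes. */
    if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
        return NETDEV_TX_LOCKED;

    /* After: take the lock unconditionally. A contended RT spinlock sleeps,
     * so there is no return value to spin on. */
    spin_lock_irqsave(&adapter->tx_lock, flags);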
- -Signed-off-by: Thomas Gleixner - ---- - drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +----- - drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +-- - drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +-- - drivers/net/ethernet/neterion/s2io.c | 7 +------ - drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++---- - drivers/net/ethernet/tehuti/tehuti.c | 9 ++------- - drivers/net/rionet.c | 6 +----- - 7 files changed, 9 insertions(+), 31 deletions(-) - ---- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c -+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c -@@ -2221,11 +2221,7 @@ static netdev_tx_t atl1c_xmit_frame(stru - } - - tpd_req = atl1c_cal_tpd_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { -- if (netif_msg_pktdata(adapter)) -- dev_info(&adapter->pdev->dev, "tx locked\n"); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1c_tpd_avail(adapter, type) < tpd_req) { - /* no enough descriptor, just stop queue */ ---- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c -+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c -@@ -1880,8 +1880,7 @@ static netdev_tx_t atl1e_xmit_frame(stru - return NETDEV_TX_OK; - } - tpd_req = atl1e_cal_tdp_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) -- return NETDEV_TX_LOCKED; -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1e_tpd_avail(adapter) < tpd_req) { - /* no enough descriptor, just stop queue */ ---- a/drivers/net/ethernet/chelsio/cxgb/sge.c -+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c -@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb - struct cmdQ *q = &sge->cmdQ[qid]; - unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - -- if (!spin_trylock(&q->lock)) -- return NETDEV_TX_LOCKED; -+ spin_lock(&q->lock); - - reclaim_completed_tx(sge, q); - ---- a/drivers/net/ethernet/neterion/s2io.c -+++ b/drivers/net/ethernet/neterion/s2io.c -@@ -4084,12 +4084,7 @@ static netdev_tx_t s2io_xmit(struct sk_b - [skb->priority & (MAX_TX_FIFOS - 1)]; - fifo = &mac_control->fifos[queue]; - -- if (do_spin_lock) -- spin_lock_irqsave(&fifo->tx_lock, flags); -- else { -- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&fifo->tx_lock, flags); - - if (sp->config.multiq) { - if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { ---- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c -+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c -@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_ - struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; - unsigned long flags; - -- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { -- /* Collision - tell upper layer to requeue */ -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&tx_ring->tx_lock, flags); -+ - if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { - netif_stop_queue(netdev); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); ---- a/drivers/net/ethernet/tehuti/tehuti.c -+++ b/drivers/net/ethernet/tehuti/tehuti.c -@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc - unsigned long flags; - - ENTER; -- local_irq_save(flags); -- if (!spin_trylock(&priv->tx_lock)) { -- local_irq_restore(flags); -- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", -- BDX_DRV_NAME, ndev->name); -- return NETDEV_TX_LOCKED; -- } -+ -+ spin_lock_irqsave(&priv->tx_lock, flags); - - /* build tx descriptor */ - BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ ---- a/drivers/net/rionet.c -+++ 
b/drivers/net/rionet.c -@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b - unsigned long flags; - int add_num = 1; - -- local_irq_save(flags); -- if (!spin_trylock(&rnet->tx_lock)) { -- local_irq_restore(flags); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&rnet->tx_lock, flags); - - if (is_multicast_ether_addr(eth->h_dest)) - add_num = nets[rnet->mport->id].nact; diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch deleted file mode 100644 index 61a7e5787..000000000 --- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch +++ /dev/null @@ -1,49 +0,0 @@ -From: Steven Rostedt -Date: Fri, 3 Jul 2009 08:30:00 -0500 -Subject: drivers/net: vortex fix locking issues -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Argh, cut and paste wasn't enough... - -Use this patch instead. It needs an irq disable. But, believe it or not, -on SMP this is actually better. If the irq is shared (as it is in Mark's -case), we don't stop the irq of other devices from being handled on -another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). - -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner - - drivers/net/ethernet/3com/3c59x.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -Signed-off-by: Ingo Molnar - ---- a/drivers/net/ethernet/3com/3c59x.c -+++ b/drivers/net/ethernet/3com/3c59x.c -@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic - { - struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - #endif - -@@ -1916,12 +1916,12 @@ static void vortex_tx_timeout(struct net - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - } - diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch deleted file mode 100644 index ec05dc1b1..000000000 --- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:30 -0500 -Subject: drivers: random: Reduce preempt disabled region -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -No need to keep preemption disabled across the whole function. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/char/random.c | 3 --- - 1 file changed, 3 deletions(-) - ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -796,8 +796,6 @@ static void add_timer_randomness(struct - } sample; - long delta, delta2, delta3; - -- preempt_disable(); -- - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; -@@ -838,7 +836,6 @@ static void add_timer_randomness(struct - */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); - } -- preempt_enable(); - } - - void add_input_randomness(unsigned int type, unsigned int code, diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch deleted file mode 100644 index 76f85f36b..000000000 --- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch +++ /dev/null @@ -1,43 +0,0 @@ -Subject: tty/serial/omap: Make the locking RT aware -From: Thomas Gleixner -Date: Thu, 28 Jul 2011 13:32:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The lock is a sleeping lock and local_irq_save() is not the -optimsation we are looking for. Redo it to make it work on -RT and -non-RT. - -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/omap-serial.c | 12 ++++-------- - 1 file changed, 4 insertions(+), 8 deletions(-) - ---- a/drivers/tty/serial/omap-serial.c -+++ b/drivers/tty/serial/omap-serial.c -@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console - - pm_runtime_get_sync(up->dev); - -- local_irq_save(flags); -- if (up->port.sysrq) -- locked = 0; -- else if (oops_in_progress) -- locked = spin_trylock(&up->port.lock); -+ if (up->port.sysrq || oops_in_progress) -+ locked = spin_trylock_irqsave(&up->port.lock, flags); - else -- spin_lock(&up->port.lock); -+ spin_lock_irqsave(&up->port.lock, flags); - - /* - * First save the IER then disable the interrupts -@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console - pm_runtime_mark_last_busy(up->dev); - pm_runtime_put_autosuspend(up->dev); - if (locked) -- spin_unlock(&up->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&up->port.lock, flags); - } - - static int __init diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch deleted file mode 100644 index 64e24233c..000000000 --- a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch +++ /dev/null @@ -1,48 +0,0 @@ -Subject: tty/serial/pl011: Make the locking work on RT -From: Thomas Gleixner -Date: Tue, 08 Jan 2013 21:36:51 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The lock is a sleeping lock and local_irq_save() is not the optimsation -we are looking for. Redo it to make it work on -RT and non-RT. - -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/amba-pl011.c | 15 ++++++++++----- - 1 file changed, 10 insertions(+), 5 deletions(-) - ---- a/drivers/tty/serial/amba-pl011.c -+++ b/drivers/tty/serial/amba-pl011.c -@@ -2067,13 +2067,19 @@ pl011_console_write(struct console *co, - - clk_enable(uap->clk); - -- local_irq_save(flags); -+ /* -+ * local_irq_save(flags); -+ * -+ * This local_irq_save() is nonsense. If we come in via sysrq -+ * handling then interrupts are already disabled. Aside of -+ * that the port.sysrq check is racy on SMP regardless. 
-+ */ - if (uap->port.sysrq) - locked = 0; - else if (oops_in_progress) -- locked = spin_trylock(&uap->port.lock); -+ locked = spin_trylock_irqsave(&uap->port.lock, flags); - else -- spin_lock(&uap->port.lock); -+ spin_lock_irqsave(&uap->port.lock, flags); - - /* - * First save the CR then disable the interrupts -@@ -2098,8 +2104,7 @@ pl011_console_write(struct console *co, - writew(old_cr, uap->port.membase + UART011_CR); - - if (locked) -- spin_unlock(&uap->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&uap->port.lock, flags); - - clk_disable(uap->clk); - } diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch deleted file mode 100644 index 6a9cc85b5..000000000 --- a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch +++ /dev/null @@ -1,59 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 25 Apr 2013 18:12:52 +0200 -Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This tracepoint is responsible for: - -|[<814cc358>] __schedule_bug+0x4d/0x59 -|[<814d24cc>] __schedule+0x88c/0x930 -|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50 -|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50 -|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250 -|[<814d27d9>] schedule+0x29/0x70 -|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278 -|[<814d3786>] rt_spin_lock+0x26/0x30 -|[] gen6_gt_force_wake_get+0x29/0x60 [i915] -|[] gen6_ring_get_irq+0x5f/0x100 [i915] -|[] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915] -|[] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915] -|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60 -|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180 -|[<8101d063>] ? native_sched_clock+0x13/0x80 -|[] i915_gem_execbuffer2+0x99/0x280 [i915] -|[] drm_ioctl+0x4c3/0x570 [drm] -|[<8101d0d9>] ? sched_clock+0x9/0x10 -|[] ? i915_gem_execbuffer+0x480/0x480 [i915] -|[<810f1c18>] ? rb_commit+0x68/0xa0 -|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0 -|[<81197467>] do_vfs_ioctl+0x97/0x540 -|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130 -|[<811979a1>] sys_ioctl+0x91/0xb0 -|[<814db931>] tracesys+0xe1/0xe6 - -Chris Wilson does not like to move i915_trace_irq_get() out of the macro - -|No. This enables the IRQ, as well as making a number of -|very expensively serialised read, unconditionally. - -so it is gone now on RT. 
- - -Reported-by: Joakim Hernberg -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c -+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c -@@ -1264,7 +1264,9 @@ i915_gem_ringbuffer_submission(struct i9 - if (ret) - return ret; - -+#ifndef CONFIG_PREEMPT_RT_BASE - trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); -+#endif - - i915_gem_execbuffer_move_to_active(vmas, params->request); - i915_gem_execbuffer_retire_commands(params); diff --git a/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch b/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch deleted file mode 100644 index 37ad87410..000000000 --- a/debian/patches/features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch +++ /dev/null @@ -1,99 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Sun, 16 Aug 2015 14:27:50 +0200 -Subject: dump stack: don't disable preemption during trace -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -I see here large latencies during a stack dump on x86. The -preempt_disable() and get_cpu() should forbid moving the task to another -CPU during a stack dump and avoiding two stack traces in parallel on the -same CPU. However a stack trace from a second CPU may still happen in -parallel. Also nesting is allowed so a stack trace happens in -process-context and we may have another one from IRQ context. With migrate -disable we keep this code preemptible and allow a second backtrace on -the same CPU by another task. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kernel/dumpstack_32.c | 4 ++-- - arch/x86/kernel/dumpstack_64.c | 8 ++++---- - lib/dump_stack.c | 4 ++-- - 3 files changed, 8 insertions(+), 8 deletions(-) - ---- a/arch/x86/kernel/dumpstack_32.c -+++ b/arch/x86/kernel/dumpstack_32.c -@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data) - { -- const unsigned cpu = get_cpu(); -+ const unsigned cpu = get_cpu_light(); - int graph = 0; - u32 *prev_esp; - -@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task - break; - touch_nmi_watchdog(); - } -- put_cpu(); -+ put_cpu_light(); - } - EXPORT_SYMBOL(dump_trace); - ---- a/arch/x86/kernel/dumpstack_64.c -+++ b/arch/x86/kernel/dumpstack_64.c -@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data) - { -- const unsigned cpu = get_cpu(); -+ const unsigned cpu = get_cpu_light(); - struct thread_info *tinfo; - unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); - unsigned long dummy; -@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task - * This handles the process stack: - */ - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); -- put_cpu(); -+ put_cpu_light(); - } - EXPORT_SYMBOL(dump_trace); - -@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *t - int cpu; - int i; - -- preempt_disable(); -+ migrate_disable(); - cpu = smp_processor_id(); - - irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); -@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *t - pr_cont(" %016lx", *stack++); - touch_nmi_watchdog(); - } -- preempt_enable(); -+ migrate_enable(); - - pr_cont("\n"); - show_trace_log_lvl(task, regs, sp, bp, log_lvl); ---- 
a/lib/dump_stack.c -+++ b/lib/dump_stack.c -@@ -33,7 +33,7 @@ asmlinkage __visible void dump_stack(voi - * Permit this cpu to perform nested stack dumps while serialising - * against other CPUs - */ -- preempt_disable(); -+ migrate_disable(); - - retry: - cpu = smp_processor_id(); -@@ -52,7 +52,7 @@ asmlinkage __visible void dump_stack(voi - if (!was_locked) - atomic_set(&dump_lock, -1); - -- preempt_enable(); -+ migrate_enable(); - } - #else - asmlinkage __visible void dump_stack(void) diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch deleted file mode 100644 index 087c4ec50..000000000 --- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch +++ /dev/null @@ -1,31 +0,0 @@ -Subject: fs/epoll: Do not disable preemption on RT -From: Thomas Gleixner -Date: Fri, 08 Jul 2011 16:35:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -ep_call_nested() takes a sleeping lock so we can't disable preemption. -The light version is enough since ep_call_nested() doesn't mind beeing -invoked twice on the same CPU. - -Signed-off-by: Thomas Gleixner ---- - fs/eventpoll.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/fs/eventpoll.c -+++ b/fs/eventpoll.c -@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *pri - */ - static void ep_poll_safewake(wait_queue_head_t *wq) - { -- int this_cpu = get_cpu(); -+ int this_cpu = get_cpu_light(); - - ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, - ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - -- put_cpu(); -+ put_cpu_light(); - } - - static void ep_remove_wait_queue(struct eppoll_entry *pwq) diff --git a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch b/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch deleted file mode 100644 index cff591324..000000000 --- a/debian/patches/features/all/rt/fs-aio-simple-simple-work.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: fs/aio: simple simple work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 -|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 -|2 locks held by rcuos/2/26: -| #0: (rcu_callback){.+.+..}, at: [] rcu_nocb_kthread+0x1e2/0x380 -| #1: (rcu_read_lock_sched){.+.+..}, at: [] percpu_ref_kill_rcu+0xa6/0x1c0 -|Preemption disabled at:[] rcu_nocb_kthread+0x263/0x380 -|Call Trace: -| [] dump_stack+0x4e/0x9c -| [] __might_sleep+0xfb/0x170 -| [] rt_spin_lock+0x24/0x70 -| [] free_ioctx_users+0x30/0x130 -| [] percpu_ref_kill_rcu+0x1b4/0x1c0 -| [] rcu_nocb_kthread+0x263/0x380 -| [] kthread+0xd6/0xf0 -| [] ret_from_fork+0x7c/0xb0 - -replace this preempt_disable() friendly swork. 
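The handoff the hunks below add, in plain form: the percpu_ref release callback runs in RCU callback context (atomic on RT), so it only queues; the swork kthread performs the part that takes sleeping locks.

    static void free_ioctx_users(struct percpu_ref *ref)
    {
        struct kioctx *ctx = container_of(ref, struct kioctx, users);

        /* Atomic context: defer to the swork kthread. */
        INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
        swork_queue(&ctx->free_work);
    }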
- -Reported-By: Mike Galbraith -Suggested-by: Benjamin LaHaise -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/aio.c | 24 +++++++++++++++++------- - 1 file changed, 17 insertions(+), 7 deletions(-) - ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -40,6 +40,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -115,7 +116,7 @@ struct kioctx { - struct page **ring_pages; - long nr_pages; - -- struct work_struct free_work; -+ struct swork_event free_work; - - /* - * signals when all in-flight requests are done -@@ -253,6 +254,7 @@ static int __init aio_setup(void) - .mount = aio_mount, - .kill_sb = kill_anon_super, - }; -+ BUG_ON(swork_get()); - aio_mnt = kern_mount(&aio_fs); - if (IS_ERR(aio_mnt)) - panic("Failed to create aio fs mount."); -@@ -568,9 +570,9 @@ static int kiocb_cancel(struct aio_kiocb - return cancel(&kiocb->common); - } - --static void free_ioctx(struct work_struct *work) -+static void free_ioctx(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(work, struct kioctx, free_work); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - - pr_debug("freeing %p\n", ctx); - -@@ -589,8 +591,8 @@ static void free_ioctx_reqs(struct percp - if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) - complete(&ctx->rq_wait->comp); - -- INIT_WORK(&ctx->free_work, free_ioctx); -- schedule_work(&ctx->free_work); -+ INIT_SWORK(&ctx->free_work, free_ioctx); -+ swork_queue(&ctx->free_work); - } - - /* -@@ -598,9 +600,9 @@ static void free_ioctx_reqs(struct percp - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. - */ --static void free_ioctx_users(struct percpu_ref *ref) -+static void free_ioctx_users_work(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -619,6 +621,14 @@ static void free_ioctx_users(struct perc - percpu_ref_put(&ctx->reqs); - } - -+static void free_ioctx_users(struct percpu_ref *ref) -+{ -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ -+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); -+ swork_queue(&ctx->free_work); -+} -+ - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch deleted file mode 100644 index 1d2d3dd27..000000000 --- a/debian/patches/features/all/rt/fs-block-rt-support.patch +++ /dev/null @@ -1,23 +0,0 @@ -Subject: block: Turn off warning which is bogus on RT -From: Thomas Gleixner -Date: Tue, 14 Jun 2011 17:05:09 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On -RT the context is always with IRQs enabled. Ignore this warning on -RT. 
- -Signed-off-by: Thomas Gleixner ---- - block/blk-core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async); - **/ - void blk_start_queue(struct request_queue *q) - { -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); - - queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q); diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch deleted file mode 100644 index 08688be5a..000000000 --- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch +++ /dev/null @@ -1,86 +0,0 @@ -Subject: fs: dcache: Use cpu_chill() in trylock loops -From: Thomas Gleixner -Date: Wed, 07 Mar 2012 21:00:34 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Retry loops on RT might loop forever when the modifying side was -preempted. Use cpu_chill() instead of cpu_relax() to let the system -make progress. - -Signed-off-by: Thomas Gleixner - ---- - fs/autofs4/autofs_i.h | 1 + - fs/autofs4/expire.c | 2 +- - fs/dcache.c | 5 +++-- - fs/namespace.c | 3 ++- - 4 files changed, 7 insertions(+), 4 deletions(-) - ---- a/fs/autofs4/autofs_i.h -+++ b/fs/autofs4/autofs_i.h -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - #include - #include - ---- a/fs/autofs4/expire.c -+++ b/fs/autofs4/expire.c -@@ -150,7 +150,7 @@ static struct dentry *get_next_positive_ - parent = p->d_parent; - if (!spin_trylock(&parent->d_lock)) { - spin_unlock(&p->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto relock; - } - spin_unlock(&p->d_lock); ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -589,7 +590,7 @@ static struct dentry *dentry_kill(struct - - failed: - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - return dentry; /* try again with same dentry */ - } - -@@ -2398,7 +2399,7 @@ void d_delete(struct dentry * dentry) - if (dentry->d_lockref.count == 1) { - if (!spin_trylock(&inode->i_lock)) { - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto again; - } - dentry->d_flags &= ~DCACHE_CANT_MOUNT; ---- a/fs/namespace.c -+++ b/fs/namespace.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include /* init_rootfs */ -@@ -355,7 +356,7 @@ int __mnt_want_write(struct vfsmount *m) - smp_mb(); - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { - preempt_enable(); -- cpu_relax(); -+ cpu_chill(); - preempt_disable(); - } - /* diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch deleted file mode 100644 index 0b6b54b20..000000000 --- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch +++ /dev/null @@ -1,97 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 18 Mar 2011 10:11:25 +0100 -Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -bit_spin_locks break under RT. 
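The pattern, repeated for each helper in the hunks below: keep the bit spinlock for !RT, and take one of the real spinlocks that the patch adds to struct buffer_head on RT (sketch):

    static inline void jbd_lock_bh_state(struct buffer_head *bh)
    {
    #ifndef CONFIG_PREEMPT_RT_BASE
        bit_spin_lock(BH_State, &bh->b_state);  /* spins with preemption off */
    #else
        spin_lock(&bh->b_state_lock);           /* sleeping spinlock on RT */
    #endif
    }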
- -Based on a previous patch from Steven Rostedt -Signed-off-by: Thomas Gleixner --- - include/linux/buffer_head.h | 8 ++++++++ - include/linux/jbd2.h | 24 ++++++++++++++++++++++++ - 2 files changed, 32 insertions(+) - ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -77,6 +77,10 @@ struct buffer_head { - atomic_t b_count; /* users using this buffer_head */ - #ifdef CONFIG_PREEMPT_RT_BASE - spinlock_t b_uptodate_lock; -+#if IS_ENABLED(CONFIG_JBD2) -+ spinlock_t b_state_lock; -+ spinlock_t b_journal_head_lock; -+#endif - #endif - }; - -@@ -108,6 +112,10 @@ static inline void buffer_head_init_lock - { - #ifdef CONFIG_PREEMPT_RT_BASE - spin_lock_init(&bh->b_uptodate_lock); -+#if IS_ENABLED(CONFIG_JBD2) -+ spin_lock_init(&bh->b_state_lock); -+ spin_lock_init(&bh->b_journal_head_lock); -+#endif - #endif - } - ---- a/include/linux/jbd2.h -+++ b/include/linux/jbd2.h -@@ -352,32 +352,56 @@ static inline struct journal_head *bh2jh - - static inline void jbd_lock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_State, &bh->b_state); -+#else -+ spin_lock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_trylock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_trylock(BH_State, &bh->b_state); -+#else -+ return spin_trylock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_is_locked_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_is_locked(BH_State, &bh->b_state); -+#else -+ return spin_is_locked(&bh->b_state_lock); -+#endif - } - - static inline void jbd_unlock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_State, &bh->b_state); -+#else -+ spin_unlock(&bh->b_state_lock); -+#endif - } - - static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_JournalHead, &bh->b_state); -+#else -+ spin_lock(&bh->b_journal_head_lock); -+#endif - } - - static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_JournalHead, &bh->b_state); -+#else -+ spin_unlock(&bh->b_journal_head_lock); -+#endif - } - - #define J_ASSERT(assert) BUG_ON(!(assert)) diff --git a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch deleted file mode 100644 index e38490252..000000000 --- a/debian/patches/features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 17 Feb 2014 17:30:03 +0100 -Subject: fs: jbd2: pull your plug when waiting for space -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Two cps in parallel managed to stall the the ext4 fs. It seems that -journal code is either waiting for locks or sleeping waiting for -something to happen. This seems similar to what Mike observed on ext3, -here is his description: - -|With an -rt kernel, and a heavy sync IO load, tasks can jam -|up on journal locks without unplugging, which can lead to -|terminal IO starvation. Unplug and schedule when waiting -|for space. 
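The fix below amounts to flushing the caller's plugged I/O before blocking on the checkpoint mutex: scheduling via io_schedule() submits the task's block plug, so the writes this task already queued can complete and free log space (sketch):

    while (jbd2_log_space_left(journal) < nblocks) {
        write_unlock(&journal->j_state_lock);
        if (current->plug)
            io_schedule();  /* submit our plugged I/O before sleeping */
        mutex_lock(&journal->j_checkpoint_mutex);
        /* ... checkpoint, then retry ... */
    }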
- - -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/jbd2/checkpoint.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/fs/jbd2/checkpoint.c -+++ b/fs/jbd2/checkpoint.c -@@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t - nblocks = jbd2_space_needed(journal); - while (jbd2_log_space_left(journal) < nblocks) { - write_unlock(&journal->j_state_lock); -+ if (current->plug) -+ io_schedule(); - mutex_lock(&journal->j_checkpoint_mutex); - - /* diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch deleted file mode 100644 index d7d5b3010..000000000 --- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 19 Jul 2009 08:44:27 -0500 -Subject: fs: namespace preemption fix -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On RT we cannot loop with preemption disabled here as -mnt_make_readonly() might have been preempted. We can safely enable -preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT -as well. - -Signed-off-by: Thomas Gleixner - ---- - fs/namespace.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/fs/namespace.c -+++ b/fs/namespace.c -@@ -353,8 +353,11 @@ int __mnt_want_write(struct vfsmount *m) - * incremented count after it has set MNT_WRITE_HOLD. - */ - smp_mb(); -- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) -+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { -+ preempt_enable(); - cpu_relax(); -+ preempt_disable(); -+ } - /* - * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will - * be set to match its requirements. So we must not load that until diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch deleted file mode 100644 index 6b6265030..000000000 --- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch +++ /dev/null @@ -1,60 +0,0 @@ -From: Mike Galbraith -Date: Fri, 3 Jul 2009 08:44:12 -0500 -Subject: fs: ntfs: disable interrupt only on !RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote: -> * Nick Piggin wrote: -> -> > > [10138.175796] [] show_trace+0x12/0x14 -> > > [10138.180291] [] dump_stack+0x16/0x18 -> > > [10138.184769] [] native_smp_call_function_mask+0x138/0x13d -> > > [10138.191117] [] smp_call_function+0x1e/0x24 -> > > [10138.196210] [] on_each_cpu+0x25/0x50 -> > > [10138.200807] [] flush_tlb_all+0x1e/0x20 -> > > [10138.205553] [] kmap_high+0x1b6/0x417 -> > > [10138.210118] [] kmap+0x4d/0x4f -> > > [10138.214102] [] ntfs_end_buffer_async_read+0x228/0x2f9 -> > > [10138.220163] [] end_bio_bh_io_sync+0x26/0x3f -> > > [10138.225352] [] bio_endio+0x42/0x6d -> > > [10138.229769] [] __end_that_request_first+0x115/0x4ac -> > > [10138.235682] [] end_that_request_chunk+0x8/0xa -> > > [10138.241052] [] ide_end_request+0x55/0x10a -> > > [10138.246058] [] ide_dma_intr+0x6f/0xac -> > > [10138.250727] [] ide_intr+0x93/0x1e0 -> > > [10138.255125] [] handle_IRQ_event+0x5c/0xc9 -> > -> > Looks like ntfs is kmap()ing from interrupt context. Should be using -> > kmap_atomic instead, I think. -> -> it's not atomic interrupt context but irq thread context - and -rt -> remaps kmap_atomic() to kmap() internally. - -Hm. 
Looking at the change to mm/bounce.c, perhaps I should do this -instead? - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - fs/ntfs/aops.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/fs/ntfs/aops.c -+++ b/fs/ntfs/aops.c -@@ -143,13 +143,13 @@ static void ntfs_end_buffer_async_read(s - recs = PAGE_CACHE_SIZE / rec_size; - /* Should have been verified before we got here... */ - BUG_ON(!recs); -- local_irq_save(flags); -+ local_irq_save_nort(flags); - kaddr = kmap_atomic(page); - for (i = 0; i < recs; i++) - post_read_mst_fixup((NTFS_RECORD*)(kaddr + - i * rec_size), rec_size); - kunmap_atomic(kaddr); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - flush_dcache_page(page); - if (likely(page_uptodate && !PageError(page))) - SetPageUptodate(page); diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch deleted file mode 100644 index c86b24b0c..000000000 --- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch +++ /dev/null @@ -1,162 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 18 Mar 2011 09:18:52 +0100 -Subject: buffer_head: Replace bh_uptodate_lock for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Wrap the bit_spin_lock calls into a separate inline and add the RT -replacements with a real spinlock. - -Signed-off-by: Thomas Gleixner ---- - fs/buffer.c | 21 +++++++-------------- - fs/ntfs/aops.c | 10 +++------- - include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ - 3 files changed, 44 insertions(+), 21 deletions(-) - ---- a/fs/buffer.c -+++ b/fs/buffer.c -@@ -305,8 +305,7 @@ static void end_buffer_async_read(struct - * decide that the page is now completely done. 
- */ - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -319,8 +318,7 @@ static void end_buffer_async_read(struct - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - - /* - * If none of the buffers had errors and they are all -@@ -332,9 +330,7 @@ static void end_buffer_async_read(struct - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /* -@@ -362,8 +358,7 @@ void end_buffer_async_write(struct buffe - } - - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - - clear_buffer_async_write(bh); - unlock_buffer(bh); -@@ -375,15 +370,12 @@ void end_buffer_async_write(struct buffe - } - tmp = tmp->b_this_page; - } -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - end_page_writeback(page); - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - EXPORT_SYMBOL(end_buffer_async_write); - -@@ -3325,6 +3317,7 @@ struct buffer_head *alloc_buffer_head(gf - struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); - if (ret) { - INIT_LIST_HEAD(&ret->b_assoc_buffers); -+ buffer_head_init_locks(ret); - preempt_disable(); - __this_cpu_inc(bh_accounting.nr); - recalc_bh_state(); ---- a/fs/ntfs/aops.c -+++ b/fs/ntfs/aops.c -@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s - "0x%llx.", (unsigned long long)bh->b_blocknr); - } - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - /* - * If none of the buffers had errors then we can set the page uptodate, - * but we first have to perform the post read mst fixups, if the -@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(s - unlock_page(page); - return; - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /** ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -75,8 +75,42 @@ struct buffer_head { - struct address_space *b_assoc_map; /* mapping this buffer is - associated with */ - atomic_t b_count; /* users using this buffer_head */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t b_uptodate_lock; -+#endif - }; - -+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) -+{ -+ unsigned long flags; -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ local_irq_save(flags); -+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); -+#else -+ spin_lock_irqsave(&bh->b_uptodate_lock, flags); -+#endif -+ return flags; -+} -+ -+static inline void -+bh_uptodate_unlock_irqrestore(struct 
buffer_head *bh, unsigned long flags) -+{ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); -+ local_irq_restore(flags); -+#else -+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); -+#endif -+} -+ -+static inline void buffer_head_init_locks(struct buffer_head *bh) -+{ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spin_lock_init(&bh->b_uptodate_lock); -+#endif -+} -+ - /* - * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() - * and buffer_foo() functions. diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch deleted file mode 100644 index 199e307ad..000000000 --- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch +++ /dev/null @@ -1,74 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:56:42 +0200 -Subject: trace: Add migrate-disabled counter to tracing output -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Signed-off-by: Thomas Gleixner ---- - include/linux/trace_events.h | 2 ++ - kernel/trace/trace.c | 9 ++++++--- - kernel/trace/trace_events.c | 2 ++ - kernel/trace/trace_output.c | 5 +++++ - 4 files changed, 15 insertions(+), 3 deletions(-) - ---- a/include/linux/trace_events.h -+++ b/include/linux/trace_events.h -@@ -66,6 +66,8 @@ struct trace_entry { - unsigned char flags; - unsigned char preempt_count; - int pid; -+ unsigned short migrate_disable; -+ unsigned short padding; - }; - - #define TRACE_EVENT_TYPE_MAX \ ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -1663,6 +1663,8 @@ tracing_generic_entry_update(struct trac - ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | - (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); -+ -+ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; - } - EXPORT_SYMBOL_GPL(tracing_generic_entry_update); - -@@ -2560,9 +2562,10 @@ static void print_lat_help_header(struct - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" -- "# |||| / delay \n" -- "# cmd pid ||||| time | caller \n" -- "# \\ / ||||| \\ | / \n"); -+ "# |||| / _--=> migrate-disable\n" -+ "# ||||| / delay \n" -+ "# cmd pid |||||| time | caller \n" -+ "# \\ / ||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) ---- a/kernel/trace/trace_events.c -+++ b/kernel/trace/trace_events.c -@@ -186,6 +186,8 @@ static int trace_define_common_fields(vo - __common_field(unsigned char, flags); - __common_field(unsigned char, preempt_count); - __common_field(int, pid); -+ __common_field(unsigned short, migrate_disable); -+ __common_field(unsigned short, padding); - - return ret; - } ---- a/kernel/trace/trace_output.c -+++ b/kernel/trace/trace_output.c -@@ -428,6 +428,11 @@ int trace_print_lat_fmt(struct trace_seq - else - trace_seq_putc(s, '.'); - -+ if (entry->migrate_disable) -+ trace_seq_printf(s, "%x", entry->migrate_disable); -+ else -+ trace_seq_putc(s, '.'); -+ - return !trace_seq_has_overflowed(s); - } - diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch deleted file mode 100644 index d775677a4..000000000 --- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch +++ /dev/null @@ -1,114 +0,0 @@ -From: Steven Rostedt -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: futex: Fix bug on when a requeued RT task times out -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Requeue with timeout causes a bug with PREEMPT_RT_FULL. - -The bug comes from a timed out condition. - - - TASK 1 TASK 2 - ------ ------ - futex_wait_requeue_pi() - futex_wait_queue_me() - - - double_lock_hb(); - - raw_spin_lock(pi_lock); - if (current->pi_blocked_on) { - } else { - current->pi_blocked_on = PI_WAKE_INPROGRESS; - run_spin_unlock(pi_lock); - spin_lock(hb->lock); <-- blocked! - - - plist_for_each_entry_safe(this) { - rt_mutex_start_proxy_lock(); - task_blocks_on_rt_mutex(); - BUG_ON(task->pi_blocked_on)!!!! - -The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the -problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to -grab the hb->lock, which it fails to do so. As the hb->lock is a mutex, -it will block and set the "pi_blocked_on" to the hb->lock. - -When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails -because the task1's pi_blocked_on is no longer set to that, but instead, -set to the hb->lock. - -The fix: - -When calling rt_mutex_start_proxy_lock() a check is made to see -if the proxy tasks pi_blocked_on is set. If so, exit out early. -Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies -the proxy task that it is being requeued, and will handle things -appropriately. 
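This requeue-PI path is reachable from ordinary userspace: glibc of this era implements condition variables over priority-inheritance mutexes with FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI, so a timed wait racing a broadcast exercises exactly the window described above. A hypothetical reproducer shape, assuming a shared condvar cv and mutex m (illustrative only, not a guaranteed trigger):

    #include <pthread.h>

    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static pthread_mutex_t m;

    static void init_pi_mutex(void)
    {
            pthread_mutexattr_t ma;

            pthread_mutexattr_init(&ma);
            pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
            pthread_mutex_init(&m, &ma);
    }

    /*
     * Waiter: pthread_cond_timedwait(&cv, &m, &ts) maps to
     * FUTEX_WAIT_REQUEUE_PI; a timeout racing with a concurrent
     * pthread_cond_broadcast() (FUTEX_CMP_REQUEUE_PI) hits the
     * requeue-versus-timeout window.
     */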
- - -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner ---- - kernel/locking/rtmutex.c | 32 +++++++++++++++++++++++++++++++- - kernel/locking/rtmutex_common.h | 1 + - 2 files changed, 32 insertions(+), 1 deletion(-) - ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -71,7 +71,8 @@ static void fixup_rt_mutex_waiters(struc - - static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) - { -- return waiter && waiter != PI_WAKEUP_INPROGRESS; -+ return waiter && waiter != PI_WAKEUP_INPROGRESS && -+ waiter != PI_REQUEUE_INPROGRESS; - } - - /* -@@ -1631,6 +1632,35 @@ int rt_mutex_start_proxy_lock(struct rt_ - return 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * In PREEMPT_RT there's an added race. -+ * If the task, that we are about to requeue, times out, -+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue -+ * to skip this task. But right after the task sets -+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then -+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. -+ * This will replace the PI_WAKEUP_INPROGRESS with the actual -+ * lock that it blocks on. We *must not* place this task -+ * on this proxy lock in that case. -+ * -+ * To prevent this race, we first take the task's pi_lock -+ * and check if it has updated its pi_blocked_on. If it has, -+ * we assume that it woke up and we return -EAGAIN. -+ * Otherwise, we set the task's pi_blocked_on to -+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up -+ * it will know that we are in the process of requeuing it. -+ */ -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->pi_blocked_on) { -+ raw_spin_unlock_irq(&task->pi_lock); -+ raw_spin_unlock(&lock->wait_lock); -+ return -EAGAIN; -+ } -+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; -+ raw_spin_unlock_irq(&task->pi_lock); -+#endif -+ - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -98,6 +98,7 @@ enum rtmutex_chainwalk { - * PI-futex support (proxy locking functions, etc.): - */ - #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) - - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, diff --git a/debian/patches/features/all/rt/genirq-Add-default-affinity-mask-command-line-option.patch b/debian/patches/features/all/rt/genirq-Add-default-affinity-mask-command-line-option.patch deleted file mode 100644 index 431420477..000000000 --- a/debian/patches/features/all/rt/genirq-Add-default-affinity-mask-command-line-option.patch +++ /dev/null @@ -1,68 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 25 May 2012 16:59:47 +0200 -Subject: genirq: Add default affinity mask command line option -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If we isolate CPUs, then we don't want random device interrupts on them. Even -w/o the user space irq balancer enabled we can end up with irqs on non boot -cpus and chasing newly requested interrupts is a tedious task. - -Allow to restrict the default irq affinity mask. 
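Taken together with isolcpus=, this gives a boot-time recipe for keeping device interrupts off CPUs reserved for RT work. A hypothetical command line, assuming a 6-CPU machine with CPUs 2-5 isolated (the cpulist syntax matches the Documentation hunk below):

    linux /vmlinuz ... isolcpus=2-5 irqaffinity=0-1

Any IRQ without an explicitly configured affinity then defaults to CPUs 0-1; per-IRQ settings written to /proc/irq/*/smp_affinity still override the default.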
- -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - Documentation/kernel-parameters.txt | 9 +++++++++ - kernel/irq/irqdesc.c | 21 +++++++++++++++++++-- - 2 files changed, 28 insertions(+), 2 deletions(-) - ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1629,6 +1629,15 @@ bytes respectively. Such letter suffixes - ip= [IP_PNP] - See Documentation/filesystems/nfs/nfsroot.txt. - -+ irqaffinity= [SMP] Set the default irq affinity mask -+ Format: -+ ,..., -+ or -+ - -+ (must be a positive range in ascending order) -+ or a mixture -+ ,...,- -+ - irqfixup [HW] - When an interrupt is not handled search all handlers - for it. Intended to get systems with badly broken ---- a/kernel/irq/irqdesc.c -+++ b/kernel/irq/irqdesc.c -@@ -24,10 +24,27 @@ - static struct lock_class_key irq_desc_lock_class; - - #if defined(CONFIG_SMP) -+static int __init irq_affinity_setup(char *str) -+{ -+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -+ cpulist_parse(str, irq_default_affinity); -+ /* -+ * Set at least the boot cpu. We don't want to end up with -+ * bugreports caused by random comandline masks -+ */ -+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity); -+ return 1; -+} -+__setup("irqaffinity=", irq_affinity_setup); -+ - static void __init init_irq_default_affinity(void) - { -- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -- cpumask_setall(irq_default_affinity); -+#ifdef CONFIG_CPUMASK_OFFSTACK -+ if (!irq_default_affinity) -+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -+#endif -+ if (cpumask_empty(irq_default_affinity)) -+ cpumask_setall(irq_default_affinity); - } - #else - static void __init init_irq_default_affinity(void) diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch deleted file mode 100644 index a683d8ae7..000000000 --- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:57 -0500 -Subject: genirq: Disable irqpoll on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Creates long latencies for no value - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - kernel/irq/spurious.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/kernel/irq/spurious.c -+++ b/kernel/irq/spurious.c -@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir - - static int __init irqfixup_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 1; - printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); - printk(KERN_WARNING "This may impact system performance.\n"); -@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644); - - static int __init irqpoll_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 2; - printk(KERN_WARNING "Misrouted IRQ fixup and polling support " - "enabled\n"); diff --git a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch deleted file mode 100644 index 32d608e3b..000000000 --- a/debian/patches/features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch +++ 
/dev/null @@ -1,153 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Joe Korty reported, that __irq_set_affinity_locked() schedules a -workqueue while holding a rawlock which results in a might_sleep() -warning. -This patch moves the invokation into a process context so that we only -wakeup() a process while holding the lock. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/interrupt.h | 2 + - kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-- - 2 files changed, 78 insertions(+), 3 deletions(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -206,6 +206,7 @@ extern void resume_device_irqs(void); - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use - * @work: Work item, for internal use -+ * @list: List item for deferred callbacks - * @notify: Function to be called on change. This will be - * called in process context. - * @release: Function to be called on release. This will be -@@ -217,6 +218,7 @@ struct irq_affinity_notify { - unsigned int irq; - struct kref kref; - struct work_struct work; -+ struct list_head list; - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); - }; ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -183,6 +183,62 @@ static inline void - irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void _irq_affinity_notify(struct irq_affinity_notify *notify); -+static struct task_struct *set_affinity_helper; -+static LIST_HEAD(affinity_list); -+static DEFINE_RAW_SPINLOCK(affinity_list_lock); -+ -+static int set_affinity_thread(void *unused) -+{ -+ while (1) { -+ struct irq_affinity_notify *notify; -+ int empty; -+ -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ raw_spin_lock_irq(&affinity_list_lock); -+ empty = list_empty(&affinity_list); -+ raw_spin_unlock_irq(&affinity_list_lock); -+ -+ if (empty) -+ schedule(); -+ if (kthread_should_stop()) -+ break; -+ set_current_state(TASK_RUNNING); -+try_next: -+ notify = NULL; -+ -+ raw_spin_lock_irq(&affinity_list_lock); -+ if (!list_empty(&affinity_list)) { -+ notify = list_first_entry(&affinity_list, -+ struct irq_affinity_notify, list); -+ list_del_init(¬ify->list); -+ } -+ raw_spin_unlock_irq(&affinity_list_lock); -+ -+ if (!notify) -+ continue; -+ _irq_affinity_notify(notify); -+ goto try_next; -+ } -+ return 0; -+} -+ -+static void init_helper_thread(void) -+{ -+ if (set_affinity_helper) -+ return; -+ set_affinity_helper = kthread_run(set_affinity_thread, NULL, -+ "affinity-cb"); -+ WARN_ON(IS_ERR(set_affinity_helper)); -+} -+#else -+ -+static inline void init_helper_thread(void) { } -+ -+#endif -+ - int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, - bool force) - { -@@ -222,7 +278,17 @@ int irq_set_affinity_locked(struct irq_d - - if (desc->affinity_notify) { - kref_get(&desc->affinity_notify->kref); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_lock(&affinity_list_lock); -+ if (list_empty(&desc->affinity_notify->list)) -+ list_add_tail(&affinity_list, -+ &desc->affinity_notify->list); -+ raw_spin_unlock(&affinity_list_lock); -+ wake_up_process(set_affinity_helper); -+#else - schedule_work(&desc->affinity_notify->work); -+#endif - } - irqd_set(data, IRQD_AFFINITY_SET); - -@@ 
-260,10 +326,8 @@ int irq_set_affinity_hint(unsigned int i - } - EXPORT_SYMBOL_GPL(irq_set_affinity_hint); - --static void irq_affinity_notify(struct work_struct *work) -+static void _irq_affinity_notify(struct irq_affinity_notify *notify) - { -- struct irq_affinity_notify *notify = -- container_of(work, struct irq_affinity_notify, work); - struct irq_desc *desc = irq_to_desc(notify->irq); - cpumask_var_t cpumask; - unsigned long flags; -@@ -285,6 +349,13 @@ static void irq_affinity_notify(struct w - kref_put(¬ify->kref, notify->release); - } - -+static void irq_affinity_notify(struct work_struct *work) -+{ -+ struct irq_affinity_notify *notify = -+ container_of(work, struct irq_affinity_notify, work); -+ _irq_affinity_notify(notify); -+} -+ - /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification -@@ -314,6 +385,8 @@ irq_set_affinity_notifier(unsigned int i - notify->irq = irq; - kref_init(¬ify->kref); - INIT_WORK(¬ify->work, irq_affinity_notify); -+ INIT_LIST_HEAD(¬ify->list); -+ init_helper_thread(); - } - - raw_spin_lock_irqsave(&desc->lock, flags); diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch deleted file mode 100644 index 8ad39843e..000000000 --- a/debian/patches/features/all/rt/genirq-force-threading.patch +++ /dev/null @@ -1,49 +0,0 @@ -Subject: genirq: Force interrupt thread on RT -From: Thomas Gleixner -Date: Sun, 03 Apr 2011 11:57:29 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Force threaded_irqs and optimize the code (force_irqthreads) in regard -to this. - -Signed-off-by: Thomas Gleixner ---- - include/linux/interrupt.h | 6 +++++- - kernel/irq/manage.c | 2 ++ - 2 files changed, 7 insertions(+), 1 deletion(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -379,9 +379,13 @@ extern int irq_set_irqchip_state(unsigne - bool state); - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - extern bool force_irqthreads; -+# else -+# define force_irqthreads (true) -+# endif - #else --#define force_irqthreads (0) -+#define force_irqthreads (false) - #endif - - #ifndef __ARCH_SET_SOFTIRQ_PENDING ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -22,6 +22,7 @@ - #include "internals.h" - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - __read_mostly bool force_irqthreads; - - static int __init setup_forced_irqthreads(char *arg) -@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread - return 0; - } - early_param("threadirqs", setup_forced_irqthreads); -+# endif - #endif - - static void __synchronize_hardirq(struct irq_desc *desc) diff --git a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch deleted file mode 100644 index a6d192a42..000000000 --- a/debian/patches/features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Mike Galbraith -Date: Tue, 24 Mar 2015 08:14:49 +0100 -Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -do_set_cpus_allowed() is not safe vs ->sched_class change. 
- -crash> bt -PID: 11676 TASK: ffff88026f979da0 CPU: 22 COMMAND: "sync_unplug/22" - #0 [ffff880274d25bc8] machine_kexec at ffffffff8103b41c - #1 [ffff880274d25c18] crash_kexec at ffffffff810d881a - #2 [ffff880274d25cd8] oops_end at ffffffff81525818 - #3 [ffff880274d25cf8] do_invalid_op at ffffffff81003096 - #4 [ffff880274d25d90] invalid_op at ffffffff8152d3de - [exception RIP: set_cpus_allowed_rt+18] - RIP: ffffffff8109e012 RSP: ffff880274d25e48 RFLAGS: 00010202 - RAX: ffffffff8109e000 RBX: ffff88026f979da0 RCX: ffff8802770cb6e8 - RDX: 0000000000000000 RSI: ffffffff81add700 RDI: ffff88026f979da0 - RBP: ffff880274d25e78 R8: ffffffff816112e0 R9: 0000000000000001 - R10: 0000000000000001 R11: 0000000000011940 R12: ffff88026f979da0 - R13: ffff8802770cb6d0 R14: ffff880274d25fd8 R15: 0000000000000000 - ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 - #5 [ffff880274d25e60] do_set_cpus_allowed at ffffffff8108e65f - #6 [ffff880274d25e80] sync_unplug_thread at ffffffff81058c08 - #7 [ffff880274d25ed8] kthread at ffffffff8107cad6 - #8 [ffff880274d25f50] ret_from_fork at ffffffff8152bbbc -crash> task_struct ffff88026f979da0 | grep class - sched_class = 0xffffffff816111e0 , - -Signed-off-by: Mike Galbraith - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -268,7 +268,7 @@ static int sync_unplug_thread(void *data - * we don't want any more work on this CPU. - */ - current->flags &= ~PF_NO_SETAFFINITY; -- do_set_cpus_allowed(current, cpu_present_mask); -+ set_cpus_allowed_ptr(current, cpu_present_mask); - migrate_me(); - return 0; - } diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch deleted file mode 100644 index 686aef6a7..000000000 --- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch +++ /dev/null @@ -1,205 +0,0 @@ -Subject: hotplug: Lightweight get online cpus -From: Thomas Gleixner -Date: Wed, 15 Jun 2011 12:36:06 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -get_online_cpus() is a heavy weight function which involves a global -mutex. migrate_disable() wants a simpler construct which prevents only -a CPU from going doing while a task is in a migrate disabled section. - -Implement a per cpu lockless mechanism, which serializes only in the -real unplug case on a global mutex. That serialization affects only -tasks on the cpu which should be brought down. - -Signed-off-by: Thomas Gleixner ---- - include/linux/cpu.h | 7 +-- - kernel/cpu.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++- - 2 files changed, 122 insertions(+), 4 deletions(-) - ---- a/include/linux/cpu.h -+++ b/include/linux/cpu.h -@@ -222,9 +222,6 @@ static inline void smpboot_thread_init(v - #endif /* CONFIG_SMP */ - extern struct bus_type cpu_subsys; - --static inline void pin_current_cpu(void) { } --static inline void unpin_current_cpu(void) { } -- - #ifdef CONFIG_HOTPLUG_CPU - /* Stop CPUs going up and down. 
*/ - -@@ -234,6 +231,8 @@ extern void get_online_cpus(void); - extern void put_online_cpus(void); - extern void cpu_hotplug_disable(void); - extern void cpu_hotplug_enable(void); -+extern void pin_current_cpu(void); -+extern void unpin_current_cpu(void); - #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) - #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) - #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) -@@ -251,6 +250,8 @@ static inline void cpu_hotplug_done(void - #define put_online_cpus() do { } while (0) - #define cpu_hotplug_disable() do { } while (0) - #define cpu_hotplug_enable() do { } while (0) -+static inline void pin_current_cpu(void) { } -+static inline void unpin_current_cpu(void) { } - #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - /* These aren't inline functions due to a GCC bug. */ ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -89,6 +89,100 @@ static struct { - #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) - #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) - -+struct hotplug_pcp { -+ struct task_struct *unplug; -+ int refcount; -+ struct completion synced; -+}; -+ -+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); -+ -+/** -+ * pin_current_cpu - Prevent the current cpu from being unplugged -+ * -+ * Lightweight version of get_online_cpus() to prevent cpu from being -+ * unplugged when code runs in a migration disabled region. -+ * -+ * Must be called with preemption disabled (preempt_count = 1)! -+ */ -+void pin_current_cpu(void) -+{ -+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); -+ -+retry: -+ if (!hp->unplug || hp->refcount || preempt_count() > 1 || -+ hp->unplug == current) { -+ hp->refcount++; -+ return; -+ } -+ preempt_enable(); -+ mutex_lock(&cpu_hotplug.lock); -+ mutex_unlock(&cpu_hotplug.lock); -+ preempt_disable(); -+ goto retry; -+} -+ -+/** -+ * unpin_current_cpu - Allow unplug of current cpu -+ * -+ * Must be called with preemption or interrupts disabled! -+ */ -+void unpin_current_cpu(void) -+{ -+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); -+ -+ WARN_ON(hp->refcount <= 0); -+ -+ /* This is safe. sync_unplug_thread is pinned to this cpu */ -+ if (!--hp->refcount && hp->unplug && hp->unplug != current) -+ wake_up_process(hp->unplug); -+} -+ -+/* -+ * FIXME: Is this really correct under all circumstances ? -+ */ -+static int sync_unplug_thread(void *data) -+{ -+ struct hotplug_pcp *hp = data; -+ -+ preempt_disable(); -+ hp->unplug = current; -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (hp->refcount) { -+ schedule_preempt_disabled(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+ set_current_state(TASK_RUNNING); -+ preempt_enable(); -+ complete(&hp->synced); -+ return 0; -+} -+ -+/* -+ * Start the sync_unplug_thread on the target cpu and wait for it to -+ * complete. 
-+ */ -+static int cpu_unplug_begin(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ struct task_struct *tsk; -+ -+ init_completion(&hp->synced); -+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); -+ if (IS_ERR(tsk)) -+ return (PTR_ERR(tsk)); -+ kthread_bind(tsk, cpu); -+ wake_up_process(tsk); -+ wait_for_completion(&hp->synced); -+ return 0; -+} -+ -+static void cpu_unplug_done(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ hp->unplug = NULL; -+} - - void get_online_cpus(void) - { -@@ -338,13 +432,14 @@ static int take_cpu_down(void *_param) - /* Requires cpu_add_remove_lock to be held */ - static int _cpu_down(unsigned int cpu, int tasks_frozen) - { -- int err, nr_calls = 0; -+ int mycpu, err, nr_calls = 0; - void *hcpu = (void *)(long)cpu; - unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; - struct take_cpu_down_param tcd_param = { - .mod = mod, - .hcpu = hcpu, - }; -+ cpumask_var_t cpumask; - - if (num_online_cpus() == 1) - return -EBUSY; -@@ -352,7 +447,27 @@ static int _cpu_down(unsigned int cpu, i - if (!cpu_online(cpu)) - return -EINVAL; - -+ /* Move the downtaker off the unplug cpu */ -+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) -+ return -ENOMEM; -+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); -+ set_cpus_allowed_ptr(current, cpumask); -+ free_cpumask_var(cpumask); -+ preempt_disable(); -+ mycpu = smp_processor_id(); -+ if (mycpu == cpu) { -+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); -+ preempt_enable(); -+ return -EBUSY; -+ } -+ preempt_enable(); -+ - cpu_hotplug_begin(); -+ err = cpu_unplug_begin(cpu); -+ if (err) { -+ printk("cpu_unplug_begin(%d) failed\n", cpu); -+ goto out_cancel; -+ } - - err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); - if (err) { -@@ -424,6 +539,8 @@ static int _cpu_down(unsigned int cpu, i - check_for_tasks(cpu); - - out_release: -+ cpu_unplug_done(cpu); -+out_cancel: - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch deleted file mode 100644 index cfad64c67..000000000 --- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch +++ /dev/null @@ -1,25 +0,0 @@ -Subject: hotplug: sync_unplug: No "\n" in task name -From: Yong Zhang -Date: Sun, 16 Oct 2011 18:56:43 +0800 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Otherwise the output will look a little odd. 
- -Signed-off-by: Yong Zhang -Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner ---- - kernel/cpu.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -168,7 +168,7 @@ static int cpu_unplug_begin(unsigned int - struct task_struct *tsk; - - init_completion(&hp->synced); -- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); -+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); - if (IS_ERR(tsk)) - return (PTR_ERR(tsk)); - kthread_bind(tsk, cpu); diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch deleted file mode 100644 index ac9d1a560..000000000 --- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch +++ /dev/null @@ -1,40 +0,0 @@ -Subject: hotplug: Use migrate disable on unplug -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 19:35:29 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Migration needs to be disabled accross the unplug handling to make -sure that the unplug thread is off the unplugged cpu. - -Signed-off-by: Thomas Gleixner ---- - kernel/cpu.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -455,14 +455,13 @@ static int _cpu_down(unsigned int cpu, i - cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); - set_cpus_allowed_ptr(current, cpumask); - free_cpumask_var(cpumask); -- preempt_disable(); -+ migrate_disable(); - mycpu = smp_processor_id(); - if (mycpu == cpu) { - printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); -- preempt_enable(); -+ migrate_enable(); - return -EBUSY; - } -- preempt_enable(); - - cpu_hotplug_begin(); - err = cpu_unplug_begin(cpu); -@@ -543,6 +542,7 @@ static int _cpu_down(unsigned int cpu, i - out_release: - cpu_unplug_done(cpu); - out_cancel: -+ migrate_enable(); - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch deleted file mode 100644 index e17cce4d1..000000000 --- a/debian/patches/features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ /dev/null @@ -1,118 +0,0 @@ -From: Yang Shi -Date: Mon, 16 Sep 2013 14:09:19 -0700 -Subject: hrtimer: Move schedule_work call to helper thread -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When run ltp leapsec_timer test, the following call trace is caught: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 -in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1 -Preemption disabled at:[] cpu_startup_entry+0x133/0x310 - -CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2 -Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 -ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58 -ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0 -ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200 -Call Trace: - [] dump_stack+0x19/0x1b -[] __might_sleep+0xf1/0x170 -[] rt_spin_lock+0x20/0x50 -[] queue_work_on+0x61/0x100 -[] clock_was_set_delayed+0x21/0x30 -[] do_timer+0x40e/0x660 -[] tick_do_update_jiffies64+0xf7/0x140 -[] tick_check_idle+0x92/0xc0 -[] 
irq_enter+0x57/0x70 -[] smp_apic_timer_interrupt+0x3e/0x9b -[] apic_timer_interrupt+0x6a/0x70 - [] ? cpuidle_enter_state+0x4c/0xc0 -[] cpuidle_idle_call+0xd8/0x2d0 -[] arch_cpu_idle+0xe/0x30 -[] cpu_startup_entry+0x19e/0x310 -[] start_secondary+0x1ad/0x1b0 - -The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which -calls schedule_work. - -Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's -not safe to call schedule_work in interrupt context. - -Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca -(rt,ntp: Move call to schedule_delayed_work() to helper thread) -from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which -makes a similar change. - -add a helper thread which does the call to schedule_work and wake up that -thread instead of calling schedule_work directly. - - -Signed-off-by: Yang Shi -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 40 insertions(+) - ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -48,6 +48,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -713,6 +714,44 @@ static void clock_was_set_work(struct wo - - static DECLARE_WORK(hrtimer_work, clock_was_set_work); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * RT can not call schedule_work from real interrupt context. -+ * Need to make a thread to do the real work. -+ */ -+static struct task_struct *clock_set_delay_thread; -+static bool do_clock_set_delay; -+ -+static int run_clock_set_delay(void *ignore) -+{ -+ while (!kthread_should_stop()) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ if (do_clock_set_delay) { -+ do_clock_set_delay = false; -+ schedule_work(&hrtimer_work); -+ } -+ schedule(); -+ } -+ __set_current_state(TASK_RUNNING); -+ return 0; -+} -+ -+void clock_was_set_delayed(void) -+{ -+ do_clock_set_delay = true; -+ /* Make visible before waking up process */ -+ smp_wmb(); -+ wake_up_process(clock_set_delay_thread); -+} -+ -+static __init int create_clock_set_delay_thread(void) -+{ -+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd"); -+ BUG_ON(!clock_set_delay_thread); -+ return 0; -+} -+early_initcall(create_clock_set_delay_thread); -+#else /* PREEMPT_RT_FULL */ - /* - * Called from timekeeping and resume code to reprogramm the hrtimer - * interrupt device on all cpus. 
-@@ -721,6 +760,7 @@ void clock_was_set_delayed(void) - { - schedule_work(&hrtimer_work); - } -+#endif - - #else - diff --git a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch b/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch deleted file mode 100644 index 9ceb5ae7a..000000000 --- a/debian/patches/features/all/rt/hrtimer-enfore-64byte-alignment.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 23 Dec 2015 20:57:41 +0100 -Subject: hrtimer: enfore 64byte alignment -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds -a list_head expired to struct hrtimer_clock_base and with it we run into -BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/hrtimer.h | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -125,11 +125,7 @@ struct hrtimer_sleeper { - struct task_struct *task; - }; - --#ifdef CONFIG_64BIT - # define HRTIMER_CLOCK_BASE_ALIGN 64 --#else --# define HRTIMER_CLOCK_BASE_ALIGN 32 --#endif - - /** - * struct hrtimer_clock_base - the timer base for a specific clock diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch deleted file mode 100644 index ef2f16bff..000000000 --- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch +++ /dev/null @@ -1,336 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:31 -0500 -Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In preempt-rt we can not call the callbacks which take sleeping locks -from the timer interrupt context. - -Bring back the softirq split for now, until we fixed the signal -delivery problem for real. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Ingo Molnar - ---- - include/linux/hrtimer.h | 7 ++ - kernel/sched/core.c | 1 - kernel/sched/rt.c | 1 - kernel/time/hrtimer.c | 137 +++++++++++++++++++++++++++++++++++++++++++---- - kernel/time/tick-sched.c | 1 - kernel/watchdog.c | 1 - 6 files changed, 139 insertions(+), 9 deletions(-) - ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -87,6 +87,8 @@ enum hrtimer_restart { - * @function: timer expiry callback function - * @base: pointer to the timer base (per cpu and per clock) - * @state: state information (See bit values above) -+ * @cb_entry: list entry to defer timers from hardirq context -+ * @irqsafe: timer can run in hardirq context - * @praecox: timer expiry time if expired at the time of programming - * @start_pid: timer statistics field to store the pid of the task which - * started the timer -@@ -103,6 +105,8 @@ struct hrtimer { - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - unsigned long state; -+ struct list_head cb_entry; -+ int irqsafe; - #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - ktime_t praecox; - #endif -@@ -134,6 +138,7 @@ struct hrtimer_sleeper { - * timer to a base on another cpu. - * @clockid: clock id for per_cpu support - * @active: red black tree root node for the active timers -+ * @expired: list head for deferred timers. 
- * @get_time: function to retrieve the current time of the clock - * @offset: offset of this clock to the monotonic base - */ -@@ -142,6 +147,7 @@ struct hrtimer_clock_base { - int index; - clockid_t clockid; - struct timerqueue_head active; -+ struct list_head expired; - ktime_t (*get_time)(void); - ktime_t offset; - } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); -@@ -185,6 +191,7 @@ struct hrtimer_cpu_base { - raw_spinlock_t lock; - seqcount_t seq; - struct hrtimer *running; -+ struct hrtimer *running_soft; - unsigned int cpu; - unsigned int active_bases; - unsigned int clock_was_set_seq; ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -438,6 +438,7 @@ static void init_rq_hrtick(struct rq *rq - - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rq->hrtick_timer.function = hrtick; -+ rq->hrtick_timer.irqsafe = 1; - } - #else /* CONFIG_SCHED_HRTICK */ - static inline void hrtick_clear(struct rq *rq) ---- a/kernel/sched/rt.c -+++ b/kernel/sched/rt.c -@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwid - - hrtimer_init(&rt_b->rt_period_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ rt_b->rt_period_timer.irqsafe = 1; - rt_b->rt_period_timer.function = sched_rt_period_timer; - } - ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -730,11 +730,8 @@ static inline int hrtimer_is_hres_enable - static inline void hrtimer_switch_to_hres(void) { } - static inline void - hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } --static inline int hrtimer_reprogram(struct hrtimer *timer, -- struct hrtimer_clock_base *base) --{ -- return 0; --} -+static inline void hrtimer_reprogram(struct hrtimer *timer, -+ struct hrtimer_clock_base *base) { } - static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } - static inline void retrigger_next_event(void *arg) { } - -@@ -883,7 +880,7 @@ void hrtimer_wait_for_timer(const struct - { - struct hrtimer_clock_base *base = timer->base; - -- if (base && base->cpu_base && !hrtimer_hres_active()) -+ if (base && base->cpu_base && !timer->irqsafe) - wait_event(base->cpu_base->wait, - !(hrtimer_callback_running(timer))); - } -@@ -933,6 +930,11 @@ static void __remove_hrtimer(struct hrti - if (!(state & HRTIMER_STATE_ENQUEUED)) - return; - -+ if (unlikely(!list_empty(&timer->cb_entry))) { -+ list_del_init(&timer->cb_entry); -+ return; -+ } -+ - if (!timerqueue_del(&base->active, &timer->node)) - cpu_base->active_bases &= ~(1 << base->index); - -@@ -1162,6 +1164,7 @@ static void __hrtimer_init(struct hrtime - - base = hrtimer_clockid_to_base(clock_id); - timer->base = &cpu_base->clock_base[base]; -+ INIT_LIST_HEAD(&timer->cb_entry); - timerqueue_init(&timer->node); - - #ifdef CONFIG_TIMER_STATS -@@ -1202,6 +1205,7 @@ bool hrtimer_active(const struct hrtimer - seq = raw_read_seqcount_begin(&cpu_base->seq); - - if (timer->state != HRTIMER_STATE_INACTIVE || -+ cpu_base->running_soft == timer || - cpu_base->running == timer) - return true; - -@@ -1292,12 +1296,112 @@ static void __run_hrtimer(struct hrtimer - cpu_base->running = NULL; - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, -+ struct hrtimer_clock_base *base) -+{ -+ int leftmost; -+ -+ if (restart != HRTIMER_NORESTART && -+ !(timer->state & HRTIMER_STATE_ENQUEUED)) { -+ -+ leftmost = enqueue_hrtimer(timer, base); -+ if (!leftmost) -+ return; -+#ifdef CONFIG_HIGH_RES_TIMERS -+ if (!hrtimer_is_hres_active(timer)) { -+ /* -+ * Kick to reschedule the next tick to handle 
the new timer -+ * on dynticks target. -+ */ -+ if (base->cpu_base->nohz_active) -+ wake_up_nohz_cpu(base->cpu_base->cpu); -+ } else { -+ -+ hrtimer_reprogram(timer, base); -+ } -+#endif -+ } -+} -+ -+/* -+ * The changes in mainline which removed the callback modes from -+ * hrtimer are not yet working with -rt. The non wakeup_process() -+ * based callbacks which involve sleeping locks need to be treated -+ * seperately. -+ */ -+static void hrtimer_rt_run_pending(void) -+{ -+ enum hrtimer_restart (*fn)(struct hrtimer *); -+ struct hrtimer_cpu_base *cpu_base; -+ struct hrtimer_clock_base *base; -+ struct hrtimer *timer; -+ int index, restart; -+ -+ local_irq_disable(); -+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); -+ -+ raw_spin_lock(&cpu_base->lock); -+ -+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { -+ base = &cpu_base->clock_base[index]; -+ -+ while (!list_empty(&base->expired)) { -+ timer = list_first_entry(&base->expired, -+ struct hrtimer, cb_entry); -+ -+ /* -+ * Same as the above __run_hrtimer function -+ * just we run with interrupts enabled. -+ */ -+ debug_deactivate(timer); -+ cpu_base->running_soft = timer; -+ raw_write_seqcount_barrier(&cpu_base->seq); -+ -+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); -+ timer_stats_account_hrtimer(timer); -+ fn = timer->function; -+ -+ raw_spin_unlock_irq(&cpu_base->lock); -+ restart = fn(timer); -+ raw_spin_lock_irq(&cpu_base->lock); -+ -+ hrtimer_rt_reprogram(restart, timer, base); -+ raw_write_seqcount_barrier(&cpu_base->seq); -+ -+ WARN_ON_ONCE(cpu_base->running_soft != timer); -+ cpu_base->running_soft = NULL; -+ } -+ } -+ -+ raw_spin_unlock_irq(&cpu_base->lock); -+ -+ wake_up_timer_waiters(cpu_base); -+} -+ -+static int hrtimer_rt_defer(struct hrtimer *timer) -+{ -+ if (timer->irqsafe) -+ return 0; -+ -+ __remove_hrtimer(timer, timer->base, timer->state, 0); -+ list_add_tail(&timer->cb_entry, &timer->base->expired); -+ return 1; -+} -+ -+#else -+ -+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } -+ -+#endif -+ - static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); - - static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) - { - struct hrtimer_clock_base *base = cpu_base->clock_base; - unsigned int active = cpu_base->active_bases; -+ int raise = 0; - - for (; active; base++, active >>= 1) { - struct timerqueue_node *node; -@@ -1337,9 +1441,14 @@ static void __hrtimer_run_queues(struct - if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) - break; - -- __run_hrtimer(cpu_base, base, timer, &basenow); -+ if (!hrtimer_rt_defer(timer)) -+ __run_hrtimer(cpu_base, base, timer, &basenow); -+ else -+ raise = 1; - } - } -+ if (raise) -+ raise_softirq_irqoff(HRTIMER_SOFTIRQ); - } - - #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1481,8 +1590,6 @@ void hrtimer_run_queues(void) - now = hrtimer_update_base(cpu_base); - __hrtimer_run_queues(cpu_base, now); - raw_spin_unlock(&cpu_base->lock); -- -- wake_up_timer_waiters(cpu_base); - } - - /* -@@ -1504,6 +1611,7 @@ static enum hrtimer_restart hrtimer_wake - void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) - { - sl->timer.function = hrtimer_wakeup; -+ sl->timer.irqsafe = 1; - sl->task = task; - } - EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); -@@ -1638,6 +1746,7 @@ static void init_hrtimers_cpu(int cpu) - for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { - cpu_base->clock_base[i].cpu_base = cpu_base; - timerqueue_init_head(&cpu_base->clock_base[i].active); -+ 
INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); - } - - cpu_base->cpu = cpu; -@@ -1742,11 +1851,21 @@ static struct notifier_block hrtimers_nb - .notifier_call = hrtimer_cpu_notify, - }; - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void run_hrtimer_softirq(struct softirq_action *h) -+{ -+ hrtimer_rt_run_pending(); -+} -+#endif -+ - void __init hrtimers_init(void) - { - hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - register_cpu_notifier(&hrtimers_nb); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); -+#endif - } - - /** ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -1105,6 +1105,7 @@ void tick_setup_sched_timer(void) - * Emulate tick processing via per-CPU hrtimers: - */ - hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ ts->sched_timer.irqsafe = 1; - ts->sched_timer.function = tick_sched_timer; - - /* Get the next period (per cpu) */ ---- a/kernel/watchdog.c -+++ b/kernel/watchdog.c -@@ -507,6 +507,7 @@ static void watchdog_enable(unsigned int - /* kick off the timer for the hardlockup detector */ - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = watchdog_timer_fn; -+ hrtimer->irqsafe = 1; - - /* Enable the perf event */ - watchdog_nmi_enable(cpu); diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch deleted file mode 100644 index 92927cd61..000000000 --- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch +++ /dev/null @@ -1,205 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: hrtimers: Prepare full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Make cancellation of a running callback in softirq context safe -against preemption. 
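The caller-visible consequence is that hrtimer_cancel() may now sleep on the per-base waitqueue added below instead of busy-waiting with cpu_relax(), so on RT it must only be called from process context. A hedged sketch of the pattern a teardown path should follow (timer and function names are illustrative):

    #include <linux/hrtimer.h>

    static struct hrtimer sample_timer;    /* hypothetical device timer */

    static void sample_teardown(void)
    {
            /*
             * May block until a callback running in the hrtimer softirq
             * thread finishes (wait_event() on cpu_base->wait); never
             * call this from atomic context on PREEMPT_RT.
             */
            hrtimer_cancel(&sample_timer);
    }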
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - include/linux/hrtimer.h | 12 +++++++++++- - kernel/time/hrtimer.c | 33 ++++++++++++++++++++++++++++++++- - kernel/time/itimer.c | 1 + - kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++ - 4 files changed, 77 insertions(+), 2 deletions(-) - ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -205,6 +205,9 @@ struct hrtimer_cpu_base { - unsigned int nr_hangs; - unsigned int max_hang_time; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ wait_queue_head_t wait; -+#endif - struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - } ____cacheline_aligned; - -@@ -393,6 +396,13 @@ static inline void hrtimer_restart(struc - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - -+/* Softirq preemption could deadlock timer removal */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); -+#else -+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) -+#endif -+ - /* Query timers: */ - extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); - -@@ -412,7 +422,7 @@ static inline int hrtimer_is_queued(stru - * Helper function to check, whether the timer is running the callback - * function - */ --static inline int hrtimer_callback_running(struct hrtimer *timer) -+static inline int hrtimer_callback_running(const struct hrtimer *timer) - { - return timer->base->cpu_base->running == timer; - } ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -866,6 +866,32 @@ u64 hrtimer_forward(struct hrtimer *time - } - EXPORT_SYMBOL_GPL(hrtimer_forward); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define wake_up_timer_waiters(b) wake_up(&(b)->wait) -+ -+/** -+ * hrtimer_wait_for_timer - Wait for a running timer -+ * -+ * @timer: timer to wait for -+ * -+ * The function waits in case the timers callback function is -+ * currently executed on the waitqueue of the timer base. The -+ * waitqueue is woken up after the timer callback function has -+ * finished execution. 
-+ */ -+void hrtimer_wait_for_timer(const struct hrtimer *timer) -+{ -+ struct hrtimer_clock_base *base = timer->base; -+ -+ if (base && base->cpu_base && !hrtimer_hres_active()) -+ wait_event(base->cpu_base->wait, -+ !(hrtimer_callback_running(timer))); -+} -+ -+#else -+# define wake_up_timer_waiters(b) do { } while (0) -+#endif -+ - /* - * enqueue_hrtimer - internal function to (re)start a timer - * -@@ -1076,7 +1102,7 @@ int hrtimer_cancel(struct hrtimer *timer - - if (ret >= 0) - return ret; -- cpu_relax(); -+ hrtimer_wait_for_timer(timer); - } - } - EXPORT_SYMBOL_GPL(hrtimer_cancel); -@@ -1455,6 +1481,8 @@ void hrtimer_run_queues(void) - now = hrtimer_update_base(cpu_base); - __hrtimer_run_queues(cpu_base, now); - raw_spin_unlock(&cpu_base->lock); -+ -+ wake_up_timer_waiters(cpu_base); - } - - /* -@@ -1614,6 +1642,9 @@ static void init_hrtimers_cpu(int cpu) - - cpu_base->cpu = cpu; - hrtimer_init_hres(cpu_base); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ init_waitqueue_head(&cpu_base->wait); -+#endif - } - - #ifdef CONFIG_HOTPLUG_CPU ---- a/kernel/time/itimer.c -+++ b/kernel/time/itimer.c -@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime - /* We are sharing ->siglock with it_real_fn() */ - if (hrtimer_try_to_cancel(timer) < 0) { - spin_unlock_irq(&tsk->sighand->siglock); -+ hrtimer_wait_for_timer(&tsk->signal->real_timer); - goto again; - } - expires = timeval_to_ktime(value->it_value); ---- a/kernel/time/posix-timers.c -+++ b/kernel/time/posix-timers.c -@@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ - return overrun; - } - -+/* -+ * Protected by RCU! -+ */ -+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (kc->timer_set == common_timer_set) -+ hrtimer_wait_for_timer(&timr->it.real.timer); -+ else -+ /* FIXME: Whacky hack for posix-cpu-timers */ -+ schedule_timeout(1); -+#endif -+} -+ - /* Set a POSIX.1b interval timer. */ - /* timr->it_lock is taken. */ - static int -@@ -905,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, - if (!timr) - return -EINVAL; - -+ rcu_read_lock(); - kc = clockid_to_kclock(timr->it_clock); - if (WARN_ON_ONCE(!kc || !kc->timer_set)) - error = -EINVAL; -@@ -913,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, - - unlock_timer(timr, flag); - if (error == TIMER_RETRY) { -+ timer_wait_for_callback(kc, timr); - rtn = NULL; // We already got the old time... 
-+ rcu_read_unlock(); - goto retry; - } -+ rcu_read_unlock(); - - if (old_setting && !error && - copy_to_user(old_setting, &old_spec, sizeof (old_spec))) -@@ -953,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t - if (!timer) - return -EINVAL; - -+ rcu_read_lock(); - if (timer_delete_hook(timer) == TIMER_RETRY) { - unlock_timer(timer, flags); -+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -+ timer); -+ rcu_read_unlock(); - goto retry_delete; - } -+ rcu_read_unlock(); - - spin_lock(¤t->sighand->siglock); - list_del(&timer->list); -@@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itime - retry_delete: - spin_lock_irqsave(&timer->it_lock, flags); - -+ /* On RT we can race with a deletion */ -+ if (!timer->it_signal) { -+ unlock_timer(timer, flags); -+ return; -+ } -+ - if (timer_delete_hook(timer) == TIMER_RETRY) { -+ rcu_read_lock(); - unlock_timer(timer, flags); -+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -+ timer); -+ rcu_read_unlock(); - goto retry_delete; - } - list_del(&timer->list); diff --git a/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch b/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch deleted file mode 100644 index 73882a348..000000000 --- a/debian/patches/features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Mike Galbraith -Date: Fri, 30 Aug 2013 07:57:25 +0200 -Subject: hwlat-detector: Don't ignore threshold module parameter -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If the user specified a threshold at module load time, use it. - - -Acked-by: Steven Rostedt -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/misc/hwlat_detector.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/misc/hwlat_detector.c -+++ b/drivers/misc/hwlat_detector.c -@@ -414,7 +414,7 @@ static int init_stats(void) - goto out; - - __reset_stats(); -- data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ -+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ - data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ - data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ - diff --git a/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch deleted file mode 100644 index fc309734b..000000000 --- a/debian/patches/features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch +++ /dev/null @@ -1,126 +0,0 @@ -From: Steven Rostedt -Date: Mon, 19 Aug 2013 17:33:25 -0400 -Subject: hwlat-detector: Update hwlat_detector to add outer loop detection -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The hwlat_detector reads two timestamps in a row, then reports any -gap between those calls. The problem is, it misses everything between -the second reading of the time stamp to the first reading of the time stamp -in the next loop. That's were most of the time is spent, which means, -chances are likely that it will miss all hardware latencies. This -defeats the purpose. - -By also testing the first time stamp from the previous loop second -time stamp (the outer loop), we are more likely to find a latency. 
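In outline: the loop keeps the second timestamp from the previous iteration and also measures the gap from it to the next first timestamp, so latencies landing between iterations are caught as well. A self-contained user-space sketch of the same two-gap sampling (CLOCK_MONOTONIC, the 10 us threshold and the 0.5 s width are stand-ins here, not the module's actual clock or defaults):

/* Two-timestamp sampling with inner and outer gap detection.
 * Stand-ins: CLOCK_MONOTONIC for the kernel clock, fixed threshold
 * and width values; an illustration, not the module code itself. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	const int64_t threshold = 10;		/* us, hypothetical */
	const int64_t width = 500000;		/* us, sampling width */
	int64_t start = now_us(), last_t2 = 0;
	int64_t sample = 0, outer_sample = 0;

	do {
		int64_t t1 = now_us();		/* inner gap is t1 -> t2 */
		int64_t t2 = now_us();

		if (last_t2) {
			/* outer gap: previous t2 -> this t1 */
			int64_t outer = t1 - last_t2;

			if (outer > outer_sample)
				outer_sample = outer;
		}
		last_t2 = t2;

		if (t2 - t1 > sample)
			sample = t2 - t1;
	} while (now_us() - start <= width);

	if (sample > threshold || outer_sample > threshold)
		printf("inner %lld us, outer %lld us\n",
		       (long long)sample, (long long)outer_sample);
	return 0;
}

The effect of the second comparison is visible in the report below: a latency can show up in either column, and before this change the outer column's events were simply lost.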
- -Setting the threshold to 1, here's what the report now looks like: - -1347415723.0232202770 0 2 -1347415725.0234202822 0 2 -1347415727.0236202875 0 2 -1347415729.0238202928 0 2 -1347415731.0240202980 0 2 -1347415734.0243203061 0 2 -1347415736.0245203113 0 2 -1347415738.0247203166 2 0 -1347415740.0249203219 0 3 -1347415742.0251203272 0 3 -1347415743.0252203299 0 3 -1347415745.0254203351 0 2 -1347415747.0256203404 0 2 -1347415749.0258203457 0 2 -1347415751.0260203510 0 2 -1347415754.0263203589 0 2 -1347415756.0265203642 0 2 -1347415758.0267203695 0 2 -1347415760.0269203748 0 2 -1347415762.0271203801 0 2 -1347415764.0273203853 2 0 - -There's some hardware latency that takes 2 microseconds to run. - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------ - 1 file changed, 26 insertions(+), 6 deletions(-) - ---- a/drivers/misc/hwlat_detector.c -+++ b/drivers/misc/hwlat_detector.c -@@ -143,6 +143,7 @@ static void detector_exit(void); - struct sample { - u64 seqnum; /* unique sequence */ - u64 duration; /* ktime delta */ -+ u64 outer_duration; /* ktime delta (outer loop) */ - struct timespec timestamp; /* wall time */ - unsigned long lost; - }; -@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample( - */ - static int get_sample(void *unused) - { -- ktime_t start, t1, t2; -+ ktime_t start, t1, t2, last_t2; - s64 diff, total = 0; - u64 sample = 0; -+ u64 outer_sample = 0; - int ret = 1; - -+ last_t2.tv64 = 0; - start = ktime_get(); /* start timestamp */ - - do { -@@ -231,7 +234,22 @@ static int get_sample(void *unused) - t1 = ktime_get(); /* we'll look for a discontinuity */ - t2 = ktime_get(); - -+ if (last_t2.tv64) { -+ /* Check the delta from outer loop (t2 to next t1) */ -+ diff = ktime_to_us(ktime_sub(t1, last_t2)); -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ if (diff > outer_sample) -+ outer_sample = diff; -+ } -+ last_t2 = t2; -+ - total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ -+ -+ /* This checks the inner loop (t1 to t2) */ - diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ - - /* This shouldn't happen */ -@@ -246,12 +264,13 @@ static int get_sample(void *unused) - } while (total <= data.sample_width); - - /* If we exceed the threshold value, we have found a hardware latency */ -- if (sample > data.threshold) { -+ if (sample > data.threshold || outer_sample > data.threshold) { - struct sample s; - - data.count++; - s.seqnum = data.count; - s.duration = sample; -+ s.outer_duration = outer_sample; - s.timestamp = CURRENT_TIME; - __buffer_add_sample(&s); - -@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct - } - } - -- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", -- sample->timestamp.tv_sec, -- sample->timestamp.tv_nsec, -- sample->duration); -+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n", -+ sample->timestamp.tv_sec, -+ sample->timestamp.tv_nsec, -+ sample->duration, -+ sample->outer_duration); - - - /* handling partial reads is more trouble than it's worth */ diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch deleted file mode 100644 index 3689dcb0f..000000000 --- a/debian/patches/features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch +++ /dev/null @@ -1,184 +0,0 @@ -From: Steven Rostedt -Date: 
Mon, 19 Aug 2013 17:33:27 -0400 -Subject: hwlat-detector: Use thread instead of stop machine -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -There's no reason to use stop machine to search for hardware latency. -Simply disabling interrupts while running the loop will do enough to -check if something comes in that wasn't disabled by interrupts being -off, which is exactly what stop machine does. - -Instead of using stop machine, just have the thread disable interrupts -while it checks for hardware latency. - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/misc/hwlat_detector.c | 60 ++++++++++++++++++------------------------ - 1 file changed, 26 insertions(+), 34 deletions(-) - ---- a/drivers/misc/hwlat_detector.c -+++ b/drivers/misc/hwlat_detector.c -@@ -41,7 +41,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -107,7 +106,6 @@ struct data; /* Global state */ - /* Sampling functions */ - static int __buffer_add_sample(struct sample *sample); - static struct sample *buffer_get_sample(struct sample *sample); --static int get_sample(void *unused); - - /* Threading and state */ - static int kthread_fn(void *unused); -@@ -149,7 +147,7 @@ struct sample { - unsigned long lost; - }; - --/* keep the global state somewhere. Mostly used under stop_machine. */ -+/* keep the global state somewhere. */ - static struct data { - - struct mutex lock; /* protect changes */ -@@ -172,7 +170,7 @@ static struct data { - * @sample: The new latency sample value - * - * This receives a new latency sample and records it in a global ring buffer. -- * No additional locking is used in this case - suited for stop_machine use. -+ * No additional locking is used in this case. - */ - static int __buffer_add_sample(struct sample *sample) - { -@@ -229,18 +227,18 @@ static struct sample *buffer_get_sample( - #endif - /** - * get_sample - sample the CPU TSC and look for likely hardware latencies -- * @unused: This is not used but is a part of the stop_machine API - * - * Used to repeatedly capture the CPU TSC (or similar), looking for potential -- * hardware-induced latency. Called under stop_machine, with data.lock held. -+ * hardware-induced latency. Called with interrupts disabled and with -+ * data.lock held. - */ --static int get_sample(void *unused) -+static int get_sample(void) - { - time_type start, t1, t2, last_t2; - s64 diff, total = 0; - u64 sample = 0; - u64 outer_sample = 0; -- int ret = 1; -+ int ret = -1; - - init_time(last_t2, 0); - start = time_get(); /* start timestamp */ -@@ -279,10 +277,14 @@ static int get_sample(void *unused) - - } while (total <= data.sample_width); - -+ ret = 0; -+ - /* If we exceed the threshold value, we have found a hardware latency */ - if (sample > data.threshold || outer_sample > data.threshold) { - struct sample s; - -+ ret = 1; -+ - data.count++; - s.seqnum = data.count; - s.duration = sample; -@@ -295,7 +297,6 @@ static int get_sample(void *unused) - data.max_sample = sample; - } - -- ret = 0; - out: - return ret; - } -@@ -305,32 +306,30 @@ static int get_sample(void *unused) - * @unused: A required part of the kthread API. - * - * Used to periodically sample the CPU TSC via a call to get_sample. We -- * use stop_machine, whith does (intentionally) introduce latency since we -+ * disable interrupts, which does (intentionally) introduce latency since we - * need to ensure nothing else might be running (and thus pre-empting). 
- * Obviously this should never be used in production environments. - * -- * stop_machine will schedule us typically only on CPU0 which is fine for -- * almost every real-world hardware latency situation - but we might later -- * generalize this if we find there are any actualy systems with alternate -- * SMI delivery or other non CPU0 hardware latencies. -+ * Currently this runs on which ever CPU it was scheduled on, but most -+ * real-worald hardware latency situations occur across several CPUs, -+ * but we might later generalize this if we find there are any actualy -+ * systems with alternate SMI delivery or other hardware latencies. - */ - static int kthread_fn(void *unused) - { -- int err = 0; -- u64 interval = 0; -+ int ret; -+ u64 interval; - - while (!kthread_should_stop()) { - - mutex_lock(&data.lock); - -- err = stop_machine(get_sample, unused, 0); -- if (err) { -- /* Houston, we have a problem */ -- mutex_unlock(&data.lock); -- goto err_out; -- } -+ local_irq_disable(); -+ ret = get_sample(); -+ local_irq_enable(); - -- wake_up(&data.wq); /* wake up reader(s) */ -+ if (ret > 0) -+ wake_up(&data.wq); /* wake up reader(s) */ - - interval = data.sample_window - data.sample_width; - do_div(interval, USEC_PER_MSEC); /* modifies interval value */ -@@ -338,15 +337,10 @@ static int kthread_fn(void *unused) - mutex_unlock(&data.lock); - - if (msleep_interruptible(interval)) -- goto out; -+ break; - } -- goto out; --err_out: -- pr_err(BANNER "could not call stop_machine, disabling\n"); -- enabled = 0; --out: -- return err; - -+ return 0; - } - - /** -@@ -442,8 +436,7 @@ static int init_stats(void) - * This function provides a generic read implementation for the global state - * "data" structure debugfs filesystem entries. It would be nice to use - * simple_attr_read directly, but we need to make sure that the data.lock -- * spinlock is held during the actual read (even though we likely won't ever -- * actually race here as the updater runs under a stop_machine context). -+ * is held during the actual read. - */ - static ssize_t simple_data_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos, const u64 *entry) -@@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct f - * This function provides a generic write implementation for the global state - * "data" structure debugfs filesystem entries. It would be nice to use - * simple_attr_write directly, but we need to make sure that the data.lock -- * spinlock is held during the actual write (even though we likely won't ever -- * actually race here as the updater runs under a stop_machine context). -+ * is held during the actual write. - */ - static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos, u64 *entry) diff --git a/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch b/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch deleted file mode 100644 index 7acf5e51b..000000000 --- a/debian/patches/features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch +++ /dev/null @@ -1,93 +0,0 @@ -From: Steven Rostedt -Date: Mon, 19 Aug 2013 17:33:26 -0400 -Subject: hwlat-detector: Use trace_clock_local if available -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -As ktime_get() calls into the timing code which does a read_seq(), it -may be affected by other CPUS that touch that lock. 
To remove this -dependency, use the trace_clock_local() which is already exported -for module use. If CONFIG_TRACING is enabled, use that as the clock, -otherwise use ktime_get(). - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++--------- - 1 file changed, 25 insertions(+), 9 deletions(-) - ---- a/drivers/misc/hwlat_detector.c -+++ b/drivers/misc/hwlat_detector.c -@@ -51,6 +51,7 @@ - #include - #include - #include -+#include - - #define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ - #define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample( - return sample; - } - -+#ifndef CONFIG_TRACING -+#define time_type ktime_t -+#define time_get() ktime_get() -+#define time_to_us(x) ktime_to_us(x) -+#define time_sub(a, b) ktime_sub(a, b) -+#define init_time(a, b) (a).tv64 = b -+#define time_u64(a) ((a).tv64) -+#else -+#define time_type u64 -+#define time_get() trace_clock_local() -+#define time_to_us(x) div_u64(x, 1000) -+#define time_sub(a, b) ((a) - (b)) -+#define init_time(a, b) (a = b) -+#define time_u64(a) a -+#endif - /** - * get_sample - sample the CPU TSC and look for likely hardware latencies - * @unused: This is not used but is a part of the stop_machine API -@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample( - */ - static int get_sample(void *unused) - { -- ktime_t start, t1, t2, last_t2; -+ time_type start, t1, t2, last_t2; - s64 diff, total = 0; - u64 sample = 0; - u64 outer_sample = 0; - int ret = 1; - -- last_t2.tv64 = 0; -- start = ktime_get(); /* start timestamp */ -+ init_time(last_t2, 0); -+ start = time_get(); /* start timestamp */ - - do { - -- t1 = ktime_get(); /* we'll look for a discontinuity */ -- t2 = ktime_get(); -+ t1 = time_get(); /* we'll look for a discontinuity */ -+ t2 = time_get(); - -- if (last_t2.tv64) { -+ if (time_u64(last_t2)) { - /* Check the delta from outer loop (t2 to next t1) */ -- diff = ktime_to_us(ktime_sub(t1, last_t2)); -+ diff = time_to_us(time_sub(t1, last_t2)); - /* This shouldn't happen */ - if (diff < 0) { - pr_err(BANNER "time running backwards\n"); -@@ -247,10 +263,10 @@ static int get_sample(void *unused) - } - last_t2 = t2; - -- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ -+ total = time_to_us(time_sub(t2, start)); /* sample width */ - - /* This checks the inner loop (t1 to t2) */ -- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ -+ diff = time_to_us(time_sub(t2, t1)); /* current diff */ - - /* This shouldn't happen */ - if (diff < 0) { diff --git a/debian/patches/features/all/rt/hwlatdetect.patch b/debian/patches/features/all/rt/hwlatdetect.patch deleted file mode 100644 index ac3fe546c..000000000 --- a/debian/patches/features/all/rt/hwlatdetect.patch +++ /dev/null @@ -1,1348 +0,0 @@ -Subject: hwlatdetect.patch -From: Carsten Emde -Date: Tue, 19 Jul 2011 13:53:12 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Jon Masters developed this wonderful SMI detector. For details please -consult Documentation/hwlat_detector.txt. It could be ported to Linux -3.0 RT without any major change. 
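Mechanically, the time_type shim introduced in the previous patch is plain compile-time clock selection: the sampling loop is written once against time_get()/time_sub()/time_to_us(), and the preprocessor decides which clock backs them. A reduced stand-alone sketch of that pattern (RAW_TSC_CLOCK and the POSIX clocks are stand-ins for CONFIG_TRACING and the kernel clock symbols):

/* Compile-time clock selection, mirroring the time_type shim above.
 * RAW_TSC_CLOCK stands in for CONFIG_TRACING; both branches give the
 * caller one spelling for get/sub/to_us. Illustration only. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#ifdef RAW_TSC_CLOCK
typedef uint64_t time_type;
static time_type time_get(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
#define time_sub(a, b)	((a) - (b))
#define time_to_us(x)	((x) / 1000)
#else
typedef struct timespec time_type;
static time_type time_get(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts;
}
static int64_t time_sub(time_type a, time_type b)
{
	return (int64_t)(a.tv_sec - b.tv_sec) * 1000000000 +
	       (a.tv_nsec - b.tv_nsec);
}
#define time_to_us(x)	((x) / 1000)
#endif

int main(void)
{
	time_type t1 = time_get();
	time_type t2 = time_get();

	/* the sampling loop is written once against this interface */
	printf("gap: %lld us\n", (long long)time_to_us(time_sub(t2, t1)));
	return 0;
}

Either branch exposes the same small set of operations, which is why the patch above could swap clocks without restructuring get_sample().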
- -Signed-off-by: Carsten Emde - ---- - Documentation/hwlat_detector.txt | 64 ++ - drivers/misc/Kconfig | 29 - drivers/misc/Makefile | 1 - drivers/misc/hwlat_detector.c | 1212 +++++++++++++++++++++++++++++++++++++++ - 4 files changed, 1306 insertions(+) - ---- /dev/null -+++ b/Documentation/hwlat_detector.txt -@@ -0,0 +1,64 @@ -+Introduction: -+------------- -+ -+The module hwlat_detector is a special purpose kernel module that is used to -+detect large system latencies induced by the behavior of certain underlying -+hardware or firmware, independent of Linux itself. The code was developed -+originally to detect SMIs (System Management Interrupts) on x86 systems, -+however there is nothing x86 specific about this patchset. It was -+originally written for use by the "RT" patch since the Real Time -+kernel is highly latency sensitive. -+ -+SMIs are usually not serviced by the Linux kernel, which typically does not -+even know that they are occuring. SMIs are instead are set up by BIOS code -+and are serviced by BIOS code, usually for "critical" events such as -+management of thermal sensors and fans. Sometimes though, SMIs are used for -+other tasks and those tasks can spend an inordinate amount of time in the -+handler (sometimes measured in milliseconds). Obviously this is a problem if -+you are trying to keep event service latencies down in the microsecond range. -+ -+The hardware latency detector works by hogging all of the cpus for configurable -+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter -+for some period, then looking for gaps in the TSC data. Any gap indicates a -+time when the polling was interrupted and since the machine is stopped and -+interrupts turned off the only thing that could do that would be an SMI. -+ -+Note that the SMI detector should *NEVER* be used in a production environment. -+It is intended to be run manually to determine if the hardware platform has a -+problem with long system firmware service routines. -+ -+Usage: -+------ -+ -+Loading the module hwlat_detector passing the parameter "enabled=1" (or by -+setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only -+step required to start the hwlat_detector. It is possible to redefine the -+threshold in microseconds (us) above which latency spikes will be taken -+into account (parameter "threshold="). -+ -+Example: -+ -+ # modprobe hwlat_detector enabled=1 threshold=100 -+ -+After the module is loaded, it creates a directory named "hwlat_detector" under -+the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary -+to have debugfs mounted, which might be on /sys/debug on your system. -+ -+The /debug/hwlat_detector interface contains the following files: -+ -+count - number of latency spikes observed since last reset -+enable - a global enable/disable toggle (0/1), resets count -+max - maximum hardware latency actually observed (usecs) -+sample - a pipe from which to read current raw sample data -+ in the format -+ (can be opened O_NONBLOCK for a single sample) -+threshold - minimum latency value to be considered (usecs) -+width - time period to sample with CPUs held (usecs) -+ must be less than the total window size (enforced) -+window - total period of sampling, width being inside (usecs) -+ -+By default we will set width to 500,000 and window to 1,000,000, meaning that -+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). 
If we -+observe any latencies that exceed the threshold (initially 100 usecs), -+then we write to a global sample ring buffer of 8K samples, which is -+consumed by reading from the "sample" (pipe) debugfs file interface. ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -121,6 +121,35 @@ config IBM_ASM - for information on the specific driver level and support statement - for your IBM server. - -+config HWLAT_DETECTOR -+ tristate "Testing module to detect hardware-induced latencies" -+ depends on DEBUG_FS -+ depends on RING_BUFFER -+ default m -+ ---help--- -+ A simple hardware latency detector. Use this module to detect -+ large latencies introduced by the behavior of the underlying -+ system firmware external to Linux. We do this using periodic -+ use of stop_machine to grab all available CPUs and measure -+ for unexplainable gaps in the CPU timestamp counter(s). By -+ default, the module is not enabled until the "enable" file -+ within the "hwlat_detector" debugfs directory is toggled. -+ -+ This module is often used to detect SMI (System Management -+ Interrupts) on x86 systems, though is not x86 specific. To -+ this end, we default to using a sample window of 1 second, -+ during which we will sample for 0.5 seconds. If an SMI or -+ similar event occurs during that time, it is recorded -+ into an 8K samples global ring buffer until retreived. -+ -+ WARNING: This software should never be enabled (it can be built -+ but should not be turned on after it is loaded) in a production -+ environment where high latencies are a concern since the -+ sampling mechanism actually introduces latencies for -+ regular tasks while the CPU(s) are being held. -+ -+ If unsure, say N -+ - config PHANTOM - tristate "Sensable PHANToM (PCI)" - depends on PCI ---- a/drivers/misc/Makefile -+++ b/drivers/misc/Makefile -@@ -39,6 +39,7 @@ obj-$(CONFIG_C2PORT) += c2port/ - obj-$(CONFIG_HMC6352) += hmc6352.o - obj-y += eeprom/ - obj-y += cb710/ -+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o - obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o - obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o - obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o ---- /dev/null -+++ b/drivers/misc/hwlat_detector.c -@@ -0,0 +1,1212 @@ -+/* -+ * hwlat_detector.c - A simple Hardware Latency detector. -+ * -+ * Use this module to detect large system latencies induced by the behavior of -+ * certain underlying system hardware or firmware, independent of Linux itself. -+ * The code was developed originally to detect the presence of SMIs on Intel -+ * and AMD systems, although there is no dependency upon x86 herein. -+ * -+ * The classical example usage of this module is in detecting the presence of -+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a -+ * somewhat special form of hardware interrupt spawned from earlier CPU debug -+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge -+ * LPC (or other device) to generate a special interrupt under certain -+ * circumstances, for example, upon expiration of a special SMI timer device, -+ * due to certain external thermal readings, on certain I/O address accesses, -+ * and other situations. An SMI hits a special CPU pin, triggers a special -+ * SMI mode (complete with special memory map), and the OS is unaware. 
-+ * -+ * Although certain hardware-inducing latencies are necessary (for example, -+ * a modern system often requires an SMI handler for correct thermal control -+ * and remote management) they can wreak havoc upon any OS-level performance -+ * guarantees toward low-latency, especially when the OS is not even made -+ * aware of the presence of these interrupts. For this reason, we need a -+ * somewhat brute force mechanism to detect these interrupts. In this case, -+ * we do it by hogging all of the CPU(s) for configurable timer intervals, -+ * sampling the built-in CPU timer, looking for discontiguous readings. -+ * -+ * WARNING: This implementation necessarily introduces latencies. Therefore, -+ * you should NEVER use this module in a production environment -+ * requiring any kind of low-latency performance guarantee(s). -+ * -+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. -+ * -+ * Includes useful feedback from Clark Williams -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ -+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -+#define U64STR_SIZE 22 /* 20 digits max */ -+ -+#define VERSION "1.0.0" -+#define BANNER "hwlat_detector: " -+#define DRVNAME "hwlat_detector" -+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ -+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ -+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ -+ -+/* Module metadata */ -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Jon Masters "); -+MODULE_DESCRIPTION("A simple hardware latency detector"); -+MODULE_VERSION(VERSION); -+ -+/* Module parameters */ -+ -+static int debug; -+static int enabled; -+static int threshold; -+ -+module_param(debug, int, 0); /* enable debug */ -+module_param(enabled, int, 0); /* enable detector */ -+module_param(threshold, int, 0); /* latency threshold */ -+ -+/* Buffering and sampling */ -+ -+static struct ring_buffer *ring_buffer; /* sample buffer */ -+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ -+static unsigned long buf_size = BUF_SIZE_DEFAULT; -+static struct task_struct *kthread; /* sampling thread */ -+ -+/* DebugFS filesystem entries */ -+ -+static struct dentry *debug_dir; /* debugfs directory */ -+static struct dentry *debug_max; /* maximum TSC delta */ -+static struct dentry *debug_count; /* total detect count */ -+static struct dentry *debug_sample_width; /* sample width us */ -+static struct dentry *debug_sample_window; /* sample window us */ -+static struct dentry *debug_sample; /* raw samples us */ -+static struct dentry *debug_threshold; /* threshold us */ -+static struct dentry *debug_enable; /* enable/disable */ -+ -+/* Individual samples and global state */ -+ -+struct sample; /* latency sample */ -+struct data; /* Global state */ -+ -+/* Sampling functions */ -+static int __buffer_add_sample(struct sample *sample); -+static struct sample *buffer_get_sample(struct sample *sample); -+static int get_sample(void *unused); -+ -+/* Threading and state */ -+static int kthread_fn(void *unused); -+static int start_kthread(void); -+static int stop_kthread(void); -+static void __reset_stats(void); -+static int init_stats(void); -+ -+/* Debugfs interface */ -+static ssize_t simple_data_read(struct file 
*filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos, const u64 *entry); -+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, -+ size_t cnt, loff_t *ppos, u64 *entry); -+static int debug_sample_fopen(struct inode *inode, struct file *filp); -+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos); -+static int debug_sample_release(struct inode *inode, struct file *filp); -+static int debug_enable_fopen(struct inode *inode, struct file *filp); -+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos); -+static ssize_t debug_enable_fwrite(struct file *file, -+ const char __user *user_buffer, -+ size_t user_size, loff_t *offset); -+ -+/* Initialization functions */ -+static int init_debugfs(void); -+static void free_debugfs(void); -+static int detector_init(void); -+static void detector_exit(void); -+ -+/* Individual latency samples are stored here when detected and packed into -+ * the ring_buffer circular buffer, where they are overwritten when -+ * more than buf_size/sizeof(sample) samples are received. */ -+struct sample { -+ u64 seqnum; /* unique sequence */ -+ u64 duration; /* ktime delta */ -+ struct timespec timestamp; /* wall time */ -+ unsigned long lost; -+}; -+ -+/* keep the global state somewhere. Mostly used under stop_machine. */ -+static struct data { -+ -+ struct mutex lock; /* protect changes */ -+ -+ u64 count; /* total since reset */ -+ u64 max_sample; /* max hardware latency */ -+ u64 threshold; /* sample threshold level */ -+ -+ u64 sample_window; /* total sampling window (on+off) */ -+ u64 sample_width; /* active sampling portion of window */ -+ -+ atomic_t sample_open; /* whether the sample file is open */ -+ -+ wait_queue_head_t wq; /* waitqeue for new sample values */ -+ -+} data; -+ -+/** -+ * __buffer_add_sample - add a new latency sample recording to the ring buffer -+ * @sample: The new latency sample value -+ * -+ * This receives a new latency sample and records it in a global ring buffer. -+ * No additional locking is used in this case - suited for stop_machine use. -+ */ -+static int __buffer_add_sample(struct sample *sample) -+{ -+ return ring_buffer_write(ring_buffer, -+ sizeof(struct sample), sample); -+} -+ -+/** -+ * buffer_get_sample - remove a hardware latency sample from the ring buffer -+ * @sample: Pre-allocated storage for the sample -+ * -+ * This retrieves a hardware latency sample from the global circular buffer -+ */ -+static struct sample *buffer_get_sample(struct sample *sample) -+{ -+ struct ring_buffer_event *e = NULL; -+ struct sample *s = NULL; -+ unsigned int cpu = 0; -+ -+ if (!sample) -+ return NULL; -+ -+ mutex_lock(&ring_buffer_mutex); -+ for_each_online_cpu(cpu) { -+ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); -+ if (e) -+ break; -+ } -+ -+ if (e) { -+ s = ring_buffer_event_data(e); -+ memcpy(sample, s, sizeof(struct sample)); -+ } else -+ sample = NULL; -+ mutex_unlock(&ring_buffer_mutex); -+ -+ return sample; -+} -+ -+/** -+ * get_sample - sample the CPU TSC and look for likely hardware latencies -+ * @unused: This is not used but is a part of the stop_machine API -+ * -+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential -+ * hardware-induced latency. Called under stop_machine, with data.lock held. 
-+ */ -+static int get_sample(void *unused) -+{ -+ ktime_t start, t1, t2; -+ s64 diff, total = 0; -+ u64 sample = 0; -+ int ret = 1; -+ -+ start = ktime_get(); /* start timestamp */ -+ -+ do { -+ -+ t1 = ktime_get(); /* we'll look for a discontinuity */ -+ t2 = ktime_get(); -+ -+ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ -+ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ -+ -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ -+ if (diff > sample) -+ sample = diff; /* only want highest value */ -+ -+ } while (total <= data.sample_width); -+ -+ /* If we exceed the threshold value, we have found a hardware latency */ -+ if (sample > data.threshold) { -+ struct sample s; -+ -+ data.count++; -+ s.seqnum = data.count; -+ s.duration = sample; -+ s.timestamp = CURRENT_TIME; -+ __buffer_add_sample(&s); -+ -+ /* Keep a running maximum ever recorded hardware latency */ -+ if (sample > data.max_sample) -+ data.max_sample = sample; -+ } -+ -+ ret = 0; -+out: -+ return ret; -+} -+ -+/* -+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread -+ * @unused: A required part of the kthread API. -+ * -+ * Used to periodically sample the CPU TSC via a call to get_sample. We -+ * use stop_machine, whith does (intentionally) introduce latency since we -+ * need to ensure nothing else might be running (and thus pre-empting). -+ * Obviously this should never be used in production environments. -+ * -+ * stop_machine will schedule us typically only on CPU0 which is fine for -+ * almost every real-world hardware latency situation - but we might later -+ * generalize this if we find there are any actualy systems with alternate -+ * SMI delivery or other non CPU0 hardware latencies. -+ */ -+static int kthread_fn(void *unused) -+{ -+ int err = 0; -+ u64 interval = 0; -+ -+ while (!kthread_should_stop()) { -+ -+ mutex_lock(&data.lock); -+ -+ err = stop_machine(get_sample, unused, 0); -+ if (err) { -+ /* Houston, we have a problem */ -+ mutex_unlock(&data.lock); -+ goto err_out; -+ } -+ -+ wake_up(&data.wq); /* wake up reader(s) */ -+ -+ interval = data.sample_window - data.sample_width; -+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */ -+ -+ mutex_unlock(&data.lock); -+ -+ if (msleep_interruptible(interval)) -+ goto out; -+ } -+ goto out; -+err_out: -+ pr_err(BANNER "could not call stop_machine, disabling\n"); -+ enabled = 0; -+out: -+ return err; -+ -+} -+ -+/** -+ * start_kthread - Kick off the hardware latency sampling/detector kthread -+ * -+ * This starts a kernel thread that will sit and sample the CPU timestamp -+ * counter (TSC or similar) and look for potential hardware latencies. -+ */ -+static int start_kthread(void) -+{ -+ kthread = kthread_run(kthread_fn, NULL, -+ DRVNAME); -+ if (IS_ERR(kthread)) { -+ pr_err(BANNER "could not start sampling thread\n"); -+ enabled = 0; -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+/** -+ * stop_kthread - Inform the hardware latency samping/detector kthread to stop -+ * -+ * This kicks the running hardware latency sampling/detector kernel thread and -+ * tells it to stop sampling now. Use this on unload and at system shutdown. -+ */ -+static int stop_kthread(void) -+{ -+ int ret; -+ -+ ret = kthread_stop(kthread); -+ -+ return ret; -+} -+ -+/** -+ * __reset_stats - Reset statistics for the hardware latency detector -+ * -+ * We use data to store various statistics and global state. 
We call this -+ * function in order to reset those when "enable" is toggled on or off, and -+ * also at initialization. Should be called with data.lock held. -+ */ -+static void __reset_stats(void) -+{ -+ data.count = 0; -+ data.max_sample = 0; -+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ -+} -+ -+/** -+ * init_stats - Setup global state statistics for the hardware latency detector -+ * -+ * We use data to store various statistics and global state. We also use -+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware -+ * induced system latencies. This function initializes these structures and -+ * allocates the global ring buffer also. -+ */ -+static int init_stats(void) -+{ -+ int ret = -ENOMEM; -+ -+ mutex_init(&data.lock); -+ init_waitqueue_head(&data.wq); -+ atomic_set(&data.sample_open, 0); -+ -+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); -+ -+ if (WARN(!ring_buffer, KERN_ERR BANNER -+ "failed to allocate ring buffer!\n")) -+ goto out; -+ -+ __reset_stats(); -+ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ -+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ -+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ -+ -+ ret = 0; -+ -+out: -+ return ret; -+ -+} -+ -+/* -+ * simple_data_read - Wrapper read function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * @entry: The entry to read from -+ * -+ * This function provides a generic read implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_read directly, but we need to make sure that the data.lock -+ * spinlock is held during the actual read (even though we likely won't ever -+ * actually race here as the updater runs under a stop_machine context). -+ */ -+static ssize_t simple_data_read(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos, const u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ u64 val = 0; -+ int len = 0; -+ -+ memset(buf, 0, sizeof(buf)); -+ -+ if (!entry) -+ return -EFAULT; -+ -+ mutex_lock(&data.lock); -+ val = *entry; -+ mutex_unlock(&data.lock); -+ -+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); -+ -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); -+ -+} -+ -+/* -+ * simple_data_write - Wrapper write function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to write value from -+ * @cnt: The maximum number of bytes to write -+ * @ppos: The current "file" position -+ * @entry: The entry to write to -+ * -+ * This function provides a generic write implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_write directly, but we need to make sure that the data.lock -+ * spinlock is held during the actual write (even though we likely won't ever -+ * actually race here as the updater runs under a stop_machine context). 
-+ */ -+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, -+ size_t cnt, loff_t *ppos, u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ *entry = val; -+ mutex_unlock(&data.lock); -+ -+ return csize; -+} -+ -+/** -+ * debug_count_fopen - Open function for "count" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "count" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_count_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_count_fread - Read function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to read the -+ * number of latency readings exceeding the configured threshold since -+ * the detector was last reset (e.g. by writing a zero into "count"). -+ */ -+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_count_fwrite - Write function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to write a -+ * desired value, especially to zero the total count. -+ */ -+static ssize_t debug_count_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "enable" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_enable_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_enable_fread - Read function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "enable" debugfs -+ * interface to the hardware latency detector. Can be used to determine -+ * whether the detector is currently enabled ("0\n" or "1\n" returned). 
-+ */ -+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char buf[4]; -+ -+ if ((cnt < sizeof(buf)) || (*ppos)) -+ return 0; -+ -+ buf[0] = enabled ? '1' : '0'; -+ buf[1] = '\n'; -+ buf[2] = '\0'; -+ if (copy_to_user(ubuf, buf, strlen(buf))) -+ return -EFAULT; -+ return *ppos = strlen(buf); -+} -+ -+/** -+ * debug_enable_fwrite - Write function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "enable" debugfs -+ * interface to the hardware latency detector. Can be used to enable or -+ * disable the detector, which will have the side-effect of possibly -+ * also resetting the global stats and kicking off the measuring -+ * kthread (on an enable) or the converse (upon a disable). -+ */ -+static ssize_t debug_enable_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[4]; -+ int csize = min(cnt, sizeof(buf)); -+ long val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[sizeof(buf)-1] = '\0'; /* just in case */ -+ err = kstrtoul(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ if (val) { -+ if (enabled) -+ goto unlock; -+ enabled = 1; -+ __reset_stats(); -+ if (start_kthread()) -+ return -EFAULT; -+ } else { -+ if (!enabled) -+ goto unlock; -+ enabled = 0; -+ err = stop_kthread(); -+ if (err) { -+ pr_err(BANNER "cannot stop kthread\n"); -+ return -EFAULT; -+ } -+ wake_up(&data.wq); /* reader(s) should return */ -+ } -+unlock: -+ return csize; -+} -+ -+/** -+ * debug_max_fopen - Open function for "max" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "max" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_max_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_max_fread - Read function for "max" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "max" debugfs -+ * interface to the hardware latency detector. Can be used to determine -+ * the maximum latency value observed since it was last reset. -+ */ -+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); -+} -+ -+/** -+ * debug_max_fwrite - Write function for "max" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "max" debugfs -+ * interface to the hardware latency detector. 
Can be used to reset the -+ * maximum or set it to some other desired value - if, then, subsequent -+ * measurements exceed this value, the maximum will be updated. -+ */ -+static ssize_t debug_max_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); -+} -+ -+ -+/** -+ * debug_sample_fopen - An open function for "sample" debugfs interface -+ * @inode: The in-kernel inode representation of this debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function handles opening the "sample" file within the hardware -+ * latency detector debugfs directory interface. This file is used to read -+ * raw samples from the global ring_buffer and allows the user to see a -+ * running latency history. Can be opened blocking or non-blocking, -+ * affecting whether it behaves as a buffer read pipe, or does not. -+ * Implements simple locking to prevent multiple simultaneous use. -+ */ -+static int debug_sample_fopen(struct inode *inode, struct file *filp) -+{ -+ if (!atomic_add_unless(&data.sample_open, 1, 1)) -+ return -EBUSY; -+ else -+ return 0; -+} -+ -+/** -+ * debug_sample_fread - A read function for "sample" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that will contain the samples read -+ * @cnt: The maximum bytes to read from the debugfs "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function handles reading from the "sample" file within the hardware -+ * latency detector debugfs directory interface. This file is used to read -+ * raw samples from the global ring_buffer and allows the user to see a -+ * running latency history. By default this will block pending a new -+ * value written into the sample buffer, unless there are already a -+ * number of value(s) waiting in the buffer, or the sample file was -+ * previously opened in a non-blocking mode of operation. -+ */ -+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ int len = 0; -+ char buf[64]; -+ struct sample *sample = NULL; -+ -+ if (!enabled) -+ return 0; -+ -+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); -+ if (!sample) -+ return -ENOMEM; -+ -+ while (!buffer_get_sample(sample)) { -+ -+ DEFINE_WAIT(wait); -+ -+ if (filp->f_flags & O_NONBLOCK) { -+ len = -EAGAIN; -+ goto out; -+ } -+ -+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); -+ schedule(); -+ finish_wait(&data.wq, &wait); -+ -+ if (signal_pending(current)) { -+ len = -EINTR; -+ goto out; -+ } -+ -+ if (!enabled) { /* enable was toggled */ -+ len = 0; -+ goto out; -+ } -+ } -+ -+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", -+ sample->timestamp.tv_sec, -+ sample->timestamp.tv_nsec, -+ sample->duration); -+ -+ -+ /* handling partial reads is more trouble than it's worth */ -+ if (len > cnt) -+ goto out; -+ -+ if (copy_to_user(ubuf, buf, len)) -+ len = -EFAULT; -+ -+out: -+ kfree(sample); -+ return len; -+} -+ -+/** -+ * debug_sample_release - Release function for "sample" debugfs interface -+ * @inode: The in-kernel inode represenation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function completes the close of the debugfs interface "sample" file. -+ * Frees the sample_open "lock" so that other users may open the interface. 
-+ */ -+static int debug_sample_release(struct inode *inode, struct file *filp) -+{ -+ atomic_dec(&data.sample_open); -+ -+ return 0; -+} -+ -+/** -+ * debug_threshold_fopen - Open function for "threshold" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_threshold_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_threshold_fread - Read function for "threshold" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. It can be used to determine -+ * the current threshold level at which a latency will be recorded in the -+ * global ring buffer, typically on the order of 10us. -+ */ -+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); -+} -+ -+/** -+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. It can be used to configure -+ * the threshold level at which any subsequently detected latencies will -+ * be recorded into the global ring buffer. -+ */ -+static ssize_t debug_threshold_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ int ret; -+ -+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); -+ -+ if (enabled) -+ wake_up_process(kthread); -+ -+ return ret; -+} -+ -+/** -+ * debug_width_fopen - Open function for "width" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "width" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_width_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_width_fread - Read function for "width" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "width" debugfs -+ * interface to the hardware latency detector. It can be used to determine -+ * for how many us of the total window us we will actively sample for any -+ * hardware-induced latecy periods. Obviously, it is not possible to -+ * sample constantly and have the system respond to a sample reader, or, -+ * worse, without having the system appear to have gone out to lunch. 
-+ */ -+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); -+} -+ -+/** -+ * debug_width_fwrite - Write function for "width" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "width" debugfs -+ * interface to the hardware latency detector. It can be used to configure -+ * for how many us of the total window us we will actively sample for any -+ * hardware-induced latency periods. Obviously, it is not possible to -+ * sample constantly and have the system respond to a sample reader, or, -+ * worse, without having the system appear to have gone out to lunch. It -+ * is enforced that width is less that the total window size. -+ */ -+static ssize_t debug_width_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ if (val < data.sample_window) -+ data.sample_width = val; -+ else { -+ mutex_unlock(&data.lock); -+ return -EINVAL; -+ } -+ mutex_unlock(&data.lock); -+ -+ if (enabled) -+ wake_up_process(kthread); -+ -+ return csize; -+} -+ -+/** -+ * debug_window_fopen - Open function for "window" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "window" debugfs -+ * interface to the hardware latency detector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. -+ */ -+static int debug_window_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_window_fread - Read function for "window" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "window" debugfs -+ * interface to the hardware latency detector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. Can be used to read the total window size. 
-+ */ -+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); -+} -+ -+/** -+ * debug_window_fwrite - Write function for "window" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "window" debufds -+ * interface to the hardware latency detetector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. Can be used to write a new total window size. It -+ * is enfoced that any value written must be greater than the sample width -+ * size, or an error results. -+ */ -+static ssize_t debug_window_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ if (data.sample_width < val) -+ data.sample_window = val; -+ else { -+ mutex_unlock(&data.lock); -+ return -EINVAL; -+ } -+ mutex_unlock(&data.lock); -+ -+ return csize; -+} -+ -+/* -+ * Function pointers for the "count" debugfs file operations -+ */ -+static const struct file_operations count_fops = { -+ .open = debug_count_fopen, -+ .read = debug_count_fread, -+ .write = debug_count_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "enable" debugfs file operations -+ */ -+static const struct file_operations enable_fops = { -+ .open = debug_enable_fopen, -+ .read = debug_enable_fread, -+ .write = debug_enable_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "max" debugfs file operations -+ */ -+static const struct file_operations max_fops = { -+ .open = debug_max_fopen, -+ .read = debug_max_fread, -+ .write = debug_max_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "sample" debugfs file operations -+ */ -+static const struct file_operations sample_fops = { -+ .open = debug_sample_fopen, -+ .read = debug_sample_fread, -+ .release = debug_sample_release, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "threshold" debugfs file operations -+ */ -+static const struct file_operations threshold_fops = { -+ .open = debug_threshold_fopen, -+ .read = debug_threshold_fread, -+ .write = debug_threshold_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "width" debugfs file operations -+ */ -+static const struct file_operations width_fops = { -+ .open = debug_width_fopen, -+ .read = debug_width_fread, -+ .write = debug_width_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "window" debugfs file operations -+ */ -+static const struct file_operations window_fops = { -+ .open = debug_window_fopen, -+ .read = debug_window_fread, -+ .write = debug_window_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/** -+ * init_debugfs - A function to initialize the debugfs interface files -+ * -+ * This function creates 
entries in debugfs for "hwlat_detector", including -+ * files to read values from the detector, current samples, and the -+ * maximum sample that has been captured since the hardware latency -+ * detector was started. -+ */ -+static int init_debugfs(void) -+{ -+ int ret = -ENOMEM; -+ -+ debug_dir = debugfs_create_dir(DRVNAME, NULL); -+ if (!debug_dir) -+ goto err_debug_dir; -+ -+ debug_sample = debugfs_create_file("sample", 0444, -+ debug_dir, NULL, -+ &sample_fops); -+ if (!debug_sample) -+ goto err_sample; -+ -+ debug_count = debugfs_create_file("count", 0444, -+ debug_dir, NULL, -+ &count_fops); -+ if (!debug_count) -+ goto err_count; -+ -+ debug_max = debugfs_create_file("max", 0444, -+ debug_dir, NULL, -+ &max_fops); -+ if (!debug_max) -+ goto err_max; -+ -+ debug_sample_window = debugfs_create_file("window", 0644, -+ debug_dir, NULL, -+ &window_fops); -+ if (!debug_sample_window) -+ goto err_window; -+ -+ debug_sample_width = debugfs_create_file("width", 0644, -+ debug_dir, NULL, -+ &width_fops); -+ if (!debug_sample_width) -+ goto err_width; -+ -+ debug_threshold = debugfs_create_file("threshold", 0644, -+ debug_dir, NULL, -+ &threshold_fops); -+ if (!debug_threshold) -+ goto err_threshold; -+ -+ debug_enable = debugfs_create_file("enable", 0644, -+ debug_dir, &enabled, -+ &enable_fops); -+ if (!debug_enable) -+ goto err_enable; -+ -+ else { -+ ret = 0; -+ goto out; -+ } -+ -+err_enable: -+ debugfs_remove(debug_threshold); -+err_threshold: -+ debugfs_remove(debug_sample_width); -+err_width: -+ debugfs_remove(debug_sample_window); -+err_window: -+ debugfs_remove(debug_max); -+err_max: -+ debugfs_remove(debug_count); -+err_count: -+ debugfs_remove(debug_sample); -+err_sample: -+ debugfs_remove(debug_dir); -+err_debug_dir: -+out: -+ return ret; -+} -+ -+/** -+ * free_debugfs - A function to cleanup the debugfs file interface -+ */ -+static void free_debugfs(void) -+{ -+ /* could also use a debugfs_remove_recursive */ -+ debugfs_remove(debug_enable); -+ debugfs_remove(debug_threshold); -+ debugfs_remove(debug_sample_width); -+ debugfs_remove(debug_sample_window); -+ debugfs_remove(debug_max); -+ debugfs_remove(debug_count); -+ debugfs_remove(debug_sample); -+ debugfs_remove(debug_dir); -+} -+ -+/** -+ * detector_init - Standard module initialization code -+ */ -+static int detector_init(void) -+{ -+ int ret = -ENOMEM; -+ -+ pr_info(BANNER "version %s\n", VERSION); -+ -+ ret = init_stats(); -+ if (ret) -+ goto out; -+ -+ ret = init_debugfs(); -+ if (ret) -+ goto err_stats; -+ -+ if (enabled) -+ ret = start_kthread(); -+ -+ goto out; -+ -+err_stats: -+ ring_buffer_free(ring_buffer); -+out: -+ return ret; -+ -+} -+ -+/** -+ * detector_exit - Standard module cleanup code -+ */ -+static void detector_exit(void) -+{ -+ int err; -+ -+ if (enabled) { -+ enabled = 0; -+ err = stop_kthread(); -+ if (err) -+ pr_err(BANNER "cannot stop kthread\n"); -+ } -+ -+ free_debugfs(); -+ ring_buffer_free(ring_buffer); /* free up the ring buffer */ -+ -+} -+ -+module_init(detector_init); -+module_exit(detector_exit); diff --git a/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch deleted file mode 100644 index 83a0348bc..000000000 --- a/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch +++ /dev/null @@ -1,34 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 21 Mar 2013 11:35:49 +0100 -Subject: i2c/omap: drop the lock hard irq context -Origin:
https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The lock is taken while reading two registers. On RT the lock is first -taken in the hard irq, where it might sleep, and then in the threaded irq. -The threaded irq runs in oneshot mode, so the hard irq does not run until -the thread completes; there is therefore no reason to grab the lock. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/i2c/busses/i2c-omap.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/drivers/i2c/busses/i2c-omap.c -+++ b/drivers/i2c/busses/i2c-omap.c -@@ -995,15 +995,12 @@ omap_i2c_isr(int irq, void *dev_id) - u16 mask; - u16 stat; - -- spin_lock(&omap->lock); -- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); - stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); -+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - -- spin_unlock(&omap->lock); -- - return ret; - } - diff --git a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch deleted file mode 100644 index cdaef85b9..000000000 --- a/debian/patches/features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Clark Williams -Date: Tue, 26 May 2015 10:43:43 -0500 -Subject: i915: bogus warning from i915 when running on PREEMPT_RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The i915 driver has a 'WARN_ON(!in_interrupt())' in the display -handler, which whines constantly on the RT kernel (since the interrupt -is actually handled in a threaded handler and not actual interrupt -context). - -Change the WARN_ON to WARN_ON_NONRT - -Tested-by: Joakim Hernberg -Signed-off-by: Clark Williams -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/gpu/drm/i915/intel_display.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -11376,7 +11376,7 @@ void intel_check_page_flip(struct drm_de - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - -- WARN_ON(!in_interrupt()); -+ WARN_ON_NONRT(!in_interrupt()); - - if (crtc == NULL) - return; diff --git a/debian/patches/features/all/rt/i915_compile_fix.patch b/debian/patches/features/all/rt/i915_compile_fix.patch deleted file mode 100644 index 2cd234aaa..000000000 --- a/debian/patches/features/all/rt/i915_compile_fix.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: gpu/i915: don't open code these things -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The open-coded part is gone since 1f83fee0 ("drm/i915: clear up wedged transitions"); -the owner check is still there.
- -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/gpu/drm/i915/i915_gem_shrinker.c -+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c -@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mu - if (!mutex_is_locked(mutex)) - return false; - --#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) -+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) - return mutex->owner == task; - #else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch deleted file mode 100644 index bac0c1cef..000000000 --- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch +++ /dev/null @@ -1,170 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:16 -0500 -Subject: ide: Do not disable interrupts for PREEMPT-RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the local_irq_*_nort variants. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/ide/alim15x3.c | 4 ++-- - drivers/ide/hpt366.c | 4 ++-- - drivers/ide/ide-io-std.c | 8 ++++---- - drivers/ide/ide-io.c | 2 +- - drivers/ide/ide-iops.c | 4 ++-- - drivers/ide/ide-probe.c | 4 ++-- - drivers/ide/ide-taskfile.c | 6 +++--- - 7 files changed, 16 insertions(+), 16 deletions(-) - ---- a/drivers/ide/alim15x3.c -+++ b/drivers/ide/alim15x3.c -@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p - - isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - if (m5229_revision < 0xC2) { - /* -@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p - } - pci_dev_put(north); - pci_dev_put(isa_dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return 0; - } - ---- a/drivers/ide/hpt366.c -+++ b/drivers/ide/hpt366.c -@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h - - dma_old = inb(base + 2); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - dma_new = dma_old; - pci_read_config_byte(dev, hwif->channel ? 
0x4b : 0x43, &masterdma); -@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h - if (dma_new != dma_old) - outb(dma_new, base + 2); - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", - hwif->name, base, base + 7); ---- a/drivers/ide/ide-io-std.c -+++ b/drivers/ide/ide-io-std.c -@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, - insl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; -@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, - outsl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; ---- a/drivers/ide/ide-io.c -+++ b/drivers/ide/ide-io.c -@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat - /* disable_irq_nosync ?? */ - disable_irq(hwif->irq); - /* local CPU only, as if we were handling an interrupt */ -- local_irq_disable(); -+ local_irq_disable_nort(); - if (hwif->polling) { - startstop = handler(drive); - } else if (drive_is_ready(drive)) { ---- a/drivers/ide/ide-iops.c -+++ b/drivers/ide/ide-iops.c -@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, - if ((stat & ATA_BUSY) == 0) - break; - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - *rstat = stat; - return -EBUSY; - } - } -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - /* - * Allow status to settle, then read it again. 
---- a/drivers/ide/ide-probe.c -+++ b/drivers/ide/ide-probe.c -@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri - int bswap = 1; - - /* local CPU only; some systems need this */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - /* read 512 bytes of id info */ - hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - drive->dev_flags |= IDE_DFLAG_ID_READ; - #ifdef DEBUG ---- a/drivers/ide/ide-taskfile.c -+++ b/drivers/ide/ide-taskfile.c -@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s - - page_is_high = PageHighMem(page); - if (page_is_high) -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - buf = kmap_atomic(page) + offset; - -@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s - kunmap_atomic(buf); - - if (page_is_high) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - len -= nr_bytes; - } -@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr - } - - if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) -- local_irq_disable(); -+ local_irq_disable_nort(); - - ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); - diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch deleted file mode 100644 index 45124b943..000000000 --- a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch +++ /dev/null @@ -1,124 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: idr: Use local lock instead of preempt enable/disable -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -We need to protect the per cpu variable and prevent migration. - -Signed-off-by: Thomas Gleixner ---- - include/linux/idr.h | 4 ++++ - lib/idr.c | 43 +++++++++++++++++++++++++++++++++++++------ - 2 files changed, 41 insertions(+), 6 deletions(-) - ---- a/include/linux/idr.h -+++ b/include/linux/idr.h -@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp); - * Each idr_preload() should be matched with an invocation of this - * function. See idr_preload() for details. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+void idr_preload_end(void); -+#else - static inline void idr_preload_end(void) - { - preempt_enable(); - } -+#endif - - /** - * idr_find - return pointer for given id ---- a/lib/idr.c -+++ b/lib/idr.c -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - - #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) - #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) -@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer * - static DEFINE_PER_CPU(int, idr_preload_cnt); - static DEFINE_SPINLOCK(simple_ida_lock); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static DEFINE_LOCAL_IRQ_LOCK(idr_lock); -+ -+static inline void idr_preload_lock(void) -+{ -+ local_lock(idr_lock); -+} -+ -+static inline void idr_preload_unlock(void) -+{ -+ local_unlock(idr_lock); -+} -+ -+void idr_preload_end(void) -+{ -+ idr_preload_unlock(); -+} -+EXPORT_SYMBOL(idr_preload_end); -+#else -+static inline void idr_preload_lock(void) -+{ -+ preempt_disable(); -+} -+ -+static inline void idr_preload_unlock(void) -+{ -+ preempt_enable(); -+} -+#endif -+ -+ - /* the maximum ID which can be allocated given idr->layers */ - static int idr_max(int layers) - { -@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc - * context. See idr_preload() for details. 
- */ - if (!in_interrupt()) { -- preempt_disable(); -+ idr_preload_lock(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; - } -- preempt_enable(); -+ idr_preload_unlock(); - if (new) - return new; - } -@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *id - idr_mark_full(pa, id); - } - -- - /** - * idr_preload - preload for idr_alloc() - * @gfp_mask: allocation mask to use for preloading -@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask) - WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - -- preempt_disable(); -+ idr_preload_lock(); - - /* - * idr_alloc() is likely to succeed w/o full idr_layer buffer and -@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask) - while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { - struct idr_layer *new; - -- preempt_enable(); -+ idr_preload_unlock(); - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); -- preempt_disable(); -+ idr_preload_lock(); - if (!new) - break; - diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch deleted file mode 100644 index 70eeab4b4..000000000 --- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch +++ /dev/null @@ -1,41 +0,0 @@ -From: Sven-Thorsten Dietrich -Date: Fri, 3 Jul 2009 08:30:35 -0500 -Subject: infiniband: Mellanox IB driver patch use _nort() primitives -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT -Kernel. - -Michael S. Tsirkin sayeth: -"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable -interrupts for non-raw spinlocks, I think all of infiniband will be fine without -changes." - -Signed-off-by: Sven-Thorsten Dietrich -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c -+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c -@@ -847,7 +847,7 @@ void ipoib_mcast_restart_task(struct wor - - ipoib_dbg_mcast(priv, "restarting multicast task\n"); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - netif_addr_lock(dev); - spin_lock(&priv->lock); - -@@ -929,7 +929,7 @@ void ipoib_mcast_restart_task(struct wor - - spin_unlock(&priv->lock); - netif_addr_unlock(dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - /* - * make sure the in-flight joins have finished before we attempt diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch deleted file mode 100644 index eeac41289..000000000 --- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch +++ /dev/null @@ -1,61 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:16 -0500 -Subject: input: gameport: Do not disable interrupts on PREEMPT_RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the _nort() primitives. 
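For reference, here is a minimal sketch of how the _nort() helpers behave. This is an assumption based on the usual RT tree definitions in include/linux/interrupt.h, not part of the patches quoted here: on PREEMPT_RT_FULL the helpers only save or ignore the flags without disabling hard interrupts, while on mainline they fall back to the classic primitives.

#ifdef CONFIG_PREEMPT_RT_FULL
/* On RT: do not actually disable hard interrupts */
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
/* On mainline: behave exactly like the classic primitives */
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif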
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - drivers/input/gameport/gameport.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - ---- a/drivers/input/gameport/gameport.c -+++ b/drivers/input/gameport/gameport.c -@@ -91,13 +91,13 @@ static int gameport_measure_speed(struct - tx = ~0; - - for (i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - t1 = ktime_get_ns(); - for (t = 0; t < 50; t++) - gameport_read(gameport); - t2 = ktime_get_ns(); - t3 = ktime_get_ns(); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - t = (t2 - t1) - (t3 - t2); - if (t < tx) -@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(st - tx = 1 << 30; - - for(i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - GET_TIME(t1); - for (t = 0; t < 50; t++) gameport_read(gameport); - GET_TIME(t2); - GET_TIME(t3); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; - } -@@ -148,11 +148,11 @@ static int old_gameport_measure_speed(st - tx = 1 << 30; - - for(i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - t1 = rdtsc(); - for (t = 0; t < 50; t++) gameport_read(gameport); - t2 = rdtsc(); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if (t2 - t1 < tx) tx = t2 - t1; - } diff --git a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch b/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch deleted file mode 100644 index 7e4c4f4e6..000000000 --- a/debian/patches/features/all/rt/introduce_migrate_disable_cpu_light.patch +++ /dev/null @@ -1,281 +0,0 @@ -Subject: Introduce migrate_disable() + cpu_light() -From: Thomas Gleixner -Date: Fri, 17 Jun 2011 15:42:38 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Introduce migrate_disable(). The task can't be pushed to another CPU but can -be preempted. - -From: Peter Zijlstra : -|Make migrate_disable() be a preempt_disable() for !rt kernels. This -|allows generic code to use it but still enforces that these code -|sections stay relatively small. -| -|A preemptible migrate_disable() accessible for general use would allow -|people growing arbitrary per-cpu crap instead of clean these things -|up. - -From: Steven Rostedt -| The migrate_disable() can cause a bit of overhead to the RT kernel, -| as changing the affinity is expensive to do at every lock encountered. -| As a running task can not migrate, the actual disabling of migration -| does not need to occur until the task is about to schedule out. -| -| In most cases, a task that disables migration will enable it before -| it schedules, making this change improve performance tremendously. - -On top of this, build get/put_cpu_light(). It is similar to get_cpu(): -it uses migrate_disable() instead of preempt_disable(). That means the user -remains on the same CPU but the function using it may be preempted and -invoked again from another caller on the same CPU.
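A hedged usage sketch of that API (illustrative only; my_counter and my_count_event are hypothetical names, not taken from this patch): get_cpu_light() pins the task to its current CPU while leaving it preemptible, so per-CPU state touched under it must still tolerate preemption by another task on the same CPU, hence the atomic operation below.

static DEFINE_PER_CPU(atomic_t, my_counter);	/* hypothetical per-CPU stat */

static void my_count_event(void)
{
	int cpu = get_cpu_light();	/* no migration, but still preemptible */

	/* atomic, because another task on this CPU may preempt us here */
	atomic_inc(&per_cpu(my_counter, cpu));

	put_cpu_light();		/* migration allowed again */
}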
- -Signed-off-by: Thomas Gleixner ---- - include/linux/cpu.h | 3 ++ - include/linux/preempt.h | 9 ++++++ - include/linux/sched.h | 39 +++++++++++++++++++++----- - include/linux/smp.h | 3 ++ - kernel/sched/core.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++- - kernel/sched/debug.c | 7 ++++ - lib/smp_processor_id.c | 5 ++- - 7 files changed, 125 insertions(+), 11 deletions(-) - ---- a/include/linux/cpu.h -+++ b/include/linux/cpu.h -@@ -222,6 +222,9 @@ static inline void smpboot_thread_init(v - #endif /* CONFIG_SMP */ - extern struct bus_type cpu_subsys; - -+static inline void pin_current_cpu(void) { } -+static inline void unpin_current_cpu(void) { } -+ - #ifdef CONFIG_HOTPLUG_CPU - /* Stop CPUs going up and down. */ - ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -257,11 +257,20 @@ do { \ - # define preempt_enable_rt() preempt_enable() - # define preempt_disable_nort() barrier() - # define preempt_enable_nort() barrier() -+# ifdef CONFIG_SMP -+ extern void migrate_disable(void); -+ extern void migrate_enable(void); -+# else /* CONFIG_SMP */ -+# define migrate_disable() barrier() -+# define migrate_enable() barrier() -+# endif /* CONFIG_SMP */ - #else - # define preempt_disable_rt() barrier() - # define preempt_enable_rt() barrier() - # define preempt_disable_nort() preempt_disable() - # define preempt_enable_nort() preempt_enable() -+# define migrate_disable() preempt_disable() -+# define migrate_enable() preempt_enable() - #endif - - #ifdef CONFIG_PREEMPT_NOTIFIERS ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1414,6 +1414,12 @@ struct task_struct { - #endif - - unsigned int policy; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int migrate_disable; -+# ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable_atomic; -+# endif -+#endif - int nr_cpus_allowed; - cpumask_t cpus_allowed; - -@@ -1838,14 +1844,6 @@ extern int arch_task_struct_size __read_ - # define arch_task_struct_size (sizeof(struct task_struct)) - #endif - --/* Future-safe accessor for struct task_struct's cpus_allowed. */ --#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -- --static inline int tsk_nr_cpus_allowed(struct task_struct *p) --{ -- return p->nr_cpus_allowed; --} -- - #define TNF_MIGRATED 0x01 - #define TNF_NO_GROUP 0x02 - #define TNF_SHARED 0x04 -@@ -3122,6 +3120,31 @@ static inline void set_task_cpu(struct t - - #endif /* CONFIG_SMP */ - -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ return p->migrate_disable; -+#else -+ return 0; -+#endif -+} -+ -+/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ -+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) -+{ -+ if (__migrate_disabled(p)) -+ return cpumask_of(task_cpu(p)); -+ -+ return &p->cpus_allowed; -+} -+ -+static inline int tsk_nr_cpus_allowed(struct task_struct *p) -+{ -+ if (__migrate_disabled(p)) -+ return 1; -+ return p->nr_cpus_allowed; -+} -+ - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); - extern long sched_getaffinity(pid_t pid, struct cpumask *mask); - ---- a/include/linux/smp.h -+++ b/include/linux/smp.h -@@ -185,6 +185,9 @@ static inline void smp_init(void) { } - #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) - #define put_cpu() preempt_enable() - -+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -+#define put_cpu_light() migrate_enable() -+ - /* - * Callback to arch code if there's nosmp or maxcpus=0 on the - * boot command line: ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1171,6 +1171,11 @@ void do_set_cpus_allowed(struct task_str - - lockdep_assert_held(&p->pi_lock); - -+ if (__migrate_disabled(p)) { -+ cpumask_copy(&p->cpus_allowed, new_mask); -+ return; -+ } -+ - queued = task_on_rq_queued(p); - running = task_current(rq, p); - -@@ -1232,7 +1237,7 @@ static int __set_cpus_allowed_ptr(struct - do_set_cpus_allowed(p, new_mask); - - /* Can the task run on the task's current CPU? If so, we're done */ -- if (cpumask_test_cpu(task_cpu(p), new_mask)) -+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) - goto out; - - dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -3022,6 +3027,69 @@ static inline void schedule_debug(struct - schedstat_inc(this_rq(), sched_count); - } - -+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) -+ -+void migrate_disable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic++; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ WARN_ON_ONCE(p->migrate_disable_atomic); -+#endif -+ -+ if (p->migrate_disable) { -+ p->migrate_disable++; -+ return; -+ } -+ -+ preempt_disable(); -+ pin_current_cpu(); -+ p->migrate_disable = 1; -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_disable); -+ -+void migrate_enable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic--; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ WARN_ON_ONCE(p->migrate_disable_atomic); -+#endif -+ WARN_ON_ONCE(p->migrate_disable <= 0); -+ -+ if (p->migrate_disable > 1) { -+ p->migrate_disable--; -+ return; -+ } -+ -+ preempt_disable(); -+ /* -+ * Clearing migrate_disable causes tsk_cpus_allowed to -+ * show the tasks original cpu affinity. 
-+ */ -+ p->migrate_disable = 0; -+ -+ unpin_current_cpu(); -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_enable); -+#endif -+ - /* - * Pick up the highest-prio task: - */ ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -251,6 +251,9 @@ void print_rt_rq(struct seq_file *m, int - P(rt_throttled); - PN(rt_time); - PN(rt_runtime); -+#ifdef CONFIG_SMP -+ P(rt_nr_migratory); -+#endif - - #undef PN - #undef P -@@ -635,6 +638,10 @@ void proc_sched_show_task(struct task_st - #endif - P(policy); - P(prio); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ P(migrate_disable); -+#endif -+ P(nr_cpus_allowed); - #undef PN - #undef __PN - #undef P ---- a/lib/smp_processor_id.c -+++ b/lib/smp_processor_id.c -@@ -39,8 +39,9 @@ notrace static unsigned int check_preemp - if (!printk_ratelimit()) - goto out_enable; - -- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", -- what1, what2, preempt_count() - 1, current->comm, current->pid); -+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n", -+ what1, what2, preempt_count() - 1, __migrate_disabled(current), -+ current->comm, current->pid); - - print_symbol("caller is %s\n", (long)__builtin_return_address(0)); - dump_stack(); diff --git a/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch b/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch deleted file mode 100644 index ec8af0e80..000000000 --- a/debian/patches/features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch +++ /dev/null @@ -1,228 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 30 Oct 2015 11:59:07 +0100 -Subject: ipc/msg: Implement lockless pipelined wakeups -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This patch moves the wakeup_process() invocation so it is not done under -the perm->lock by making use of a lockless wake_q. With this change, the -waiter is woken up once the message has been assigned and it does not -need to loop on SMP if the message points to NULL. In the signal case we -still need to check the pointer under the lock to verify the state. - -This change should also avoid the introduction of preempt_disable() in --RT, which avoids a busy loop that polls for the NULL -> !NULL -change if the waiter has a higher priority compared to the waker. - -Cc: Davidlohr Bueso -Cc: Manfred Spraul -Cc: Andrew Morton -Cc: George Spelvin -Cc: Thomas Gleixner -Cc: Peter Zijlstra -Signed-off-by: Sebastian Andrzej Siewior ---- - - ipc/msg.c | 101 +++++++++++++++++--------------------------------------------- - 1 file changed, 28 insertions(+), 73 deletions(-) - ---- a/ipc/msg.c -+++ b/ipc/msg.c -@@ -183,20 +183,14 @@ static void ss_wakeup(struct list_head * - } - } - --static void expunge_all(struct msg_queue *msq, int res) -+static void expunge_all(struct msg_queue *msq, int res, -+ struct wake_q_head *wake_q) - { - struct msg_receiver *msr, *t; - - list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { -- msr->r_msg = NULL; /* initialize expunge ordering */ -- wake_up_process(msr->r_tsk); -- /* -- * Ensure that the wakeup is visible before setting r_msg as -- * the receiving end depends on it: either spinning on a nil, -- * or dealing with -EAGAIN cases. See lockless receive part 1 -- * and 2 in do_msgrcv().
-- */ -- smp_wmb(); /* barrier (B) */ -+ -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = ERR_PTR(res); - } - } -@@ -213,11 +207,13 @@ static void freeque(struct ipc_namespace - { - struct msg_msg *msg, *t; - struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); -+ WAKE_Q(wake_q); - -- expunge_all(msq, -EIDRM); -+ expunge_all(msq, -EIDRM, &wake_q); - ss_wakeup(&msq->q_senders, 1); - msg_rmid(ns, msq); - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - rcu_read_unlock(); - - list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { -@@ -342,6 +338,7 @@ static int msgctl_down(struct ipc_namesp - struct kern_ipc_perm *ipcp; - struct msqid64_ds uninitialized_var(msqid64); - struct msg_queue *msq; -+ WAKE_Q(wake_q); - int err; - - if (cmd == IPC_SET) { -@@ -389,7 +386,7 @@ static int msgctl_down(struct ipc_namesp - /* sleeping receivers might be excluded by - * stricter permissions. - */ -- expunge_all(msq, -EAGAIN); -+ expunge_all(msq, -EAGAIN, &wake_q); - /* sleeping senders might be able to send - * due to a larger queue size. - */ -@@ -402,6 +399,7 @@ static int msgctl_down(struct ipc_namesp - - out_unlock0: - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - out_unlock1: - rcu_read_unlock(); - out_up: -@@ -566,7 +564,8 @@ static int testmsg(struct msg_msg *msg, - return 0; - } - --static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) -+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, -+ struct wake_q_head *wake_q) - { - struct msg_receiver *msr, *t; - -@@ -577,27 +576,13 @@ static inline int pipelined_send(struct - - list_del(&msr->r_list); - if (msr->r_maxsize < msg->m_ts) { -- /* initialize pipelined send ordering */ -- msr->r_msg = NULL; -- wake_up_process(msr->r_tsk); -- /* barrier (B) see barrier comment below */ -- smp_wmb(); -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = ERR_PTR(-E2BIG); - } else { -- msr->r_msg = NULL; - msq->q_lrpid = task_pid_vnr(msr->r_tsk); - msq->q_rtime = get_seconds(); -- wake_up_process(msr->r_tsk); -- /* -- * Ensure that the wakeup is visible before -- * setting r_msg, as the receiving can otherwise -- * exit - once r_msg is set, the receiver can -- * continue. See lockless receive part 1 and 2 -- * in do_msgrcv(). Barrier (B). -- */ -- smp_wmb(); -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = msg; -- - return 1; - } - } -@@ -613,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, vo - struct msg_msg *msg; - int err; - struct ipc_namespace *ns; -+ WAKE_Q(wake_q); - - ns = current->nsproxy->ipc_ns; - -@@ -698,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, vo - msq->q_lspid = task_tgid_vnr(current); - msq->q_stime = get_seconds(); - -- if (!pipelined_send(msq, msg)) { -+ if (!pipelined_send(msq, msg, &wake_q)) { - /* no one is waiting for this message, enqueue it */ - list_add_tail(&msg->m_list, &msq->q_messages); - msq->q_cbytes += msgsz; -@@ -712,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, vo - - out_unlock0: - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - out_unlock1: - rcu_read_unlock(); - if (msg != NULL) -@@ -932,57 +919,25 @@ long do_msgrcv(int msqid, void __user *b - rcu_read_lock(); - - /* Lockless receive, part 2: -- * Wait until pipelined_send or expunge_all are outside of -- * wake_up_process(). There is a race with exit(), see -- * ipc/mqueue.c for the details. 
The correct serialization -- * ensures that a receiver cannot continue without the wakeup -- * being visibible _before_ setting r_msg: -+ * The work in pipelined_send() and expunge_all(): -+ * - Set pointer to message -+ * - Queue the receiver task for later wakeup -+ * - Wake up the process after the lock is dropped. - * -- * CPU 0 CPU 1 -- * -- * smp_rmb(); (A) <-- pair -. -- * r_msg> | msr->r_msg = NULL; -- * | wake_up_process(); -- * `------> smp_wmb(); (B) -- * msr->r_msg = msg; -- * -- * Where (A) orders the message value read and where (B) orders -- * the write to the r_msg -- done in both pipelined_send and -- * expunge_all. -+ * Should the process wake up before this wakeup (due to a -+ * signal) it will either see the message and continue … - */ -- for (;;) { -- /* -- * Pairs with writer barrier in pipelined_send -- * or expunge_all. -- */ -- smp_rmb(); /* barrier (A) */ -- msg = (struct msg_msg *)msr_d.r_msg; -- if (msg) -- break; - -- /* -- * The cpu_relax() call is a compiler barrier -- * which forces everything in this loop to be -- * re-loaded. -- */ -- cpu_relax(); -- } -- -- /* Lockless receive, part 3: -- * If there is a message or an error then accept it without -- * locking. -- */ -+ msg = (struct msg_msg *)msr_d.r_msg; - if (msg != ERR_PTR(-EAGAIN)) - goto out_unlock1; - -- /* Lockless receive, part 3: -- * Acquire the queue spinlock. -- */ -+ /* -+ * … or see -EAGAIN, acquire the lock to check the message -+ * again. -+ */ - ipc_lock_object(&msq->q_perm); - -- /* Lockless receive, part 4: -- * Repeat test after acquiring the spinlock. -- */ - msg = (struct msg_msg *)msr_d.r_msg; - if (msg != ERR_PTR(-EAGAIN)) - goto out_unlock0; diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch deleted file mode 100644 index d4db346b2..000000000 --- a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch +++ /dev/null @@ -1,70 +0,0 @@ -Subject: ipc/sem: Rework semaphore wakeups -From: Peter Zijlstra -Date: Wed, 14 Sep 2011 11:57:04 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Current sysv sems have a weird ass wakeup scheme that involves keeping -preemption disabled over a potential O(n^2) loop and busy waiting on -that on other CPUs. - -Kill this and simply wake the task directly from under the sem_lock. 
- -This was discovered by a migrate_disable() debug feature that -disallows: - - spin_lock(); - preempt_disable(); - spin_unlock() - preempt_enable(); - -Cc: Manfred Spraul -Suggested-by: Thomas Gleixner -Reported-by: Mike Galbraith -Signed-off-by: Peter Zijlstra -Cc: Manfred Spraul -Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins -Signed-off-by: Thomas Gleixner ---- - ipc/sem.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/ipc/sem.c -+++ b/ipc/sem.c -@@ -690,6 +690,13 @@ static int perform_atomic_semop(struct s - static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *p = q->sleeper; -+ get_task_struct(p); -+ q->status = error; -+ wake_up_process(p); -+ put_task_struct(p); -+#else - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the -@@ -701,6 +708,7 @@ static void wake_up_sem_queue_prepare(st - q->pid = error; - - list_add_tail(&q->list, pt); -+#endif - } - - /** -@@ -714,6 +722,7 @@ static void wake_up_sem_queue_prepare(st - */ - static void wake_up_sem_queue_do(struct list_head *pt) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - struct sem_queue *q, *t; - int did_something; - -@@ -726,6 +735,7 @@ static void wake_up_sem_queue_do(struct - } - if (did_something) - preempt_enable(); -+#endif - } - - static void unlink_queue(struct sem_array *sma, struct sem_queue *q) diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch deleted file mode 100644 index 1d0838dd3..000000000 --- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch +++ /dev/null @@ -1,147 +0,0 @@ -Subject: genirq: Allow disabling of softirq processing in irq thread context -From: Thomas Gleixner -Date: Tue, 31 Jan 2012 13:01:27 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The processing of softirqs in irq thread context is a performance gain -for the non-rt workloads of a system, but it's counterproductive for -interrupts which are explicitly related to the realtime -workload. Allow such interrupts to prevent softirq processing in their -thread context. - -Signed-off-by: Thomas Gleixner - ---- - include/linux/interrupt.h | 2 ++ - include/linux/irq.h | 4 +++- - kernel/irq/manage.c | 13 ++++++++++++- - kernel/irq/settings.h | 12 ++++++++++++ - kernel/softirq.c | 9 +++++++++ - 5 files changed, 38 insertions(+), 2 deletions(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -61,6 +61,7 @@ - * interrupt handler after suspending interrupts. For system - * wakeup devices users need to implement wakeup detection in - * their interrupt handlers. -+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) - */ - #define IRQF_SHARED 0x00000080 - #define IRQF_PROBE_SHARED 0x00000100 -@@ -74,6 +75,7 @@ - #define IRQF_NO_THREAD 0x00010000 - #define IRQF_EARLY_RESUME 0x00020000 - #define IRQF_COND_SUSPEND 0x00040000 -+#define IRQF_NO_SOFTIRQ_CALL 0x00080000 - - #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) - ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -72,6 +72,7 @@ enum irqchip_irq_state; - * IRQ_IS_POLLED - Always polled by another interrupt. Exclude - * it from the spurious interrupt detection - * mechanism and from core side polling.
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) - * IRQ_DISABLE_UNLAZY - Disable lazy irq disable - */ - enum { -@@ -99,13 +100,14 @@ enum { - IRQ_PER_CPU_DEVID = (1 << 17), - IRQ_IS_POLLED = (1 << 18), - IRQ_DISABLE_UNLAZY = (1 << 19), -+ IRQ_NO_SOFTIRQ_CALL = (1 << 20), - }; - - #define IRQF_MODIFY_MASK \ - (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ -- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) -+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) - - #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) - ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -940,7 +940,15 @@ irq_forced_thread_fn(struct irq_desc *de - local_bh_disable(); - ret = action->thread_fn(action->irq, action->dev_id); - irq_finalize_oneshot(desc, action); -- local_bh_enable(); -+ /* -+ * Interrupts which have real time requirements can be set up -+ * to avoid softirq processing in the thread handler. This is -+ * safe as these interrupts do not raise soft interrupts. -+ */ -+ if (irq_settings_no_softirq_call(desc)) -+ _local_bh_enable(); -+ else -+ local_bh_enable(); - return ret; - } - -@@ -1390,6 +1398,9 @@ static int - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); - } - -+ if (new->flags & IRQF_NO_SOFTIRQ_CALL) -+ irq_settings_set_no_softirq_call(desc); -+ - /* Set default affinity mask once everything is setup */ - setup_affinity(desc, mask); - ---- a/kernel/irq/settings.h -+++ b/kernel/irq/settings.h -@@ -16,6 +16,7 @@ enum { - _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, - _IRQ_IS_POLLED = IRQ_IS_POLLED, - _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, -+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, - _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, - }; - -@@ -30,6 +31,7 @@ enum { - #define IRQ_PER_CPU_DEVID GOT_YOU_MORON - #define IRQ_IS_POLLED GOT_YOU_MORON - #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON -+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON - #undef IRQF_MODIFY_MASK - #define IRQF_MODIFY_MASK GOT_YOU_MORON - -@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc - desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); - } - -+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) -+{ -+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; -+} -+ -+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) -+{ -+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; -+} -+ - static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) - { - return desc->status_use_accessors & _IRQ_PER_CPU; ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -576,6 +576,15 @@ void __local_bh_enable(void) - } - EXPORT_SYMBOL(__local_bh_enable); - -+void _local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(_local_bh_enable); -+ - int in_serving_softirq(void) - { - return current->flags & PF_IN_SOFTIRQ; diff --git a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch b/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch deleted file mode 100644 index 14720b800..000000000 --- a/debian/patches/features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch +++ /dev/null @@ -1,78 +0,0 @@ -Subject: irqwork: Move irq safe work to irq context -From: Thomas Gleixner -Date: Sun, 15 Nov 2015 18:40:17 +0100 -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On architectures where arch_irq_work_has_interrupt() returns false, we -end up running the irq safe work from the softirq context. That -results in a potential deadlock in the scheduler irq work which -expects that function to be called with interrupts disabled. - -Split the irq_work_tick() function into a hard and soft variant. Call -the hard variant from the tick interrupt and add the soft variant to -the timer softirq. - -Reported-and-tested-by: Yanjiang Jin -Signed-off-by: Thomas Gleixner -Cc: stable-rt@vger.kernel.org ---- - include/linux/irq_work.h | 6 ++++++ - kernel/irq_work.c | 9 +++++++++ - kernel/time/timer.c | 6 ++---- - 3 files changed, 17 insertions(+), 4 deletions(-) - ---- a/include/linux/irq_work.h -+++ b/include/linux/irq_work.h -@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(vo - static inline void irq_work_run(void) { } - #endif - -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void); -+#else -+static inline void irq_work_tick_soft(void) { } -+#endif -+ - #endif /* _LINUX_IRQ_WORK_H */ ---- a/kernel/irq_work.c -+++ b/kernel/irq_work.c -@@ -200,8 +200,17 @@ void irq_work_tick(void) - - if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) - irq_work_run_list(raised); -+ -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); -+} -+ -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void) -+{ - irq_work_run_list(this_cpu_ptr(&lazy_list)); - } -+#endif - - /* - * Synchronize against the irq_work @entry, ensures the entry is not ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick) - scheduler_tick(); - run_local_timers(); - rcu_check_callbacks(user_tick); --#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) -+#if defined(CONFIG_IRQ_WORK) - if (in_irq()) - irq_work_tick(); - #endif -@@ -1498,9 +1498,7 @@ static void run_timer_softirq(struct sof - { - struct tvec_base *base = this_cpu_ptr(&tvec_bases); - --#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -- irq_work_tick(); --#endif -+ irq_work_tick_soft(); - - if (time_after_eq(jiffies, base->timer_jiffies)) - __run_timers(base); diff --git a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch deleted file mode 100644 index b416f2064..000000000 --- a/debian/patches/features/all/rt/irqwork-push_most_work_into_softirq_context.patch +++ /dev/null @@ -1,198 +0,0 @@ -Subject: irqwork: push most work into softirq context -From: Sebastian Andrzej Siewior -Date: Tue, 23 Jun 2015 15:32:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Initially we deferred all irqwork into softirq because we didn't want the -latency spikes if perf or another user was busy and delayed the RT task. -The NOHZ trigger (nohz_full_kick_work) was the first user that did not work -as expected if it did not run in the original irqwork context so we had to -bring it back somehow for it. push_irq_work_func is the second one that -requires this. - -This patch adds the IRQ_WORK_HARD_IRQ which makes sure the callback runs -in raw-irq context. Everything else is deferred into softirq context. Without --RT we have the original behavior.
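As a sketch of the opt-out this flag provides (my_work, my_hard_cb and my_setup are made-up names; the pattern mirrors the sched/rt.c hunk further down, which tags push_work the same way):

static void my_hard_cb(struct irq_work *work)
{
	/* runs from the raw hard-irq path even on PREEMPT_RT_FULL */
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_hard_cb);
	my_work.flags |= IRQ_WORK_HARD_IRQ;	/* do not defer to the timer softirq */
	irq_work_queue(&my_work);
}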
- -This patch incorporates tglx's original work, reworked a little, bringing back -the arch_irq_work_raise() if possible, and a few fixes from Steven Rostedt and -Mike Galbraith. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/irq_work.h | 1 + - kernel/irq_work.c | 47 ++++++++++++++++++++++++++++++++++------------- - kernel/sched/rt.c | 1 + - kernel/time/tick-sched.c | 6 ++++++ - kernel/time/timer.c | 6 +++++- - 5 files changed, 47 insertions(+), 14 deletions(-) - ---- a/include/linux/irq_work.h -+++ b/include/linux/irq_work.h -@@ -16,6 +16,7 @@ - #define IRQ_WORK_BUSY 2UL - #define IRQ_WORK_FLAGS 3UL - #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ -+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ - - struct irq_work { - unsigned long flags; ---- a/kernel/irq_work.c -+++ b/kernel/irq_work.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - - -@@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void) - */ - bool irq_work_queue_on(struct irq_work *work, int cpu) - { -+ struct llist_head *list; -+ - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work * - if (!irq_work_claim(work)) - return false; - -- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ list = &per_cpu(lazy_list, cpu); -+ else -+ list = &per_cpu(raised_list, cpu); -+ -+ if (llist_add(&work->llnode, list)) - arch_send_call_function_single_ipi(cpu); - - return true; -@@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on); - /* Enqueue the irq work @work on the current CPU */ - bool irq_work_queue(struct irq_work *work) - { -+ struct llist_head *list; -+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); -+ - /* Only queue if not already pending */ - if (!irq_work_claim(work)) - return false; -@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *wor - /* Queue the entry and raise the IPI if needed.
*/ - preempt_disable(); - -- /* If the work is "lazy", handle it from next tick if any */ -- if (work->flags & IRQ_WORK_LAZY) { -- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && -- tick_nohz_tick_stopped()) -- arch_irq_work_raise(); -- } else { -- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) -+ lazy_work = work->flags & IRQ_WORK_LAZY; -+ -+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) -+ list = this_cpu_ptr(&lazy_list); -+ else -+ list = this_cpu_ptr(&raised_list); -+ -+ if (llist_add(&work->llnode, list)) { -+ if (!lazy_work || tick_nohz_tick_stopped()) - arch_irq_work_raise(); - } - -@@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void) - raised = this_cpu_ptr(&raised_list); - lazy = this_cpu_ptr(&lazy_list); - -- if (llist_empty(raised) || arch_irq_work_has_interrupt()) -- if (llist_empty(lazy)) -- return false; -+ if (llist_empty(raised) && llist_empty(lazy)) -+ return false; - - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -132,7 +144,7 @@ static void irq_work_run_list(struct lli - struct irq_work *work; - struct llist_node *llnode; - -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - - if (llist_empty(list)) - return; -@@ -169,7 +181,16 @@ static void irq_work_run_list(struct lli - void irq_work_run(void) - { - irq_work_run_list(this_cpu_ptr(&raised_list)); -- irq_work_run_list(this_cpu_ptr(&lazy_list)); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { -+ /* -+ * NOTE: we raise softirq via IPI for safety, -+ * and execute in irq_work_tick() to move the -+ * overhead from hard to soft irq context. -+ */ -+ if (!llist_empty(this_cpu_ptr(&lazy_list))) -+ raise_softirq(TIMER_SOFTIRQ); -+ } else -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); - } - EXPORT_SYMBOL_GPL(irq_work_run); - ---- a/kernel/sched/rt.c -+++ b/kernel/sched/rt.c -@@ -94,6 +94,7 @@ void init_rt_rq(struct rt_rq *rt_rq) - rt_rq->push_cpu = nr_cpu_ids; - raw_spin_lock_init(&rt_rq->push_lock); - init_irq_work(&rt_rq->push_work, push_irq_work_func); -+ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ; - #endif - #endif /* CONFIG_SMP */ - /* We start is dequeued state, because no RT tasks are queued */ ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -181,6 +181,11 @@ static bool can_stop_full_tick(void) - return false; - } - -+ if (!arch_irq_work_has_interrupt()) { -+ trace_tick_stop(0, "missing irq work interrupt\n"); -+ return false; -+ } -+ - /* sched_clock_tick() needs us? 
*/ - #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK - /* -@@ -209,6 +214,7 @@ static void nohz_full_kick_work_func(str - - static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { - .func = nohz_full_kick_work_func, -+ .flags = IRQ_WORK_HARD_IRQ, - }; - - /* ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick) - scheduler_tick(); - run_local_timers(); - rcu_check_callbacks(user_tick); --#ifdef CONFIG_IRQ_WORK -+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) - if (in_irq()) - irq_work_tick(); - #endif -@@ -1498,6 +1498,10 @@ static void run_timer_softirq(struct sof - { - struct tvec_base *base = this_cpu_ptr(&tvec_bases); - -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+ irq_work_tick(); -+#endif -+ - if (time_after_eq(jiffies, base->timer_jiffies)) - __run_timers(base); - } diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch deleted file mode 100644 index ca0ef11f0..000000000 --- a/debian/patches/features/all/rt/jump-label-rt.patch +++ /dev/null @@ -1,36 +0,0 @@ -Subject: jump-label: disable if stop_machine() is used -From: Thomas Gleixner -Date: Wed, 08 Jul 2015 17:14:48 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Some architectures are using stop_machine() while switching the opcode which -leads to latency spikes. -The architectures which use stop_machine() atm: -- ARM stop machine -- s390 stop machine - -The architectures which use other sorcery: -- MIPS -- X86 -- powerpc -- sparc -- arm64 - -Signed-off-by: Thomas Gleixner -[bigeasy: only ARM for now] -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -33,7 +33,7 @@ config ARM - select HARDIRQS_SW_RESEND - select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) - select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 -- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 -+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE - select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 - select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) - select HAVE_ARCH_TRACEHOOK diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch deleted file mode 100644 index a97425bad..000000000 --- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch +++ /dev/null @@ -1,34 +0,0 @@ -Subject: kconfig: Disable config options which are not RT compatible -From: Thomas Gleixner -Date: Sun, 24 Jul 2011 12:11:43 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Disable stuff which is known to have issues on RT - -Signed-off-by: Thomas Gleixner ---- - arch/Kconfig | 1 + - mm/Kconfig | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/Kconfig -+++ b/arch/Kconfig -@@ -9,6 +9,7 @@ config OPROFILE - tristate "OProfile system profiling" - depends on PROFILING - depends on HAVE_OPROFILE -+ depends on !PREEMPT_RT_FULL - select RING_BUFFER - select RING_BUFFER_ALLOW_SWAP - help ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS - - config TRANSPARENT_HUGEPAGE - bool "Transparent Hugepage Support" -- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE -+ depends on
HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL - select COMPACTION - help - Transparent Hugepages allows the kernel to use huge pages and diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch deleted file mode 100644 index 7c44ed138..000000000 --- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch +++ /dev/null @@ -1,59 +0,0 @@ -Subject: kconfig: Add PREEMPT_RT_FULL -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 14:58:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Introduce the final symbol for PREEMPT_RT_FULL. - -Signed-off-by: Thomas Gleixner ---- - init/Makefile | 2 +- - kernel/Kconfig.preempt | 8 ++++++++ - scripts/mkcompile_h | 4 +++- - 3 files changed, 12 insertions(+), 2 deletions(-) - ---- a/init/Makefile -+++ b/init/Makefile -@@ -33,4 +33,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts - include/generated/compile.h: FORCE - @$($(quiet)chk_compile.h) - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ -- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" -+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -67,6 +67,14 @@ config PREEMPT_RTB - enables changes which are preliminary for the full preemptible - RT kernel. - -+config PREEMPT_RT_FULL -+ bool "Fully Preemptible Kernel (RT)" -+ depends on IRQ_FORCED_THREADING -+ select PREEMPT_RT_BASE -+ select PREEMPT_RCU -+ help -+ All and everything -+ - endchoice - - config PREEMPT_COUNT ---- a/scripts/mkcompile_h -+++ b/scripts/mkcompile_h -@@ -4,7 +4,8 @@ TARGET=$1 - ARCH=$2 - SMP=$3 - PREEMPT=$4 --CC=$5 -+RT=$5 -+CC=$6 - - vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } - -@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION" - CONFIG_FLAGS="" - if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi - if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi -+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi - UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" - - # Truncate to maximum length diff --git a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch deleted file mode 100644 index 4a590af02..000000000 --- a/debian/patches/features/all/rt/kernel-SRCU-provide-a-static-initializer.patch +++ /dev/null @@ -1,125 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 19 Mar 2013 14:44:30 +0100 -Subject: kernel/SRCU: provide a static initializer -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -There are macros providing static initializers for three of the four -possible notifier types, namely: - ATOMIC_NOTIFIER_HEAD() - BLOCKING_NOTIFIER_HEAD() - RAW_NOTIFIER_HEAD() - -This patch provides a static initializer for the fourth type to make it -complete. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/notifier.h | 34 +++++++++++++++++++++++++--------- - include/linux/srcu.h | 6 +++--- - 2 files changed, 28 insertions(+), 12 deletions(-) - ---- a/include/linux/notifier.h -+++ b/include/linux/notifier.h -@@ -6,7 +6,7 @@ - * - * Alan Cox - */ -- -+ - #ifndef _LINUX_NOTIFIER_H - #define _LINUX_NOTIFIER_H - #include -@@ -42,9 +42,7 @@ - * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
- * As compensation, srcu_notifier_chain_unregister() is rather expensive. - * SRCU notifier chains should be used when the chain will be called very -- * often but notifier_blocks will seldom be removed. Also, SRCU notifier -- * chains are slightly more difficult to use because they require special -- * runtime initialization. -+ * often but notifier_blocks will seldom be removed. - */ - - typedef int (*notifier_fn_t)(struct notifier_block *nb, -@@ -88,7 +86,7 @@ struct srcu_notifier_head { - (name)->head = NULL; \ - } while (0) - --/* srcu_notifier_heads must be initialized and cleaned up dynamically */ -+/* srcu_notifier_heads must be cleaned up dynamically */ - extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); - #define srcu_cleanup_notifier_head(name) \ - cleanup_srcu_struct(&(name)->srcu); -@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru - .head = NULL } - #define RAW_NOTIFIER_INIT(name) { \ - .head = NULL } --/* srcu_notifier_heads cannot be initialized statically */ -+ -+#define SRCU_NOTIFIER_INIT(name, pcpu) \ -+ { \ -+ .mutex = __MUTEX_INITIALIZER(name.mutex), \ -+ .head = NULL, \ -+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ -+ } - - #define ATOMIC_NOTIFIER_HEAD(name) \ - struct atomic_notifier_head name = \ -@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru - struct raw_notifier_head name = \ - RAW_NOTIFIER_INIT(name) - -+#define _SRCU_NOTIFIER_HEAD(name, mod) \ -+ static DEFINE_PER_CPU(struct srcu_struct_array, \ -+ name##_head_srcu_array); \ -+ mod struct srcu_notifier_head name = \ -+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) -+ -+#define SRCU_NOTIFIER_HEAD(name) \ -+ _SRCU_NOTIFIER_HEAD(name, ) -+ -+#define SRCU_NOTIFIER_HEAD_STATIC(name) \ -+ _SRCU_NOTIFIER_HEAD(name, static) -+ - #ifdef __KERNEL__ - - extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, -@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int - - /* - * Declared notifiers so far. I can imagine quite a few more chains -- * over time (eg laptop power reset chains, reboot chain (to clean -+ * over time (eg laptop power reset chains, reboot chain (to clean - * device units up), device [un]mount chain, module load/unload chain, -- * low memory chain, screenblank chain (for plug in modular screenblankers) -+ * low memory chain, screenblank chain (for plug in modular screenblankers) - * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... - */ -- -+ - /* CPU notfiers are defined in include/linux/cpu.h. 
*/ - - /* netdevice notifiers are defined in include/linux/netdevice.h */ ---- a/include/linux/srcu.h -+++ b/include/linux/srcu.h -@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct - - void process_srcu(struct work_struct *work); - --#define __SRCU_STRUCT_INIT(name) \ -+#define __SRCU_STRUCT_INIT(name, pcpu_name) \ - { \ - .completed = -300, \ -- .per_cpu_ref = &name##_srcu_array, \ -+ .per_cpu_ref = &pcpu_name, \ - .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .running = false, \ - .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ -@@ -104,7 +104,7 @@ void process_srcu(struct work_struct *wo - */ - #define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ -- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) -+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array) - #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) - #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) - diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch deleted file mode 100644 index a95325d10..000000000 --- a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch +++ /dev/null @@ -1,86 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 7 Jun 2013 22:37:06 +0200 -Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If a kthread is pinned to CPUx and CPUx is going down, then we get into -trouble: -- first the unplug thread is created -- it will set itself to hp->unplug. As a result, every task that is - going to take a lock has to leave the CPU. -- the CPU_DOWN_PREPARE notifiers are started. The worker thread will - start a new process for the "high priority worker". - Now the kthread would like to take a lock, but since it can't leave the CPU, - it will never complete its task. - -We could fire the unplug thread after the notifiers, but then the cpu is -no longer marked "online" and the unplug thread will run on CPU0 which -was fixed before :) - -So instead the unplug thread is started and kept waiting until the -notifiers complete their work. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 15 +++++++++++++-- - 1 file changed, 13 insertions(+), 2 deletions(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -109,6 +109,7 @@ struct hotplug_pcp { - int refcount; - int grab_lock; - struct completion synced; -+ struct completion unplug_wait; - #ifdef CONFIG_PREEMPT_RT_FULL - /* - * Note, on PREEMPT_RT, the hotplug lock must save the state of -@@ -212,6 +213,7 @@ static int sync_unplug_thread(void *data - { - struct hotplug_pcp *hp = data; - -+ wait_for_completion(&hp->unplug_wait); - preempt_disable(); - hp->unplug = current; - wait_for_pinned_cpus(hp); -@@ -277,6 +279,14 @@ static void __cpu_unplug_sync(struct hot - wait_for_completion(&hp->synced); - } - -+static void __cpu_unplug_wait(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ complete(&hp->unplug_wait); -+ wait_for_completion(&hp->synced); -+} -+ - /* - * Start the sync_unplug_thread on the target cpu and wait for it to - * complete.
-@@ -300,6 +310,7 @@ static int cpu_unplug_begin(unsigned int - tell_sched_cpu_down_begin(cpu); - - init_completion(&hp->synced); -+ init_completion(&hp->unplug_wait); - - hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); - if (IS_ERR(hp->sync_tsk)) { -@@ -315,8 +326,7 @@ static int cpu_unplug_begin(unsigned int - * wait for tasks that are going to enter these sections and - * we must not have them block. - */ -- __cpu_unplug_sync(hp); -- -+ wake_up_process(hp->sync_tsk); - return 0; - } - -@@ -671,6 +681,7 @@ static int _cpu_down(unsigned int cpu, i - else - synchronize_rcu(); - -+ __cpu_unplug_wait(cpu); - smpboot_park_threads(cpu); - - /* Notifiers are done. Don't let any more tasks pin this CPU. */ diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch deleted file mode 100644 index 3458fae52..000000000 --- a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch +++ /dev/null @@ -1,59 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 14 Jun 2013 17:16:35 +0200 -Subject: kernel/hotplug: restore original cpu mask oncpu/down -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If a task which is allowed to run only on CPU X puts CPU Y down, then it -will afterwards be allowed to run on all CPUs but CPU Y once it comes back -from the kernel. This patch ensures that we don't lose the initial setting -unless the CPU the task is running on is going down. - - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 13 ++++++++++++- - 1 file changed, 12 insertions(+), 1 deletion(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -629,6 +629,7 @@ static int _cpu_down(unsigned int cpu, i - .hcpu = hcpu, - }; - cpumask_var_t cpumask; -+ cpumask_var_t cpumask_org; - - if (num_online_cpus() == 1) - return -EBUSY; -@@ -639,6 +640,12 @@ static int _cpu_down(unsigned int cpu, i - /* Move the downtaker off the unplug cpu */ - if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) - return -ENOMEM; -+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { -+ free_cpumask_var(cpumask); -+ return -ENOMEM; -+ } -+ -+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); - cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); - set_cpus_allowed_ptr(current, cpumask); - free_cpumask_var(cpumask); -@@ -647,7 +654,8 @@ static int _cpu_down(unsigned int cpu, i - if (mycpu == cpu) { - printk(KERN_ERR "Yuck!
Still on unplug CPU\n!"); - migrate_enable(); -- return -EBUSY; -+ err = -EBUSY; -+ goto restore_cpus; - } - - cpu_hotplug_begin(); -@@ -737,6 +745,9 @@ static int _cpu_down(unsigned int cpu, i - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -+restore_cpus: -+ set_cpus_allowed_ptr(current, cpumask_org); -+ free_cpumask_var(cpumask_org); - return err; - } - diff --git a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch deleted file mode 100644 index afa8e803d..000000000 --- a/debian/patches/features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 4 Feb 2016 16:38:10 +0100 -Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Otherwise we get a WARN_ON() backtrace and some events are reported as -"not counted". - -Cc: stable-rt@vger.kernel.org -Reported-by: Yang Shi -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/events/core.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/kernel/events/core.c b/kernel/events/core.c ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) - raw_spin_lock_init(&cpuctx->hrtimer_lock); - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); - timer->function = perf_mux_hrtimer_handler; -+ timer->irqsafe = 1; - } - - static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch deleted file mode 100644 index 7c6ae67b6..000000000 --- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch +++ /dev/null @@ -1,102 +0,0 @@ -From: Jason Wessel -Date: Thu, 28 Jul 2011 12:42:23 -0500 -Subject: kgdb/serial: Short term workaround -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On 07/27/2011 04:37 PM, Thomas Gleixner wrote: -> - KGDB (not yet disabled) is reportedly unusable on -rt right now due -> to missing hacks in the console locking which I dropped on purpose. -> - -To work around this in the short term you can use this patch, in -addition to the clocksource watchdog patch that Thomas brewed up. - -Comments are welcome of course. Ultimately the right solution is to -change separation between the console and the HW to have a polled mode -+ work queue so as not to introduce any kind of latency. - -Thanks, -Jason. 
- ---- - drivers/tty/serial/8250/8250_port.c | 3 ++- - include/linux/kdb.h | 2 ++ - kernel/debug/kdb/kdb_io.c | 6 ++---- - 3 files changed, 6 insertions(+), 5 deletions(-) - ---- a/drivers/tty/serial/8250/8250_port.c -+++ b/drivers/tty/serial/8250/8250_port.c -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -2851,7 +2852,7 @@ void serial8250_console_write(struct uar - - if (port->sysrq) - locked = 0; -- else if (oops_in_progress) -+ else if (oops_in_progress || in_kdb_printk()) - locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); ---- a/include/linux/kdb.h -+++ b/include/linux/kdb.h -@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(en - extern __printf(1, 2) int kdb_printf(const char *, ...); - typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); - -+#define in_kdb_printk() (kdb_trap_printk) - extern void kdb_init(int level); - - /* Access to kdb specific polling devices */ -@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kd - extern int kdb_unregister(char *); - #else /* ! CONFIG_KGDB_KDB */ - static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } -+#define in_kdb_printk() (0) - static inline void kdb_init(int level) {} - static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } ---- a/kernel/debug/kdb/kdb_io.c -+++ b/kernel/debug/kdb/kdb_io.c -@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, con - int linecount; - int colcount; - int logging, saved_loglevel = 0; -- int saved_trap_printk; - int got_printf_lock = 0; - int retlen = 0; - int fnd, len; -@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, con - unsigned long uninitialized_var(flags); - - preempt_disable(); -- saved_trap_printk = kdb_trap_printk; -- kdb_trap_printk = 0; - - /* Serialize kdb_printf if multiple cpus try to write at once. - * But if any cpu goes recursive in kdb, just print the output, -@@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, con - } else { - __release(kdb_printf_lock); - } -- kdb_trap_printk = saved_trap_printk; - preempt_enable(); - return retlen; - } -@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...) - va_list ap; - int r; - -+ kdb_trap_printk++; - va_start(ap, fmt); - r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); - va_end(ap); -+ kdb_trap_printk--; - - return r; - } diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch deleted file mode 100644 index 6371f8a6d..000000000 --- a/debian/patches/features/all/rt/latency-hist.patch +++ /dev/null @@ -1,1817 +0,0 @@ -Subject: tracing: Add latency histograms -From: Carsten Emde -Date: Tue, 19 Jul 2011 14:03:41 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This patch provides a recording mechanism to store data of potential -sources of system latencies. The recordings separately determine the -latency caused by a delayed timer expiration, by a delayed wakeup of the -related user space program and by the sum of both. The histograms can be -enabled and reset individually. The data are accessible via the debug -filesystem. For details please consult Documentation/trace/histograms.txt. 
- -Signed-off-by: Carsten Emde -Signed-off-by: Thomas Gleixner - --- - Documentation/trace/histograms.txt | 186 +++++ - include/linux/hrtimer.h | 4 - include/linux/sched.h | 6 - include/trace/events/hist.h | 72 ++ - include/trace/events/latency_hist.h | 29 - kernel/time/hrtimer.c | 21 - kernel/trace/Kconfig | 104 +++ - kernel/trace/Makefile | 4 - kernel/trace/latency_hist.c | 1178 ++++++++++++++++++++++++++++++++++++ - kernel/trace/trace_irqsoff.c | 11 - 10 files changed, 1615 insertions(+) - ---- /dev/null -+++ b/Documentation/trace/histograms.txt -@@ -0,0 +1,186 @@ -+ Using the Linux Kernel Latency Histograms -+ -+ -+This document gives a short explanation of how to enable, configure and use -+latency histograms. Latency histograms are primarily relevant in the -+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) -+and are used in the quality management of the Linux real-time -+capabilities. -+ -+ -+* Purpose of latency histograms -+ -+A latency histogram continuously accumulates the frequencies of latency -+data. There are two types of histograms: -+- potential sources of latencies -+- effective latencies -+ -+ -+* Potential sources of latencies -+ -+Potential sources of latencies are code segments where interrupts, -+preemption or both are disabled (aka critical sections). To create -+histograms of potential sources of latency, the kernel stores the time -+stamp at the start of a critical section, determines the time elapsed -+when the end of the section is reached, and increments the frequency -+counter of that latency value - irrespective of whether any concurrently -+running process is affected by latency or not. -+- Configuration items (in the Kernel hacking/Tracers submenu) -+ CONFIG_INTERRUPT_OFF_HIST -+ CONFIG_PREEMPT_OFF_HIST -+ -+ -+* Effective latencies -+ -+Effective latencies are those actually occurring during wakeup of a process. To -+determine effective latencies, the kernel stores the time stamp when a -+process is scheduled to be woken up, and determines the duration of the -+wakeup time shortly before control is passed over to this process. Note -+that the apparent latency in user space may be somewhat longer, since the -+process may be interrupted after control is passed over to it but before -+the execution in user space takes place. Simply measuring the interval -+between enqueuing and wakeup may also not be appropriate in cases when a -+process is scheduled as a result of a timer expiration. The timer may have -+missed its deadline, e.g. due to disabled interrupts, but this latency -+would not be registered. Therefore, the offsets of missed timers are -+recorded in a separate histogram. If both wakeup latency and missed timer -+offsets are configured and enabled, a third histogram may be enabled that -+records the overall latency as a sum of the timer latency, if any, and the -+wakeup latency. This histogram is called "timerandwakeup". -+- Configuration items (in the Kernel hacking/Tracers submenu) -+ CONFIG_WAKEUP_LATENCY_HIST -+ CONFIG_MISSED_TIMER_OFFSETS_HIST -+ -+ -+* Usage -+ -+The interface to the administration of the latency histograms is located -+in the debugfs file system. To mount it, either enter -+ -+mount -t sysfs nodev /sys -+mount -t debugfs nodev /sys/kernel/debug -+ -+from the shell command line, or add -+ -+nodev /sys sysfs defaults 0 0 -+nodev /sys/kernel/debug debugfs defaults 0 0 -+ -+to the file /etc/fstab. All latency histogram related files are then -+available in the directory /sys/kernel/debug/tracing/latency_hist.
A -+particular histogram type is enabled by writing non-zero to the related -+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. -+Select "preemptirqsoff" for the histograms of potential sources of -+latencies and "wakeup" for histograms of effective latencies etc. The -+histogram data - one per CPU - are available in the files -+ -+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx -+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx -+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx -+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx -+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx -+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx -+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx -+ -+The histograms are reset by writing non-zero to the file "reset" in a -+particular latency directory. To reset all latency data, use -+ -+#!/bin/sh -+ -+TRACINGDIR=/sys/kernel/debug/tracing -+HISTDIR=$TRACINGDIR/latency_hist -+ -+if test -d $HISTDIR -+then -+ cd $HISTDIR -+ for i in `find . | grep /reset$` -+ do -+ echo 1 >$i -+ done -+fi -+ -+ -+* Data format -+ -+Latency data are stored with a resolution of one microsecond. The -+maximum latency is 10,240 microseconds. The data are only valid if the -+overflow register is empty. Every output line contains the latency in -+microseconds in the first column and the number of samples in the second -+column. To display only lines with a positive latency count, use, for -+example, -+ -+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 -+ -+#Minimum latency: 0 microseconds. -+#Average latency: 0 microseconds. -+#Maximum latency: 25 microseconds. -+#Total samples: 3104770694 -+#There are 0 samples greater or equal than 10240 microseconds -+#usecs samples -+ 0 2984486876 -+ 1 49843506 -+ 2 58219047 -+ 3 5348126 -+ 4 2187960 -+ 5 3388262 -+ 6 959289 -+ 7 208294 -+ 8 40420 -+ 9 4485 -+ 10 14918 -+ 11 18340 -+ 12 25052 -+ 13 19455 -+ 14 5602 -+ 15 969 -+ 16 47 -+ 17 18 -+ 18 14 -+ 19 1 -+ 20 3 -+ 21 2 -+ 22 5 -+ 23 2 -+ 25 1 -+ -+ -+* Wakeup latency of a selected process -+ -+To only collect wakeup latency data of a particular process, write the -+PID of the requested process to -+ -+/sys/kernel/debug/tracing/latency_hist/wakeup/pid -+ -+PIDs are not considered if this variable is set to 0. -+ -+ -+* Details of the process with the highest wakeup latency so far -+ -+Selected data of the process that suffered from the highest wakeup -+latency that occurred in a particular CPU are available in the file -+ -+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. -+ -+In addition, other relevant system data at the time when the -+latency occurred are given. -+ -+The format of the data is (all in one line): -+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \ -+<- <PID> <Priority> <Command> <Timestamp> -+ -+The value of <Timeroffset> is only relevant in the combined timer -+and wakeup latency recording. In the wakeup recording, it is -+always 0, in the missed_timer_offsets recording, it is the same -+as <Latency>. -+ -+When retrospectively searching for the origin of a latency and -+tracing was not enabled, it may be helpful to know the name and -+some basic data of the task that (finally) was switching to the -+late real-time task. In addition to the victim's data, also the -+data of the possible culprit are therefore displayed after the -+"<-" symbol. -+ -+Finally, the timestamp of the time when the latency occurred -+in <seconds>.<microseconds> after the most recent system boot -+is provided. -+ -+These data are also reset when the wakeup histogram is reset.
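For quick reference, the enable/read/reset workflow described in the histograms.txt text above can be exercised end to end from a shell. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and a kernel built with CONFIG_WAKEUP_LATENCY_HIST; all paths are the ones listed in the document:

# enable the wakeup histogram, then let the workload run for a while
echo 1 > /sys/kernel/debug/tracing/latency_hist/enable/wakeup
sleep 60
# show only latency buckets that actually collected samples on CPU0
grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0
# inspect the worst-case process recorded for CPU0, then clear the data
cat /sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPU0
echo 1 > /sys/kernel/debug/tracing/latency_hist/wakeup/reset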
---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -87,6 +87,7 @@ enum hrtimer_restart { - * @function: timer expiry callback function - * @base: pointer to the timer base (per cpu and per clock) - * @state: state information (See bit values above) -+ * @praecox: timer expiry time if expired at the time of programming - * @start_pid: timer statistics field to store the pid of the task which - * started the timer - * @start_site: timer statistics field to store the site where the timer -@@ -102,6 +103,9 @@ struct hrtimer { - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - unsigned long state; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ ktime_t praecox; -+#endif - #ifdef CONFIG_TIMER_STATS - int start_pid; - void *start_site; ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1796,6 +1796,12 @@ struct task_struct { - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ u64 preempt_timestamp_hist; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ long timer_offset; -+#endif -+#endif - #endif /* CONFIG_TRACING */ - #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; ---- /dev/null -+++ b/include/trace/events/hist.h -@@ -0,0 +1,72 @@ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM hist -+ -+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _TRACE_HIST_H -+ -+#include "latency_hist.h" -+#include -+ -+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) -+#define trace_preemptirqsoff_hist(a, b) -+#else -+TRACE_EVENT(preemptirqsoff_hist, -+ -+ TP_PROTO(int reason, int starthist), -+ -+ TP_ARGS(reason, starthist), -+ -+ TP_STRUCT__entry( -+ __field(int, reason) -+ __field(int, starthist) -+ ), -+ -+ TP_fast_assign( -+ __entry->reason = reason; -+ __entry->starthist = starthist; -+ ), -+ -+ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), -+ __entry->starthist ? "start" : "stop") -+); -+#endif -+ -+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST -+#define trace_hrtimer_interrupt(a, b, c, d) -+#else -+TRACE_EVENT(hrtimer_interrupt, -+ -+ TP_PROTO(int cpu, long long offset, struct task_struct *curr, -+ struct task_struct *task), -+ -+ TP_ARGS(cpu, offset, curr, task), -+ -+ TP_STRUCT__entry( -+ __field(int, cpu) -+ __field(long long, offset) -+ __array(char, ccomm, TASK_COMM_LEN) -+ __field(int, cprio) -+ __array(char, tcomm, TASK_COMM_LEN) -+ __field(int, tprio) -+ ), -+ -+ TP_fast_assign( -+ __entry->cpu = cpu; -+ __entry->offset = offset; -+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); -+ __entry->cprio = curr->prio; -+ memcpy(__entry->tcomm, task != NULL ? task->comm : "", -+ task != NULL ? TASK_COMM_LEN : 7); -+ __entry->tprio = task != NULL ? 
task->prio : -1; -+ ), -+ -+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", -+ __entry->cpu, __entry->offset, __entry->ccomm, -+ __entry->cprio, __entry->tcomm, __entry->tprio) -+); -+#endif -+ -+#endif /* _TRACE_HIST_H */ -+ -+/* This part must be outside protection */ -+#include ---- /dev/null -+++ b/include/trace/events/latency_hist.h -@@ -0,0 +1,29 @@ -+#ifndef _LATENCY_HIST_H -+#define _LATENCY_HIST_H -+ -+enum hist_action { -+ IRQS_ON, -+ PREEMPT_ON, -+ TRACE_STOP, -+ IRQS_OFF, -+ PREEMPT_OFF, -+ TRACE_START, -+}; -+ -+static char *actions[] = { -+ "IRQS_ON", -+ "PREEMPT_ON", -+ "TRACE_STOP", -+ "IRQS_OFF", -+ "PREEMPT_OFF", -+ "TRACE_START", -+}; -+ -+static inline char *getaction(int action) -+{ -+ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) -+ return actions[action]; -+ return "unknown"; -+} -+ -+#endif /* _LATENCY_HIST_H */ ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -53,6 +53,7 @@ - #include - - #include -+#include - - #include "tick-internal.h" - -@@ -994,7 +995,16 @@ void hrtimer_start_range_ns(struct hrtim - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); - - timer_stats_hrtimer_set_start_info(timer); -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ { -+ ktime_t now = new_base->get_time(); - -+ if (ktime_to_ns(tim) < ktime_to_ns(now)) -+ timer->praecox = now; -+ else -+ timer->praecox = ktime_set(0, 0); -+ } -+#endif - leftmost = enqueue_hrtimer(timer, new_base); - if (!leftmost) - goto unlock; -@@ -1256,6 +1266,8 @@ static void __run_hrtimer(struct hrtimer - cpu_base->running = NULL; - } - -+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); -+ - static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) - { - struct hrtimer_clock_base *base = cpu_base->clock_base; -@@ -1275,6 +1287,15 @@ static void __hrtimer_run_queues(struct - - timer = container_of(node, struct hrtimer, node); - -+ trace_hrtimer_interrupt(raw_smp_processor_id(), -+ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? -+ timer->praecox : hrtimer_get_expires(timer), -+ basenow)), -+ current, -+ timer->function == hrtimer_wakeup ? -+ container_of(timer, struct hrtimer_sleeper, -+ timer)->task : NULL); -+ - /* - * The immediate goal for using the softexpires is - * minimizing wakeups, not running timers at the ---- a/kernel/trace/Kconfig -+++ b/kernel/trace/Kconfig -@@ -187,6 +187,24 @@ config IRQSOFF_TRACER - enabled. This option and the preempt-off timing option can be - used together or separately.) - -+config INTERRUPT_OFF_HIST -+ bool "Interrupts-off Latency Histogram" -+ depends on IRQSOFF_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the duration of time periods with interrupts disabled. The -+ histograms are disabled by default. To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff -+ -+ If PREEMPT_OFF_HIST is also selected, additional histograms (one -+ per cpu) are generated that accumulate the duration of time periods -+ when both interrupts and preemption are disabled. The histogram data -+ will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/irqsoff -+ - config PREEMPT_TRACER - bool "Preemption-off Latency Tracer" - default n -@@ -211,6 +229,24 @@ config PREEMPT_TRACER - enabled. This option and the irqs-off timing option can be - used together or separately.) 
- -+config PREEMPT_OFF_HIST -+ bool "Preemption-off Latency Histogram" -+ depends on PREEMPT_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the duration of time periods with preemption disabled. The -+ histograms are disabled by default. To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff -+ -+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one -+ per cpu) are generated that accumulate the duration of time periods -+ when both interrupts and preemption are disabled. The histogram data -+ will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/preemptoff -+ - config SCHED_TRACER - bool "Scheduling Latency Tracer" - select GENERIC_TRACER -@@ -221,6 +257,74 @@ config SCHED_TRACER - This tracer tracks the latency of the highest priority task - to be scheduled in, starting from the point it has woken up. - -+config WAKEUP_LATENCY_HIST -+ bool "Scheduling Latency Histogram" -+ depends on SCHED_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the scheduling latency of the highest priority task. -+ The histograms are disabled by default. To enable them, write a -+ non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/wakeup -+ -+ Two different algorithms are used, one to determine the latency of -+ processes that exclusively use the highest priority of the system and -+ another one to determine the latency of processes that share the -+ highest system priority with other processes. The former is used to -+ improve hardware and system software, the latter to optimize the -+ priority design of a given system. The histogram data will be -+ located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/wakeup -+ -+ and -+ -+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio -+ -+ If both Scheduling Latency Histogram and Missed Timer Offsets -+ Histogram are selected, additional histogram data will be collected -+ that contain, in addition to the wakeup latency, the timer latency, in -+ case the wakeup was triggered by an expired timer. These histograms -+ are available in the -+ -+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup -+ -+ directory. They reflect the apparent interrupt and scheduling latency -+ and are best suitable to determine the worst-case latency of a given -+ system. To enable these histograms, write a non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup -+ -+config MISSED_TIMER_OFFSETS_HIST -+ depends on HIGH_RES_TIMERS -+ select GENERIC_TRACER -+ bool "Missed Timer Offsets Histogram" -+ help -+ Generate a histogram of missed timer offsets in microseconds. The -+ histograms are disabled by default. To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets -+ -+ The histogram data will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets -+ -+ If both Scheduling Latency Histogram and Missed Timer Offsets -+ Histogram are selected, additional histogram data will be collected -+ that contain, in addition to the wakeup latency, the timer latency, in -+ case the wakeup was triggered by an expired timer. These histograms -+ are available in the -+ -+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup -+ -+ directory. 
They reflect the apparent interrupt and scheduling latency -+ and are best suitable to determine the worst-case latency of a given -+ system. To enable these histograms, write a non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup -+ - config ENABLE_DEFAULT_TRACERS - bool "Trace process context switches and events" - depends on !GENERIC_TRACER ---- a/kernel/trace/Makefile -+++ b/kernel/trace/Makefile -@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f - obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o - obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o - obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o -+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o -+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o -+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o -+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o - obj-$(CONFIG_NOP_TRACER) += trace_nop.o - obj-$(CONFIG_STACK_TRACER) += trace_stack.o - obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o ---- /dev/null -+++ b/kernel/trace/latency_hist.c -@@ -0,0 +1,1178 @@ -+/* -+ * kernel/trace/latency_hist.c -+ * -+ * Add support for histograms of preemption-off latency and -+ * interrupt-off latency and wakeup latency, it depends on -+ * Real-Time Preemption Support. -+ * -+ * Copyright (C) 2005 MontaVista Software, Inc. -+ * Yi Yang -+ * -+ * Converted to work with the new latency tracer. -+ * Copyright (C) 2008 Red Hat, Inc. -+ * Steven Rostedt -+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "trace.h" -+#include -+ -+#define NSECS_PER_USECS 1000L -+ -+#define CREATE_TRACE_POINTS -+#include -+ -+enum { -+ IRQSOFF_LATENCY = 0, -+ PREEMPTOFF_LATENCY, -+ PREEMPTIRQSOFF_LATENCY, -+ WAKEUP_LATENCY, -+ WAKEUP_LATENCY_SHAREDPRIO, -+ MISSED_TIMER_OFFSETS, -+ TIMERANDWAKEUP_LATENCY, -+ MAX_LATENCY_TYPE, -+}; -+ -+#define MAX_ENTRY_NUM 10240 -+ -+struct hist_data { -+ atomic_t hist_mode; /* 0 log, 1 don't log */ -+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ -+ long min_lat; -+ long max_lat; -+ unsigned long long below_hist_bound_samples; -+ unsigned long long above_hist_bound_samples; -+ long long accumulate_lat; -+ unsigned long long total_samples; -+ unsigned long long hist_array[MAX_ENTRY_NUM]; -+}; -+ -+struct enable_data { -+ int latency_type; -+ int enabled; -+}; -+ -+static char *latency_hist_dir_root = "latency_hist"; -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); -+static char *irqsoff_hist_dir = "irqsoff"; -+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); -+static DEFINE_PER_CPU(int, hist_irqsoff_counting); -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); -+static char *preemptoff_hist_dir = "preemptoff"; -+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); -+static DEFINE_PER_CPU(int, hist_preemptoff_counting); -+#endif -+ -+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) -+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); -+static char *preemptirqsoff_hist_dir = "preemptirqsoff"; -+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); -+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); -+#endif -+ -+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) -+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); -+static struct enable_data preemptirqsoff_enabled_data = { -+ 
.latency_type = PREEMPTIRQSOFF_LATENCY, -+ .enabled = 0, -+}; -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+struct maxlatproc_data { -+ char comm[FIELD_SIZEOF(struct task_struct, comm)]; -+ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; -+ int pid; -+ int current_pid; -+ int prio; -+ int current_prio; -+ long latency; -+ long timeroffset; -+ cycle_t timestamp; -+}; -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); -+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); -+static char *wakeup_latency_hist_dir = "wakeup"; -+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; -+static notrace void probe_wakeup_latency_hist_start(void *v, -+ struct task_struct *p, int success); -+static notrace void probe_wakeup_latency_hist_stop(void *v, -+ struct task_struct *prev, struct task_struct *next); -+static notrace void probe_sched_migrate_task(void *, -+ struct task_struct *task, int cpu); -+static struct enable_data wakeup_latency_enabled_data = { -+ .latency_type = WAKEUP_LATENCY, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); -+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); -+static DEFINE_PER_CPU(struct task_struct *, wakeup_task); -+static DEFINE_PER_CPU(int, wakeup_sharedprio); -+static unsigned long wakeup_pid; -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); -+static char *missed_timer_offsets_dir = "missed_timer_offsets"; -+static notrace void probe_hrtimer_interrupt(void *v, int cpu, -+ long long offset, struct task_struct *curr, struct task_struct *task); -+static struct enable_data missed_timer_offsets_enabled_data = { -+ .latency_type = MISSED_TIMER_OFFSETS, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); -+static unsigned long missed_timer_offsets_pid; -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); -+static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; -+static struct enable_data timerandwakeup_enabled_data = { -+ .latency_type = TIMERANDWAKEUP_LATENCY, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); -+#endif -+ -+void notrace latency_hist(int latency_type, int cpu, long latency, -+ long timeroffset, cycle_t stop, -+ struct task_struct *p) -+{ -+ struct hist_data *my_hist; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ struct maxlatproc_data *mp = NULL; -+#endif -+ -+ if (!cpu_possible(cpu) || latency_type < 0 || -+ latency_type >= MAX_LATENCY_TYPE) -+ return; -+ -+ switch (latency_type) { -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ case IRQSOFF_LATENCY: -+ my_hist = &per_cpu(irqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ case PREEMPTOFF_LATENCY: -+ my_hist = &per_cpu(preemptoff_hist, cpu); -+ break; -+#endif -+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ my_hist = &per_cpu(preemptirqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ my_hist = &per_cpu(wakeup_latency_hist, cpu); -+ mp = &per_cpu(wakeup_maxlatproc, cpu); -+ break; -+ case 
WAKEUP_LATENCY_SHAREDPRIO: -+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ my_hist = &per_cpu(missed_timer_offsets, cpu); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); -+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); -+ break; -+#endif -+ -+ default: -+ return; -+ } -+ -+ latency += my_hist->offset; -+ -+ if (atomic_read(&my_hist->hist_mode) == 0) -+ return; -+ -+ if (latency < 0 || latency >= MAX_ENTRY_NUM) { -+ if (latency < 0) -+ my_hist->below_hist_bound_samples++; -+ else -+ my_hist->above_hist_bound_samples++; -+ } else -+ my_hist->hist_array[latency]++; -+ -+ if (unlikely(latency > my_hist->max_lat || -+ my_hist->min_lat == LONG_MAX)) { -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ if (latency_type == WAKEUP_LATENCY || -+ latency_type == WAKEUP_LATENCY_SHAREDPRIO || -+ latency_type == MISSED_TIMER_OFFSETS || -+ latency_type == TIMERANDWAKEUP_LATENCY) { -+ strncpy(mp->comm, p->comm, sizeof(mp->comm)); -+ strncpy(mp->current_comm, current->comm, -+ sizeof(mp->current_comm)); -+ mp->pid = task_pid_nr(p); -+ mp->current_pid = task_pid_nr(current); -+ mp->prio = p->prio; -+ mp->current_prio = current->prio; -+ mp->latency = latency; -+ mp->timeroffset = timeroffset; -+ mp->timestamp = stop; -+ } -+#endif -+ my_hist->max_lat = latency; -+ } -+ if (unlikely(latency < my_hist->min_lat)) -+ my_hist->min_lat = latency; -+ my_hist->total_samples++; -+ my_hist->accumulate_lat += latency; -+} -+ -+static void *l_start(struct seq_file *m, loff_t *pos) -+{ -+ loff_t *index_ptr = NULL; -+ loff_t index = *pos; -+ struct hist_data *my_hist = m->private; -+ -+ if (index == 0) { -+ char minstr[32], avgstr[32], maxstr[32]; -+ -+ atomic_dec(&my_hist->hist_mode); -+ -+ if (likely(my_hist->total_samples)) { -+ long avg = (long) div64_s64(my_hist->accumulate_lat, -+ my_hist->total_samples); -+ snprintf(minstr, sizeof(minstr), "%ld", -+ my_hist->min_lat - my_hist->offset); -+ snprintf(avgstr, sizeof(avgstr), "%ld", -+ avg - my_hist->offset); -+ snprintf(maxstr, sizeof(maxstr), "%ld", -+ my_hist->max_lat - my_hist->offset); -+ } else { -+ strcpy(minstr, ""); -+ strcpy(avgstr, minstr); -+ strcpy(maxstr, minstr); -+ } -+ -+ seq_printf(m, "#Minimum latency: %s microseconds\n" -+ "#Average latency: %s microseconds\n" -+ "#Maximum latency: %s microseconds\n" -+ "#Total samples: %llu\n" -+ "#There are %llu samples lower than %ld" -+ " microseconds.\n" -+ "#There are %llu samples greater or equal" -+ " than %ld microseconds.\n" -+ "#usecs\t%16s\n", -+ minstr, avgstr, maxstr, -+ my_hist->total_samples, -+ my_hist->below_hist_bound_samples, -+ -my_hist->offset, -+ my_hist->above_hist_bound_samples, -+ MAX_ENTRY_NUM - my_hist->offset, -+ "samples"); -+ } -+ if (index < MAX_ENTRY_NUM) { -+ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); -+ if (index_ptr) -+ *index_ptr = index; -+ } -+ -+ return index_ptr; -+} -+ -+static void *l_next(struct seq_file *m, void *p, loff_t *pos) -+{ -+ loff_t *index_ptr = p; -+ struct hist_data *my_hist = m->private; -+ -+ if (++*pos >= MAX_ENTRY_NUM) { -+ atomic_inc(&my_hist->hist_mode); -+ return NULL; -+ } -+ *index_ptr = *pos; -+ return index_ptr; -+} -+ -+static void 
l_stop(struct seq_file *m, void *p) -+{ -+ kfree(p); -+} -+ -+static int l_show(struct seq_file *m, void *p) -+{ -+ int index = *(loff_t *) p; -+ struct hist_data *my_hist = m->private; -+ -+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, -+ my_hist->hist_array[index]); -+ return 0; -+} -+ -+static const struct seq_operations latency_hist_seq_op = { -+ .start = l_start, -+ .next = l_next, -+ .stop = l_stop, -+ .show = l_show -+}; -+ -+static int latency_hist_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ -+ ret = seq_open(file, &latency_hist_seq_op); -+ if (!ret) { -+ struct seq_file *seq = file->private_data; -+ seq->private = inode->i_private; -+ } -+ return ret; -+} -+ -+static const struct file_operations latency_hist_fops = { -+ .open = latency_hist_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static void clear_maxlatprocdata(struct maxlatproc_data *mp) -+{ -+ mp->comm[0] = mp->current_comm[0] = '\0'; -+ mp->prio = mp->current_prio = mp->pid = mp->current_pid = -+ mp->latency = mp->timeroffset = -1; -+ mp->timestamp = 0; -+} -+#endif -+ -+static void hist_reset(struct hist_data *hist) -+{ -+ atomic_dec(&hist->hist_mode); -+ -+ memset(hist->hist_array, 0, sizeof(hist->hist_array)); -+ hist->below_hist_bound_samples = 0ULL; -+ hist->above_hist_bound_samples = 0ULL; -+ hist->min_lat = LONG_MAX; -+ hist->max_lat = LONG_MIN; -+ hist->total_samples = 0ULL; -+ hist->accumulate_lat = 0LL; -+ -+ atomic_inc(&hist->hist_mode); -+} -+ -+static ssize_t -+latency_hist_reset(struct file *file, const char __user *a, -+ size_t size, loff_t *off) -+{ -+ int cpu; -+ struct hist_data *hist = NULL; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ struct maxlatproc_data *mp = NULL; -+#endif -+ off_t latency_type = (off_t) file->private_data; -+ -+ for_each_online_cpu(cpu) { -+ -+ switch (latency_type) { -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ case PREEMPTOFF_LATENCY: -+ hist = &per_cpu(preemptoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ case IRQSOFF_LATENCY: -+ hist = &per_cpu(irqsoff_hist, cpu); -+ break; -+#endif -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ hist = &per_cpu(preemptirqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ hist = &per_cpu(wakeup_latency_hist, cpu); -+ mp = &per_cpu(wakeup_maxlatproc, cpu); -+ break; -+ case WAKEUP_LATENCY_SHAREDPRIO: -+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ hist = &per_cpu(missed_timer_offsets, cpu); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ hist = &per_cpu(timerandwakeup_latency_hist, cpu); -+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); -+ break; -+#endif -+ } -+ -+ hist_reset(hist); -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ if (latency_type == WAKEUP_LATENCY || -+ latency_type == WAKEUP_LATENCY_SHAREDPRIO || -+ latency_type == MISSED_TIMER_OFFSETS || -+ latency_type == TIMERANDWAKEUP_LATENCY) -+ clear_maxlatprocdata(mp); -+#endif -+ } -+ -+ return 
size; -+} -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static ssize_t -+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ int r; -+ unsigned long *this_pid = file->private_data; -+ -+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+} -+ -+static ssize_t do_pid(struct file *file, const char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ unsigned long pid; -+ unsigned long *this_pid = file->private_data; -+ -+ if (cnt >= sizeof(buf)) -+ return -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, cnt)) -+ return -EFAULT; -+ -+ buf[cnt] = '\0'; -+ -+ if (kstrtoul(buf, 10, &pid)) -+ return -EINVAL; -+ -+ *this_pid = pid; -+ -+ return cnt; -+} -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static ssize_t -+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ int r; -+ struct maxlatproc_data *mp = file->private_data; -+ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); -+ unsigned long long t; -+ unsigned long usecs, secs; -+ char *buf; -+ -+ if (mp->pid == -1 || mp->current_pid == -1) { -+ buf = "(none)\n"; -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, -+ strlen(buf)); -+ } -+ -+ buf = kmalloc(strmaxlen, GFP_KERNEL); -+ if (buf == NULL) -+ return -ENOMEM; -+ -+ t = ns2usecs(mp->timestamp); -+ usecs = do_div(t, USEC_PER_SEC); -+ secs = (unsigned long) t; -+ r = snprintf(buf, strmaxlen, -+ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, -+ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, -+ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, -+ secs, usecs); -+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+ kfree(buf); -+ return r; -+} -+#endif -+ -+static ssize_t -+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ struct enable_data *ed = file->private_data; -+ int r; -+ -+ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+} -+ -+static ssize_t -+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ long enable; -+ struct enable_data *ed = file->private_data; -+ -+ if (cnt >= sizeof(buf)) -+ return -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, cnt)) -+ return -EFAULT; -+ -+ buf[cnt] = 0; -+ -+ if (kstrtoul(buf, 10, &enable)) -+ return -EINVAL; -+ -+ if ((enable && ed->enabled) || (!enable && !ed->enabled)) -+ return cnt; -+ -+ if (enable) { -+ int ret; -+ -+ switch (ed->latency_type) { -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ ret = register_trace_preemptirqsoff_hist( -+ probe_preemptirqsoff_hist, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_preemptirqsoff_hist " -+ "to trace_preemptirqsoff_hist\n"); -+ return ret; -+ } -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ ret = register_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_start " -+ "to trace_sched_wakeup\n"); -+ return ret; -+ } -+ ret = register_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_start " -+ "to 
trace_sched_wakeup_new\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ return ret; -+ } -+ ret = register_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_stop " -+ "to trace_sched_switch\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ return ret; -+ } -+ ret = register_trace_sched_migrate_task( -+ probe_sched_migrate_task, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_sched_migrate_task " -+ "to trace_sched_migrate_task\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ return ret; -+ } -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ ret = register_trace_hrtimer_interrupt( -+ probe_hrtimer_interrupt, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_hrtimer_interrupt " -+ "to trace_hrtimer_interrupt\n"); -+ return ret; -+ } -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ if (!wakeup_latency_enabled_data.enabled || -+ !missed_timer_offsets_enabled_data.enabled) -+ return -EINVAL; -+ break; -+#endif -+ default: -+ break; -+ } -+ } else { -+ switch (ed->latency_type) { -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ { -+ int cpu; -+ -+ unregister_trace_preemptirqsoff_hist( -+ probe_preemptirqsoff_hist, NULL); -+ for_each_online_cpu(cpu) { -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ per_cpu(hist_irqsoff_counting, -+ cpu) = 0; -+#endif -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ per_cpu(hist_preemptoff_counting, -+ cpu) = 0; -+#endif -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ per_cpu(hist_preemptirqsoff_counting, -+ cpu) = 0; -+#endif -+ } -+ } -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ { -+ int cpu; -+ -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ unregister_trace_sched_migrate_task( -+ probe_sched_migrate_task, NULL); -+ -+ for_each_online_cpu(cpu) { -+ per_cpu(wakeup_task, cpu) = NULL; -+ per_cpu(wakeup_sharedprio, cpu) = 0; -+ } -+ } -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ timerandwakeup_enabled_data.enabled = 0; -+#endif -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ unregister_trace_hrtimer_interrupt( -+ probe_hrtimer_interrupt, NULL); -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ timerandwakeup_enabled_data.enabled = 0; -+#endif -+ break; -+#endif -+ default: -+ break; -+ } -+ } -+ ed->enabled = enable; -+ return cnt; -+} -+ -+static const struct file_operations latency_hist_reset_fops = { -+ .open = tracing_open_generic, -+ .write = latency_hist_reset, -+}; -+ -+static const struct file_operations enable_fops = { -+ .open = tracing_open_generic, -+ .read = show_enable, -+ .write = do_enable, -+}; -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static 
const struct file_operations pid_fops = { -+ .open = tracing_open_generic, -+ .read = show_pid, -+ .write = do_pid, -+}; -+ -+static const struct file_operations maxlatproc_fops = { -+ .open = tracing_open_generic, -+ .read = show_maxlatproc, -+}; -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+static notrace void probe_preemptirqsoff_hist(void *v, int reason, -+ int starthist) -+{ -+ int cpu = raw_smp_processor_id(); -+ int time_set = 0; -+ -+ if (starthist) { -+ cycle_t uninitialized_var(start); -+ -+ if (!preempt_count() && !irqs_disabled()) -+ return; -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ if ((reason == IRQS_OFF || reason == TRACE_START) && -+ !per_cpu(hist_irqsoff_counting, cpu)) { -+ per_cpu(hist_irqsoff_counting, cpu) = 1; -+ start = ftrace_now(cpu); -+ time_set++; -+ per_cpu(hist_irqsoff_start, cpu) = start; -+ } -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ if ((reason == PREEMPT_OFF || reason == TRACE_START) && -+ !per_cpu(hist_preemptoff_counting, cpu)) { -+ per_cpu(hist_preemptoff_counting, cpu) = 1; -+ if (!(time_set++)) -+ start = ftrace_now(cpu); -+ per_cpu(hist_preemptoff_start, cpu) = start; -+ } -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ if (per_cpu(hist_irqsoff_counting, cpu) && -+ per_cpu(hist_preemptoff_counting, cpu) && -+ !per_cpu(hist_preemptirqsoff_counting, cpu)) { -+ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; -+ if (!time_set) -+ start = ftrace_now(cpu); -+ per_cpu(hist_preemptirqsoff_start, cpu) = start; -+ } -+#endif -+ } else { -+ cycle_t uninitialized_var(stop); -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ if ((reason == IRQS_ON || reason == TRACE_STOP) && -+ per_cpu(hist_irqsoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_irqsoff_start, cpu); -+ -+ stop = ftrace_now(cpu); -+ time_set++; -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, -+ stop, NULL); -+ } -+ per_cpu(hist_irqsoff_counting, cpu) = 0; -+ } -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && -+ per_cpu(hist_preemptoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_preemptoff_start, cpu); -+ -+ if (!(time_set++)) -+ stop = ftrace_now(cpu); -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, -+ 0, stop, NULL); -+ } -+ per_cpu(hist_preemptoff_counting, cpu) = 0; -+ } -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ if ((!per_cpu(hist_irqsoff_counting, cpu) || -+ !per_cpu(hist_preemptoff_counting, cpu)) && -+ per_cpu(hist_preemptirqsoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); -+ -+ if (!time_set) -+ stop = ftrace_now(cpu); -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, -+ latency, 0, stop, NULL); -+ } -+ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; -+ } -+#endif -+ } -+} -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+static DEFINE_RAW_SPINLOCK(wakeup_lock); -+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, -+ int cpu) -+{ -+ int old_cpu = task_cpu(task); -+ -+ if (cpu != old_cpu) { -+ unsigned long flags; -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); -+ if (task == cpu_wakeup_task) { -+ 
put_task_struct(cpu_wakeup_task); -+ per_cpu(wakeup_task, old_cpu) = NULL; -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; -+ get_task_struct(cpu_wakeup_task); -+ } -+ -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+ } -+} -+ -+static notrace void probe_wakeup_latency_hist_start(void *v, -+ struct task_struct *p, int success) -+{ -+ unsigned long flags; -+ struct task_struct *curr = current; -+ int cpu = task_cpu(p); -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu); -+ -+ if (wakeup_pid) { -+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || -+ p->prio == curr->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ if (likely(wakeup_pid != task_pid_nr(p))) -+ goto out; -+ } else { -+ if (likely(!rt_task(p)) || -+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || -+ p->prio > curr->prio) -+ goto out; -+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || -+ p->prio == curr->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ } -+ -+ if (cpu_wakeup_task) -+ put_task_struct(cpu_wakeup_task); -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; -+ get_task_struct(cpu_wakeup_task); -+ cpu_wakeup_task->preempt_timestamp_hist = -+ ftrace_now(raw_smp_processor_id()); -+out: -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+} -+ -+static notrace void probe_wakeup_latency_hist_stop(void *v, -+ struct task_struct *prev, struct task_struct *next) -+{ -+ unsigned long flags; -+ int cpu = task_cpu(next); -+ long latency; -+ cycle_t stop; -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu); -+ -+ if (cpu_wakeup_task == NULL) -+ goto out; -+ -+ /* Already running? */ -+ if (unlikely(current == cpu_wakeup_task)) -+ goto out_reset; -+ -+ if (next != cpu_wakeup_task) { -+ if (next->prio < cpu_wakeup_task->prio) -+ goto out_reset; -+ -+ if (next->prio == cpu_wakeup_task->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ -+ goto out; -+ } -+ -+ if (current->prio == cpu_wakeup_task->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ -+ /* -+ * The task we are waiting for is about to be switched to. -+ * Calculate latency and store it in histogram. 
-+ */ -+ stop = ftrace_now(raw_smp_processor_id()); -+ -+ latency = ((long) (stop - next->preempt_timestamp_hist)) / -+ NSECS_PER_USECS; -+ -+ if (per_cpu(wakeup_sharedprio, cpu)) { -+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, -+ next); -+ per_cpu(wakeup_sharedprio, cpu) = 0; -+ } else { -+ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ if (timerandwakeup_enabled_data.enabled) { -+ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, -+ next->timer_offset + latency, next->timer_offset, -+ stop, next); -+ } -+#endif -+ } -+ -+out_reset: -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ next->timer_offset = 0; -+#endif -+ put_task_struct(cpu_wakeup_task); -+ per_cpu(wakeup_task, cpu) = NULL; -+out: -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+} -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+static notrace void probe_hrtimer_interrupt(void *v, int cpu, -+ long long latency_ns, struct task_struct *curr, -+ struct task_struct *task) -+{ -+ if (latency_ns <= 0 && task != NULL && rt_task(task) && -+ (task->prio < curr->prio || -+ (task->prio == curr->prio && -+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { -+ long latency; -+ cycle_t now; -+ -+ if (missed_timer_offsets_pid) { -+ if (likely(missed_timer_offsets_pid != -+ task_pid_nr(task))) -+ return; -+ } -+ -+ now = ftrace_now(cpu); -+ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); -+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, -+ task); -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ task->timer_offset = latency; -+#endif -+ } -+} -+#endif -+ -+static __init int latency_hist_init(void) -+{ -+ struct dentry *latency_hist_root = NULL; -+ struct dentry *dentry; -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ struct dentry *dentry_sharedprio; -+#endif -+ struct dentry *entry; -+ struct dentry *enable_root; -+ int i = 0; -+ struct hist_data *my_hist; -+ char name[64]; -+ char *cpufmt = "CPU%d"; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ char *cpufmt_maxlatproc = "max_latency-CPU%d"; -+ struct maxlatproc_data *mp = NULL; -+#endif -+ -+ dentry = tracing_init_dentry(); -+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); -+ enable_root = debugfs_create_dir("enable", latency_hist_root); -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(irqsoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(irqsoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ dentry = debugfs_create_dir(preemptoff_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(preemptoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(preemptoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, 
cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(preemptirqsoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ entry = debugfs_create_file("preemptirqsoff", 0644, -+ enable_root, (void *)&preemptirqsoff_enabled_data, -+ &enable_fops); -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ dentry = debugfs_create_dir(wakeup_latency_hist_dir, -+ latency_hist_root); -+ dentry_sharedprio = debugfs_create_dir( -+ wakeup_latency_hist_dir_sharedprio, dentry); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(wakeup_latency_hist, i), -+ &latency_hist_fops); -+ my_hist = &per_cpu(wakeup_latency_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, -+ &per_cpu(wakeup_latency_hist_sharedprio, i), -+ &latency_hist_fops); -+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ -+ mp = &per_cpu(wakeup_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); -+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("pid", 0644, dentry, -+ (void *)&wakeup_pid, &pid_fops); -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); -+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, -+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); -+ entry = debugfs_create_file("wakeup", 0644, -+ enable_root, (void *)&wakeup_latency_enabled_data, -+ &enable_fops); -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ dentry = debugfs_create_dir(missed_timer_offsets_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); -+ my_hist = &per_cpu(missed_timer_offsets, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("pid", 0644, dentry, -+ (void *)&missed_timer_offsets_pid, &pid_fops); -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); -+ entry = debugfs_create_file("missed_timer_offsets", 0644, -+ enable_root, (void *)&missed_timer_offsets_enabled_data, -+ &enable_fops); -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(timerandwakeup_latency_hist, i), -+ &latency_hist_fops); -+ my_hist = 
&per_cpu(timerandwakeup_latency_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ mp = &per_cpu(timerandwakeup_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); -+ entry = debugfs_create_file("timerandwakeup", 0644, -+ enable_root, (void *)&timerandwakeup_enabled_data, -+ &enable_fops); -+#endif -+ return 0; -+} -+ -+device_initcall(latency_hist_init); ---- a/kernel/trace/trace_irqsoff.c -+++ b/kernel/trace/trace_irqsoff.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #include "trace.h" - -@@ -420,11 +421,13 @@ void start_critical_timings(void) - { - if (preempt_trace() || irq_trace()) - start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); -+ trace_preemptirqsoff_hist(TRACE_START, 1); - } - EXPORT_SYMBOL_GPL(start_critical_timings); - - void stop_critical_timings(void) - { -+ trace_preemptirqsoff_hist(TRACE_STOP, 0); - if (preempt_trace() || irq_trace()) - stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); - } -@@ -434,6 +437,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) - #ifdef CONFIG_PROVE_LOCKING - void time_hardirqs_on(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(a0, a1); - } -@@ -442,6 +446,7 @@ void time_hardirqs_off(unsigned long a0, - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(a0, a1); -+ trace_preemptirqsoff_hist(IRQS_OFF, 1); - } - - #else /* !CONFIG_PROVE_LOCKING */ -@@ -467,6 +472,7 @@ inline void print_irqtrace_events(struct - */ - void trace_hardirqs_on(void) - { -+ trace_preemptirqsoff_hist(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); - } -@@ -476,11 +482,13 @@ void trace_hardirqs_off(void) - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); -+ trace_preemptirqsoff_hist(IRQS_OFF, 1); - } - EXPORT_SYMBOL(trace_hardirqs_off); - - __visible void trace_hardirqs_on_caller(unsigned long caller_addr) - { -+ trace_preemptirqsoff_hist(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(CALLER_ADDR0, caller_addr); - } -@@ -490,6 +498,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller); - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(CALLER_ADDR0, caller_addr); -+ trace_preemptirqsoff_hist(IRQS_OFF, 1); - } - EXPORT_SYMBOL(trace_hardirqs_off_caller); - -@@ -499,12 +508,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) - #ifdef CONFIG_PREEMPT_TRACER - void trace_preempt_on(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist(PREEMPT_ON, 0); - if (preempt_trace() && !irq_trace()) - stop_critical_timing(a0, a1); - } - - void trace_preempt_off(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist(PREEMPT_OFF, 1); - if (preempt_trace() && !irq_trace()) - start_critical_timing(a0, a1); - } diff --git a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch b/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch deleted file mode 100644 index 62487b805..000000000 --- a/debian/patches/features/all/rt/latency_hist-update-sched_wakeup-probe.patch +++ /dev/null @@ -1,41 +0,0 @@ -Subject: latency_hist: Update sched_wakeup probe -From: Mathieu Desnoyers -Date: Sun, 25 Oct 2015 18:06:05 -0400
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -"sched: Introduce the 'trace_sched_waking' tracepoint" introduces a -prototype change for the sched_wakeup probe: the "success" argument is -removed. Update the latency_hist probe following this change. - -Signed-off-by: Mathieu Desnoyers -Cc: Peter Zijlstra (Intel) -Cc: Julien Desfossez -Cc: Francis Giraldeau -Cc: Mike Galbraith -Cc: Steven Rostedt -Link: http://lkml.kernel.org/r/1445810765-18732-1-git-send-email-mathieu.desnoyers@efficios.com -Signed-off-by: Thomas Gleixner ---- - kernel/trace/latency_hist.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/trace/latency_hist.c -+++ b/kernel/trace/latency_hist.c -@@ -115,7 +115,7 @@ static DEFINE_PER_CPU(struct hist_data, - static char *wakeup_latency_hist_dir = "wakeup"; - static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; - static notrace void probe_wakeup_latency_hist_start(void *v, -- struct task_struct *p, int success); -+ struct task_struct *p); - static notrace void probe_wakeup_latency_hist_stop(void *v, - struct task_struct *prev, struct task_struct *next); - static notrace void probe_sched_migrate_task(void *, -@@ -869,7 +869,7 @@ static notrace void probe_sched_migrate_ - } - - static notrace void probe_wakeup_latency_hist_start(void *v, -- struct task_struct *p, int success) -+ struct task_struct *p) - { - unsigned long flags; - struct task_struct *curr = current; diff --git a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch b/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch deleted file mode 100644 index b86d47a5c..000000000 --- a/debian/patches/features/all/rt/latencyhist-disable-jump-labels.patch +++ /dev/null @@ -1,62 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 4 Feb 2016 14:08:06 +0100 -Subject: latencyhist: disable jump-labels -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Atleast on X86 we die a recursive death - -|CPU: 3 PID: 585 Comm: bash Not tainted 4.4.1-rt4+ #198 -|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Debian-1.8.2-1 04/01/2014 -|task: ffff88007ab4cd00 ti: ffff88007ab94000 task.ti: ffff88007ab94000 -|RIP: 0010:[] [] int3+0x0/0x10 -|RSP: 0018:ffff88013c107fd8 EFLAGS: 00010082 -|RAX: ffff88007ab4cd00 RBX: ffffffff8100ceab RCX: 0000000080202001 -|RDX: 0000000000000000 RSI: ffffffff8100ceab RDI: ffffffff810c78b2 -|RBP: ffff88007ab97c10 R08: ffffffffff57b000 R09: 0000000000000000 -|R10: ffff88013bb64790 R11: ffff88007ab4cd68 R12: ffffffff8100ceab -|R13: ffffffff810c78b2 R14: ffffffff810f8158 R15: ffffffff810f9120 -|FS: 0000000000000000(0000) GS:ffff88013c100000(0063) knlGS:00000000f74e3940 -|CS: 0010 DS: 002b ES: 002b CR0: 000000008005003b -|CR2: 0000000008cf6008 CR3: 000000013b169000 CR4: 00000000000006e0 -|Call Trace: -| <#DB> -| [] ? 
trace_preempt_off+0x18/0x170 -| <> -| [] preempt_count_add+0xa5/0xc0 -| [] on_each_cpu+0x22/0x90 -| [] text_poke_bp+0x5b/0xc0 -| [] arch_jump_label_transform+0x8c/0xf0 -| [] __jump_label_update+0x6c/0x80 -| [] jump_label_update+0xaa/0xc0 -| [] static_key_slow_inc+0x94/0xa0 -| [] tracepoint_probe_register_prio+0x26d/0x2c0 -| [] tracepoint_probe_register+0x13/0x20 -| [] trace_event_reg+0x98/0xd0 -| [] __ftrace_event_enable_disable+0x6b/0x180 -| [] event_enable_write+0x78/0xc0 -| [] __vfs_write+0x28/0xe0 -| [] vfs_write+0xa5/0x180 -| [] SyS_write+0x46/0xa0 -| [] do_fast_syscall_32+0xa1/0x1d0 -| [] sysenter_flags_fixed+0xd/0x17 - -during - echo 1 > /sys/kernel/debug/tracing/events/hist/preemptirqsoff_hist/enable - -Reported-By: Christoph Mathys -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/Kconfig -+++ b/arch/Kconfig -@@ -52,6 +52,7 @@ config KPROBES - config JUMP_LABEL - bool "Optimize very unlikely/likely branches" - depends on HAVE_ARCH_JUMP_LABEL -+ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST) - help - This option enables a transparent branch optimization that - makes certain almost-always-true or almost-always-false branch diff --git a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch deleted file mode 100644 index f2a5b6eff..000000000 --- a/debian/patches/features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 23 Jan 2014 14:45:59 +0100 -Subject: leds: trigger: disable CPU trigger on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -as it triggers: -|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 -|[] (unwind_backtrace+0x0/0xf8) from [] (show_stack+0x1c/0x20) -|[] (show_stack+0x1c/0x20) from [] (dump_stack+0x20/0x2c) -|[] (dump_stack+0x20/0x2c) from [] (__might_sleep+0x13c/0x170) -|[] (__might_sleep+0x13c/0x170) from [] (__rt_spin_lock+0x28/0x38) -|[] (__rt_spin_lock+0x28/0x38) from [] (rt_read_lock+0x68/0x7c) -|[] (rt_read_lock+0x68/0x7c) from [] (led_trigger_event+0x2c/0x5c) -|[] (led_trigger_event+0x2c/0x5c) from [] (ledtrig_cpu+0x54/0x5c) -|[] (ledtrig_cpu+0x54/0x5c) from [] (arch_cpu_idle_exit+0x18/0x1c) -|[] (arch_cpu_idle_exit+0x18/0x1c) from [] (cpu_startup_entry+0xa8/0x234) -|[] (cpu_startup_entry+0xa8/0x234) from [] (rest_init+0xb8/0xe0) -|[] (rest_init+0xb8/0xe0) from [] (start_kernel+0x2c4/0x380) - - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/leds/trigger/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/leds/trigger/Kconfig -+++ b/drivers/leds/trigger/Kconfig -@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT - - config LEDS_TRIGGER_CPU - bool "LED CPU Trigger" -- depends on LEDS_TRIGGERS -+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE - help - This allows LEDs to be controlled by active CPUs. 
This shows - the active CPUs across an array of LEDs so you can see which diff --git a/debian/patches/features/all/rt/lglocks-rt.patch b/debian/patches/features/all/rt/lglocks-rt.patch deleted file mode 100644 index 3ec66224e..000000000 --- a/debian/patches/features/all/rt/lglocks-rt.patch +++ /dev/null @@ -1,200 +0,0 @@ -Subject: lglocks: Provide a RT safe variant -From: Thomas Gleixner -Date: Wed, 15 Jun 2011 11:02:21 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -lglocks by itself will spin in order to get the lock. This will end up -badly if a task with the highest priority keeps spinning while a task -with the lowest priority owns the lock. - -Lets replace them with rt_mutex based locks so they can sleep, track -owner and boost if needed. - -Signed-off-by: Thomas Gleixner ---- - include/linux/lglock.h | 18 +++++++++++++ - kernel/locking/lglock.c | 62 ++++++++++++++++++++++++++++++------------------ - 2 files changed, 58 insertions(+), 22 deletions(-) - ---- a/include/linux/lglock.h -+++ b/include/linux/lglock.h -@@ -34,13 +34,30 @@ - #endif - - struct lglock { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct rt_mutex __percpu *lock; -+#else - arch_spinlock_t __percpu *lock; -+#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lock_class_key lock_key; - struct lockdep_map lock_dep_map; - #endif - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define DEFINE_LGLOCK(name) \ -+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ struct lglock name = { .lock = &name ## _lock } -+ -+# define DEFINE_STATIC_LGLOCK(name) \ -+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ static struct lglock name = { .lock = &name ## _lock } -+ -+#else -+ - #define DEFINE_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ -@@ -50,6 +67,7 @@ struct lglock { - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - static struct lglock name = { .lock = &name ## _lock } -+#endif - - void lg_lock_init(struct lglock *lg, char *name); - ---- a/kernel/locking/lglock.c -+++ b/kernel/locking/lglock.c -@@ -4,6 +4,15 @@ - #include - #include - -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define lg_lock_ptr arch_spinlock_t -+# define lg_do_lock(l) arch_spin_lock(l) -+# define lg_do_unlock(l) arch_spin_unlock(l) -+#else -+# define lg_lock_ptr struct rt_mutex -+# define lg_do_lock(l) __rt_spin_lock(l) -+# define lg_do_unlock(l) __rt_spin_unlock(l) -+#endif - /* - * Note there is no uninit, so lglocks cannot be defined in - * modules (but it's fine to use them from there) -@@ -12,51 +21,60 @@ - - void lg_lock_init(struct lglock *lg, char *name) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int i; -+ -+ for_each_possible_cpu(i) { -+ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i); -+ -+ rt_mutex_init(lock); -+ } -+#endif - LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); - } - EXPORT_SYMBOL(lg_lock_init); - - void lg_local_lock(struct lglock *lg) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - -- preempt_disable(); -+ migrate_disable(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - lock = this_cpu_ptr(lg->lock); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - EXPORT_SYMBOL(lg_local_lock); - - void lg_local_unlock(struct lglock *lg) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - lock = 
this_cpu_ptr(lg->lock); -- arch_spin_unlock(lock); -- preempt_enable(); -+ lg_do_unlock(lock); -+ migrate_enable(); - } - EXPORT_SYMBOL(lg_local_unlock); - - void lg_local_lock_cpu(struct lglock *lg, int cpu) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - -- preempt_disable(); -+ preempt_disable_nort(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - lock = per_cpu_ptr(lg->lock, cpu); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - EXPORT_SYMBOL(lg_local_lock_cpu); - - void lg_local_unlock_cpu(struct lglock *lg, int cpu) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - lock = per_cpu_ptr(lg->lock, cpu); -- arch_spin_unlock(lock); -- preempt_enable(); -+ lg_do_unlock(lock); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(lg_local_unlock_cpu); - -@@ -70,15 +88,15 @@ void lg_double_lock(struct lglock *lg, i - - preempt_disable(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); -- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1)); -- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2)); -+ lg_do_lock(per_cpu_ptr(lg->lock, cpu1)); -+ lg_do_lock(per_cpu_ptr(lg->lock, cpu2)); - } - - void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2) - { - lock_release(&lg->lock_dep_map, 1, _RET_IP_); -- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1)); -- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2)); -+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1)); -+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2)); - preempt_enable(); - } - -@@ -86,12 +104,12 @@ void lg_global_lock(struct lglock *lg) - { - int i; - -- preempt_disable(); -+ preempt_disable_nort(); - lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - for_each_possible_cpu(i) { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - lock = per_cpu_ptr(lg->lock, i); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - } - EXPORT_SYMBOL(lg_global_lock); -@@ -102,10 +120,10 @@ void lg_global_unlock(struct lglock *lg) - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - for_each_possible_cpu(i) { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - lock = per_cpu_ptr(lg->lock, i); -- arch_spin_unlock(lock); -+ lg_do_unlock(lock); - } -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(lg_global_unlock); diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch deleted file mode 100644 index e78a8fad1..000000000 --- a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch +++ /dev/null @@ -1,115 +0,0 @@ -From: Paul Gortmaker -Date: Fri, 21 Jun 2013 15:07:25 -0400 -Subject: list_bl: Make list head locking RT safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -As per changes in include/linux/jbd_common.h for avoiding the -bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal -head lock rt safe") we do the same thing here. - -We use the non atomic __set_bit and __clear_bit inside the scope of -the lock to preserve the ability of the existing LIST_DEBUG code to -use the zero'th bit in the sanity checks. - -As a bit spinlock, we had no lockdep visibility into the usage -of the list head locking. 
Now, if we were to implement it as a -standard non-raw spinlock, we would see: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 -in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd -5 locks held by udevd/122: - #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0 - #1: (rename_lock){+.+...}, at: [] d_move+0x2c/0x60 - #2: (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130 - #3: (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130 - #4: (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130 -Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7 -Call Trace: - [] __might_sleep+0x134/0x1f0 - [] rt_spin_lock+0x24/0x60 - [] __d_shrink+0x5c/0xa0 - [] __d_drop+0x1d/0x40 - [] __d_move+0x8e/0x320 - [] d_move+0x3e/0x60 - [] vfs_rename+0x198/0x4c0 - [] sys_renameat+0x213/0x240 - [] ? _raw_spin_unlock+0x35/0x60 - [] ? do_page_fault+0x1ec/0x4b0 - [] ? retint_swapgs+0xe/0x13 - [] ? trace_hardirqs_on_thunk+0x3a/0x3f - [] sys_rename+0x1b/0x20 - [] system_call_fastpath+0x1a/0x1f - -Since we are only taking the lock during short lived list operations, -lets assume for now that it being raw won't be a significant latency -concern. - - -Signed-off-by: Paul Gortmaker -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/list_bl.h | 28 ++++++++++++++++++++++++++-- - 1 file changed, 26 insertions(+), 2 deletions(-) - ---- a/include/linux/list_bl.h -+++ b/include/linux/list_bl.h -@@ -2,6 +2,7 @@ - #define _LINUX_LIST_BL_H - - #include -+#include - #include - - /* -@@ -32,13 +33,22 @@ - - struct hlist_bl_head { - struct hlist_bl_node *first; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spinlock_t lock; -+#endif - }; - - struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; - }; --#define INIT_HLIST_BL_HEAD(ptr) \ -- ((ptr)->first = NULL) -+ -+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) -+{ -+ h->first = NULL; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spin_lock_init(&h->lock); -+#endif -+} - - static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) - { -@@ -118,12 +128,26 @@ static inline void hlist_bl_del_init(str - - static inline void hlist_bl_lock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(0, (unsigned long *)b); -+#else -+ raw_spin_lock(&b->lock); -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __set_bit(0, (unsigned long *)b); -+#endif -+#endif - } - - static inline void hlist_bl_unlock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - __bit_spin_unlock(0, (unsigned long *)b); -+#else -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __clear_bit(0, (unsigned long *)b); -+#endif -+ raw_spin_unlock(&b->lock); -+#endif - } - - static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch deleted file mode 100644 index 9eefd2f09..000000000 --- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 21 Jul 2009 22:34:14 +0200 -Subject: rt: local_irq_* variants depending on RT/!RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Add local_irq_*_(no)rt variant which are mainly used to break -interrupt disabled sections on PREEMPT_RT or to explicitely disable -interrupts on PREEMPT_RT. 
- -Signed-off-by: Thomas Gleixner - ---- - include/linux/interrupt.h | 2 +- - include/linux/irqflags.h | 19 +++++++++++++++++++ - 2 files changed, 20 insertions(+), 1 deletion(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -186,7 +186,7 @@ extern void devm_free_irq(struct device - #ifdef CONFIG_LOCKDEP - # define local_irq_enable_in_hardirq() do { } while (0) - #else --# define local_irq_enable_in_hardirq() local_irq_enable() -+# define local_irq_enable_in_hardirq() local_irq_enable_nort() - #endif - - extern void disable_irq_nosync(unsigned int irq); ---- a/include/linux/irqflags.h -+++ b/include/linux/irqflags.h -@@ -148,4 +148,23 @@ - - #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) - -+/* -+ * local_irq* variants depending on RT/!RT -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define local_irq_disable_nort() do { } while (0) -+# define local_irq_enable_nort() do { } while (0) -+# define local_irq_save_nort(flags) local_save_flags(flags) -+# define local_irq_restore_nort(flags) (void)(flags) -+# define local_irq_disable_rt() local_irq_disable() -+# define local_irq_enable_rt() local_irq_enable() -+#else -+# define local_irq_disable_nort() local_irq_disable() -+# define local_irq_enable_nort() local_irq_enable() -+# define local_irq_save_nort(flags) local_irq_save(flags) -+# define local_irq_restore_nort(flags) local_irq_restore(flags) -+# define local_irq_disable_rt() do { } while (0) -+# define local_irq_enable_rt() do { } while (0) -+#endif -+ - #endif diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch deleted file mode 100644 index 9b054662f..000000000 --- a/debian/patches/features/all/rt/localversion.patch +++ /dev/null @@ -1,14 +0,0 @@ -Subject: v4.4.1-rt5 -From: Thomas Gleixner -Date: Fri, 08 Jul 2011 20:25:16 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Signed-off-by: Thomas Gleixner ---- - localversion-rt | 1 + - 1 file changed, 1 insertion(+) - ---- /dev/null -+++ b/localversion-rt -@@ -0,0 +1 @@ -+-rt5 diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch deleted file mode 100644 index 57d118951..000000000 --- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch +++ /dev/null @@ -1,59 +0,0 @@ -Subject: lockdep: Make it RT aware -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 18:51:23 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -teach lockdep that we don't really do softirqs on -RT. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/irqflags.h | 10 +++++++--- - kernel/locking/lockdep.c | 2 ++ - 2 files changed, 9 insertions(+), 3 deletions(-) - ---- a/include/linux/irqflags.h -+++ b/include/linux/irqflags.h -@@ -25,8 +25,6 @@ - # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) - # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) - # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) --# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) --# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) - # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, - #else - # define trace_hardirqs_on() do { } while (0) -@@ -39,9 +37,15 @@ - # define trace_softirqs_enabled(p) 0 - # define trace_hardirq_enter() do { } while (0) - # define trace_hardirq_exit() do { } while (0) -+# define INIT_TRACE_IRQFLAGS -+#endif -+ -+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) -+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) -+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) -+#else - # define lockdep_softirq_enter() do { } while (0) - # define lockdep_softirq_exit() do { } while (0) --# define INIT_TRACE_IRQFLAGS - #endif - - #if defined(CONFIG_IRQSOFF_TRACER) || \ ---- a/kernel/locking/lockdep.c -+++ b/kernel/locking/lockdep.c -@@ -3525,6 +3525,7 @@ static void check_flags(unsigned long fl - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * We dont accurately track softirq state in e.g. - * hardirq contexts (such as on 4KSTACKS), so only -@@ -3539,6 +3540,7 @@ static void check_flags(unsigned long fl - DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); - } - } -+#endif - - if (!debug_locks) - print_irqtrace_events(current); diff --git a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch deleted file mode 100644 index be66315e6..000000000 --- a/debian/patches/features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ /dev/null @@ -1,142 +0,0 @@ -From: Josh Cartwright -Date: Wed, 28 Jan 2015 13:08:45 -0600 -Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -"lockdep: Selftest: Only do hardirq context test for raw spinlock" -disabled the execution of certain tests with PREEMPT_RT_FULL, but did -not prevent the tests from still being defined. This leads to warnings -like: - - ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_12' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_21' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_12' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_21' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:580:1: warning: 'irqsafe1_soft_spin_12' defined but not used [-Wunused-function] - ... - -Fixed by wrapping the test definitions in #ifndef CONFIG_PREEMPT_RT_FULL -conditionals. 
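-
-Condensed, the shape of the fix is (illustrative excerpt; the patch
-applies the same guard to each generated group):
-
-    #ifndef CONFIG_PREEMPT_RT_FULL
-    #include "locking-selftest-rlock-hardirq.h"
-    GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-    #endif
-
-With the generator invocations compiled out, the irqsafe1_hard_rlock_*
-functions are never defined, so the -Wunused-function warnings go away.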
- - -Signed-off-by: Josh Cartwright -Signed-off-by: Xander Huff -Acked-by: Gratian Crisan -Signed-off-by: Sebastian Andrzej Siewior ---- - lib/locking-selftest.c | 27 +++++++++++++++++++++++++++ - 1 file changed, 27 insertions(+) - ---- a/lib/locking-selftest.c -+++ b/lib/locking-selftest.c -@@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem) - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) - -@@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Enabling hardirqs with a softirq-safe lock held: - */ -@@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A - #undef E1 - #undef E2 - -+#endif -+ - /* - * Enabling irqs with an irq-safe lock held: - */ -@@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) - -@@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -@@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) - -@@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 -@@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_ - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) - -@@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock irq inversion. - * -@@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver - #undef E2 - #undef E3 - -+#endif -+ -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock recursion that is actually safe. - */ -@@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_ - #undef E2 - #undef E3 - -+#endif -+ - /* - * read-lock / write-lock recursion that is unsafe. 
- */ diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch deleted file mode 100644 index 41e626c80..000000000 --- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch +++ /dev/null @@ -1,57 +0,0 @@ -Subject: lockdep: selftest: Only do hardirq context test for raw spinlock -From: Yong Zhang -Date: Mon, 16 Apr 2012 15:01:56 +0800 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -From: Yong Zhang - -On -rt there is no softirq context any more and rwlock is sleepable, -disable softirq context test and rwlock+irq test. - -Signed-off-by: Yong Zhang -Cc: Yong Zhang -Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner ---- - lib/locking-selftest.c | 23 +++++++++++++++++++++++ - 1 file changed, 23 insertions(+) - ---- a/lib/locking-selftest.c -+++ b/lib/locking-selftest.c -@@ -1858,6 +1858,7 @@ void locking_selftest(void) - - printk(" --------------------------------------------------------------------------\n"); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * irq-context testcases: - */ -@@ -1870,6 +1871,28 @@ void locking_selftest(void) - - DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); - // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); -+#else -+ /* On -rt, we only do hardirq context test for raw spinlock */ -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); -+#endif - - ww_tests(); - diff --git a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch deleted file mode 100644 index 698ec90d1..000000000 --- a/debian/patches/features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: "Wolfgang M. Reimer" -Date: Tue, 21 Jul 2015 16:20:07 +0200 -Subject: locking: locktorture: Do NOT include rwlock.h directly -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Including rwlock.h directly will cause kernel builds to fail -if CONFIG_PREEMPT_RT_FULL is defined. 
The correct header file -(rwlock_rt.h OR rwlock.h) will be included by spinlock.h which -is included by locktorture.c anyway. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Wolfgang M. Reimer -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/locktorture.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/kernel/locking/locktorture.c -+++ b/kernel/locking/locktorture.c -@@ -26,7 +26,6 @@ - #include - #include - #include --#include - #include - #include - #include diff --git a/debian/patches/features/all/rt/md-disable-bcache.patch b/debian/patches/features/all/rt/md-disable-bcache.patch deleted file mode 100644 index c5b7585e1..000000000 --- a/debian/patches/features/all/rt/md-disable-bcache.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Aug 2013 11:48:57 +0200 -Subject: md: disable bcache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -It uses anon semaphores -|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: -|drivers/md/bcache/request.c:1007:2: error: implicit declaration of function ‘up_read_non_owner’ [-Werror=implicit-function-declaration] -| up_read_non_owner(&dc->writeback_lock); -| ^ -|drivers/md/bcache/request.c: In function ‘request_write’: -|drivers/md/bcache/request.c:1033:2: error: implicit declaration of function ‘down_read_non_owner’ [-Werror=implicit-function-declaration] -| down_read_non_owner(&dc->writeback_lock); -| ^ - -either we get rid of those or we have to introduce them… - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/md/bcache/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/md/bcache/Kconfig -+++ b/drivers/md/bcache/Kconfig -@@ -1,6 +1,7 @@ - - config BCACHE - tristate "Block device as cache" -+ depends on !PREEMPT_RT_FULL - ---help--- - Allows a block device to be used as cache for other devices; uses - a btree for indexing and the layout is optimized for SSDs. diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch deleted file mode 100644 index d99ce29a4..000000000 --- a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch +++ /dev/null @@ -1,62 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 6 Apr 2010 16:51:31 +0200 -Subject: md: raid5: Make raid5_percpu handling RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -__raid_run_ops() disables preemption with get_cpu() around the access -to the raid5_percpu variables. That causes scheduling while atomic -spews on RT. - -Serialize the access to the percpu data with a lock and keep the code -preemptible. 
- -Reported-by: Udo van den Heuvel -Signed-off-by: Thomas Gleixner -Tested-by: Udo van den Heuvel - ---- - drivers/md/raid5.c | 7 +++++-- - drivers/md/raid5.h | 1 + - 2 files changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -1929,8 +1929,9 @@ static void raid_run_ops(struct stripe_h - struct raid5_percpu *percpu; - unsigned long cpu; - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - percpu = per_cpu_ptr(conf->percpu, cpu); -+ spin_lock(&percpu->lock); - if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { - ops_run_biofill(sh); - overlap_clear++; -@@ -1986,7 +1987,8 @@ static void raid_run_ops(struct stripe_h - if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&sh->raid_conf->wait_for_overlap); - } -- put_cpu(); -+ spin_unlock(&percpu->lock); -+ put_cpu_light(); - } - - static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) -@@ -6411,6 +6413,7 @@ static int raid5_alloc_percpu(struct r5c - __func__, cpu); - break; - } -+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); - } - put_online_cpus(); - ---- a/drivers/md/raid5.h -+++ b/drivers/md/raid5.h -@@ -504,6 +504,7 @@ struct r5conf { - int recovery_disabled; - /* per cpu variables */ - struct raid5_percpu { -+ spinlock_t lock; /* Protection for -RT */ - struct page *spare_page; /* Used when checking P/Q in raid6 */ - struct flex_array *scribble; /* space for constructing buffer - * lists and performing address diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch deleted file mode 100644 index c08c0ed78..000000000 --- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch +++ /dev/null @@ -1,23 +0,0 @@ -Subject: mips: Disable highmem on RT -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 17:10:12 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The current highmem handling on -RT is not compatible and needs fixups. - -Signed-off-by: Thomas Gleixner ---- - arch/mips/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -2409,7 +2409,7 @@ config CPU_R4400_WORKAROUNDS - # - config HIGHMEM - bool "High Memory Support" -- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA -+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL - - config CPU_SUPPORTS_HIGHMEM - bool diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch deleted file mode 100644 index 6fb98cc54..000000000 --- a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch +++ /dev/null @@ -1,28 +0,0 @@ -Subject: mm: bounce: Use local_irq_save_nort -From: Thomas Gleixner -Date: Wed, 09 Jan 2013 10:33:09 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -kmap_atomic() is preemptible on RT. 
- -Signed-off-by: Thomas Gleixner ---- - block/bounce.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/block/bounce.c -+++ b/block/bounce.c -@@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_v - unsigned long flags; - unsigned char *vto; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - vto = kmap_atomic(to->bv_page); - memcpy(vto + to->bv_offset, vfrom, to->bv_len); - kunmap_atomic(vto); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - #else /* CONFIG_HIGHMEM */ diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch deleted file mode 100644 index 3e700ea7e..000000000 --- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch +++ /dev/null @@ -1,135 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:51 -0500 -Subject: mm/swap: Convert to percpu locked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Replace global locks (get_cpu + local_irq_save) with "local_locks()". -Currently there is one for "rotate" and one for "swap". - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - --- - mm/swap.c | 34 ++++++++++++++++++++-------------- - 1 file changed, 20 insertions(+), 14 deletions(-) - ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -46,6 +47,9 @@ static DEFINE_PER_CPU(struct pagevec, lr - static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); - static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); - -+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); -+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); -+ - /* - * This path almost never happens for VM activity - pages are normally - * freed via pagevecs. But it gets used by networking.
-@@ -481,11 +485,11 @@ void rotate_reclaimable_page(struct page - unsigned long flags; - - page_cache_get(page); -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pvec = this_cpu_ptr(&lru_rotate_pvecs); - if (!pagevec_add(pvec, page)) - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - } - -@@ -536,12 +540,13 @@ static bool need_activate_page_drain(int - void activate_page(struct page *page) - { - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { -- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ activate_page_pvecs); - - page_cache_get(page); - if (!pagevec_add(pvec, page)) - pagevec_lru_move_fn(pvec, __activate_page, NULL); -- put_cpu_var(activate_page_pvecs); -+ put_locked_var(swapvec_lock, activate_page_pvecs); - } - } - -@@ -567,7 +572,7 @@ void activate_page(struct page *page) - - static void __lru_cache_activate_page(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - int i; - - /* -@@ -589,7 +594,7 @@ static void __lru_cache_activate_page(st - } - } - -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /* -@@ -630,13 +635,13 @@ EXPORT_SYMBOL(mark_page_accessed); - - static void __lru_cache_add(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - - page_cache_get(page); - if (!pagevec_space(pvec)) - __pagevec_lru_add(pvec); - pagevec_add(pvec, page); -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /** -@@ -816,9 +821,9 @@ void lru_add_drain_cpu(int cpu) - unsigned long flags; - - /* No harm done if a racing interrupt already did this */ -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - - pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); -@@ -846,18 +851,19 @@ void deactivate_file_page(struct page *p - return; - - if (likely(get_page_unless_zero(page))) { -- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ lru_deactivate_file_pvecs); - - if (!pagevec_add(pvec, page)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); -- put_cpu_var(lru_deactivate_file_pvecs); -+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); - } - } - - void lru_add_drain(void) - { -- lru_add_drain_cpu(get_cpu()); -- put_cpu(); -+ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); -+ local_unlock_cpu(swapvec_lock); - } - - static void lru_add_drain_per_cpu(struct work_struct *dummy) diff --git a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch b/debian/patches/features/all/rt/mm-disable-sloub-rt.patch deleted file mode 100644 index adaea51a3..000000000 --- a/debian/patches/features/all/rt/mm-disable-sloub-rt.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:44:03 -0500 -Subject: mm: Allow only slub on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - init/Kconfig | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1719,6 +1719,7 @@ choice - - config SLAB - bool "SLAB" -+ depends on !PREEMPT_RT_FULL - help - The regular slab allocator that is established and known to work - well in all environments. It organizes cache hot objects in -@@ -1737,6 +1738,7 @@ config SLUB - config SLOB - depends on EXPERT - bool "SLOB (Simple Allocator)" -+ depends on !PREEMPT_RT_FULL - help - SLOB replaces the stock allocator with a drastically simpler - allocator. SLOB is generally more space efficient but diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch deleted file mode 100644 index 00fed7e15..000000000 --- a/debian/patches/features/all/rt/mm-enable-slub.patch +++ /dev/null @@ -1,428 +0,0 @@ -Subject: mm: Enable SLUB for RT -From: Thomas Gleixner -Date: Thu, 25 Oct 2012 10:32:35 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Make SLUB RT aware by converting locks to raw and using free lists to -move the freeing out of the lock held region. - -Signed-off-by: Thomas Gleixner ---- - mm/slab.h | 4 + - mm/slub.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++-------------- - 2 files changed, 102 insertions(+), 27 deletions(-) - ---- a/mm/slab.h -+++ b/mm/slab.h -@@ -324,7 +324,11 @@ static inline struct kmem_cache *cache_f - * The slab lists for all objects. - */ - struct kmem_cache_node { -+#ifdef CONFIG_SLUB -+ raw_spinlock_t list_lock; -+#else - spinlock_t list_lock; -+#endif - - #ifdef CONFIG_SLAB - struct list_head slabs_partial; /* partial list first, better asm code */ ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -1075,7 +1075,7 @@ static noinline struct kmem_cache_node * - void *object = head; - int cnt = 0; - -- spin_lock_irqsave(&n->list_lock, *flags); -+ raw_spin_lock_irqsave(&n->list_lock, *flags); - slab_lock(page); - - if (!check_slab(s, page)) -@@ -1136,7 +1136,7 @@ static noinline struct kmem_cache_node * - - fail: - slab_unlock(page); -- spin_unlock_irqrestore(&n->list_lock, *flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, *flags); - slab_fix(s, "Object at 0x%p not freed", object); - return NULL; - } -@@ -1263,6 +1263,12 @@ static inline void dec_slabs_node(struct - - #endif /* CONFIG_SLUB_DEBUG */ - -+struct slub_free_list { -+ raw_spinlock_t lock; -+ struct list_head list; -+}; -+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); -+ - /* - * Hooks for other subsystems that check memory allocations. In a typical - * production configuration these hooks all should produce no code at all. 
-@@ -1402,7 +1408,11 @@ static struct page *allocate_slab(struct - - flags &= gfp_allowed_mask; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (system_state == SYSTEM_RUNNING) -+#else - if (gfpflags_allow_blocking(flags)) -+#endif - local_irq_enable(); - - flags |= s->allocflags; -@@ -1473,7 +1483,11 @@ static struct page *allocate_slab(struct - page->frozen = 1; - - out: -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (system_state == SYSTEM_RUNNING) -+#else - if (gfpflags_allow_blocking(flags)) -+#endif - local_irq_disable(); - if (!page) - return NULL; -@@ -1529,6 +1543,16 @@ static void __free_slab(struct kmem_cach - __free_kmem_pages(page, order); - } - -+static void free_delayed(struct list_head *h) -+{ -+ while(!list_empty(h)) { -+ struct page *page = list_first_entry(h, struct page, lru); -+ -+ list_del(&page->lru); -+ __free_slab(page->slab_cache, page); -+ } -+} -+ - #define need_reserve_slab_rcu \ - (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) - -@@ -1560,6 +1584,12 @@ static void free_slab(struct kmem_cache - } - - call_rcu(head, rcu_free_slab); -+ } else if (irqs_disabled()) { -+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); -+ -+ raw_spin_lock(&f->lock); -+ list_add(&page->lru, &f->list); -+ raw_spin_unlock(&f->lock); - } else - __free_slab(s, page); - } -@@ -1673,7 +1703,7 @@ static void *get_partial_node(struct kme - if (!n || !n->nr_partial) - return NULL; - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - list_for_each_entry_safe(page, page2, &n->partial, lru) { - void *t; - -@@ -1698,7 +1728,7 @@ static void *get_partial_node(struct kme - break; - - } -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - return object; - } - -@@ -1944,7 +1974,7 @@ static void deactivate_slab(struct kmem_ - * that acquire_slab() will see a slab page that - * is frozen - */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } else { - m = M_FULL; -@@ -1955,7 +1985,7 @@ static void deactivate_slab(struct kmem_ - * slabs from diagnostic functions will not see - * any frozen slabs. - */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } - -@@ -1990,7 +2020,7 @@ static void deactivate_slab(struct kmem_ - goto redo; - - if (lock) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - if (m == M_FREE) { - stat(s, DEACTIVATE_EMPTY); -@@ -2022,10 +2052,10 @@ static void unfreeze_partials(struct kme - n2 = get_node(s, page_to_nid(page)); - if (n != n2) { - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - n = n2; -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - - do { -@@ -2054,7 +2084,7 @@ static void unfreeze_partials(struct kme - } - - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - while (discard_page) { - page = discard_page; -@@ -2093,14 +2123,21 @@ static void put_cpu_partial(struct kmem_ - pobjects = oldpage->pobjects; - pages = oldpage->pages; - if (drain && pobjects > s->cpu_partial) { -+ struct slub_free_list *f; - unsigned long flags; -+ LIST_HEAD(tofree); - /* - * partial array is full. Move the existing - * set to the per node partial list. 
- */ - local_irq_save(flags); - unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock(&f->lock); - local_irq_restore(flags); -+ free_delayed(&tofree); - oldpage = NULL; - pobjects = 0; - pages = 0; -@@ -2172,7 +2209,22 @@ static bool has_cpu_slab(int cpu, void * - - static void flush_all(struct kmem_cache *s) - { -+ LIST_HEAD(tofree); -+ int cpu; -+ - on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); -+ for_each_online_cpu(cpu) { -+ struct slub_free_list *f; -+ -+ if (!has_cpu_slab(cpu, s)) -+ continue; -+ -+ f = &per_cpu(slub_free_list, cpu); -+ raw_spin_lock_irq(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock_irq(&f->lock); -+ free_delayed(&tofree); -+ } - } - - /* -@@ -2208,10 +2260,10 @@ static unsigned long count_partial(struc - unsigned long x = 0; - struct page *page; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - x += get_count(page); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return x; - } - #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ -@@ -2349,8 +2401,10 @@ static inline void *get_freelist(struct - * already disabled (which is the case for bulk allocation). - */ - static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, -- unsigned long addr, struct kmem_cache_cpu *c) -+ unsigned long addr, struct kmem_cache_cpu *c, -+ struct list_head *to_free) - { -+ struct slub_free_list *f; - void *freelist; - struct page *page; - -@@ -2410,6 +2464,13 @@ static void *___slab_alloc(struct kmem_c - VM_BUG_ON(!c->page->frozen); - c->freelist = get_freepointer(s, freelist); - c->tid = next_tid(c->tid); -+ -+out: -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, to_free); -+ raw_spin_unlock(&f->lock); -+ - return freelist; - - new_slab: -@@ -2441,7 +2502,7 @@ static void *___slab_alloc(struct kmem_c - deactivate_slab(s, page, get_freepointer(s, freelist)); - c->page = NULL; - c->freelist = NULL; -- return freelist; -+ goto out; - } - - /* -@@ -2453,6 +2514,7 @@ static void *__slab_alloc(struct kmem_ca - { - void *p; - unsigned long flags; -+ LIST_HEAD(tofree); - - local_irq_save(flags); - #ifdef CONFIG_PREEMPT -@@ -2464,8 +2526,9 @@ static void *__slab_alloc(struct kmem_ca - c = this_cpu_ptr(s->cpu_slab); - #endif - -- p = ___slab_alloc(s, gfpflags, node, addr, c); -+ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); - local_irq_restore(flags); -+ free_delayed(&tofree); - return p; - } - -@@ -2652,7 +2715,7 @@ static void __slab_free(struct kmem_cach - - do { - if (unlikely(n)) { -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - n = NULL; - } - prior = page->freelist; -@@ -2684,7 +2747,7 @@ static void __slab_free(struct kmem_cach - * Otherwise the list_lock will synchronize with - * other processors updating the list of slabs. 
- */ -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - } - } -@@ -2726,7 +2789,7 @@ static void __slab_free(struct kmem_cach - add_partial(n, page, DEACTIVATE_TO_TAIL); - stat(s, FREE_ADD_PARTIAL); - } -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return; - - slab_empty: -@@ -2741,7 +2804,7 @@ static void __slab_free(struct kmem_cach - remove_full(s, n, page); - } - -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - stat(s, FREE_SLAB); - discard_slab(s, page); - } -@@ -2913,6 +2976,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca - void **p) - { - struct kmem_cache_cpu *c; -+ LIST_HEAD(to_free); - int i; - - /* memcg and kmem_cache debug support */ -@@ -2936,7 +3000,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca - * of re-populating per CPU c->freelist - */ - p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, -- _RET_IP_, c); -+ _RET_IP_, c, &to_free); - if (unlikely(!p[i])) - goto error; - -@@ -2948,6 +3012,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca - } - c->tid = next_tid(c->tid); - local_irq_enable(); -+ free_delayed(&to_free); - - /* Clear memory outside IRQ disabled fastpath loop */ - if (unlikely(flags & __GFP_ZERO)) { -@@ -3095,7 +3160,7 @@ static void - init_kmem_cache_node(struct kmem_cache_node *n) - { - n->nr_partial = 0; -- spin_lock_init(&n->list_lock); -+ raw_spin_lock_init(&n->list_lock); - INIT_LIST_HEAD(&n->partial); - #ifdef CONFIG_SLUB_DEBUG - atomic_long_set(&n->nr_slabs, 0); -@@ -3677,7 +3742,7 @@ int __kmem_cache_shrink(struct kmem_cach - for (i = 0; i < SHRINK_PROMOTE_MAX; i++) - INIT_LIST_HEAD(promote + i); - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - /* - * Build lists of slabs to discard or promote. 
-@@ -3708,7 +3773,7 @@ int __kmem_cache_shrink(struct kmem_cach
- 	for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
- 		list_splice(promote + i, &n->partial);
- 
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 
- 	/* Release empty slabs */
- 	list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3884,6 +3949,12 @@ void __init kmem_cache_init(void)
- {
- 	static __initdata struct kmem_cache boot_kmem_cache,
- 		boot_kmem_cache_node;
-+	int cpu;
-+
-+	for_each_possible_cpu(cpu) {
-+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
-+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
-+	}
- 
- 	if (debug_guardpage_minorder())
- 		slub_max_order = 0;
-@@ -4127,7 +4198,7 @@ static int validate_slab_node(struct kme
- 	struct page *page;
- 	unsigned long flags;
- 
--	spin_lock_irqsave(&n->list_lock, flags);
-+	raw_spin_lock_irqsave(&n->list_lock, flags);
- 
- 	list_for_each_entry(page, &n->partial, lru) {
- 		validate_slab_slab(s, page, map);
-@@ -4149,7 +4220,7 @@ static int validate_slab_node(struct kme
- 	       s->name, count, atomic_long_read(&n->nr_slabs));
- 
- out:
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	return count;
- }
- 
-@@ -4337,12 +4408,12 @@ static int list_locations(struct kmem_ca
- 		if (!atomic_long_read(&n->nr_slabs))
- 			continue;
- 
--		spin_lock_irqsave(&n->list_lock, flags);
-+		raw_spin_lock_irqsave(&n->list_lock, flags);
- 		list_for_each_entry(page, &n->partial, lru)
- 			process_slab(&t, s, page, alloc, map);
- 		list_for_each_entry(page, &n->full, lru)
- 			process_slab(&t, s, page, alloc, map);
--		spin_unlock_irqrestore(&n->list_lock, flags);
-+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	}
- 
- 	for (i = 0; i < t.count; i++) {
diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
deleted file mode 100644
index 23754b29c..000000000
--- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From: Ingo Molnar
-Date: Fri, 3 Jul 2009 08:30:13 -0500
-Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-Disable preemption on -RT for the vmstat code. On vanilla the code runs in
-IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
-same resources are not updated in parallel due to preemption.
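-
-Illustration (a sketch, not from the original changelog): on many
-configurations the per-cpu counter update below expands to a plain
-read-modify-write,
-
-	/* roughly what raw_cpu_inc(vm_event_states.event[item]) does */
-	v = __this_cpu_read(vm_event_states.event[item]);
-	__this_cpu_write(vm_event_states.event[item], v + 1);
-
-which is only safe while no other task can run on the CPU between the
-read and the write. Vanilla guarantees that with IRQs off; the hunks
-below restore it on -RT with preempt_disable_rt()/preempt_enable_rt(),
-which compile away on !RT kernels.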
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - include/linux/vmstat.h | 4 ++++ - mm/vmstat.c | 6 ++++++ - 2 files changed, 10 insertions(+) - ---- a/include/linux/vmstat.h -+++ b/include/linux/vmstat.h -@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, v - */ - static inline void __count_vm_event(enum vm_event_item item) - { -+ preempt_disable_rt(); - raw_cpu_inc(vm_event_states.event[item]); -+ preempt_enable_rt(); - } - - static inline void count_vm_event(enum vm_event_item item) -@@ -43,7 +45,9 @@ static inline void count_vm_event(enum v - - static inline void __count_vm_events(enum vm_event_item item, long delta) - { -+ preempt_disable_rt(); - raw_cpu_add(vm_event_states.event[item], delta); -+ preempt_enable_rt(); - } - - static inline void count_vm_events(enum vm_event_item item, long delta) ---- a/mm/vmstat.c -+++ b/mm/vmstat.c -@@ -226,6 +226,7 @@ void __mod_zone_page_state(struct zone * - long x; - long t; - -+ preempt_disable_rt(); - x = delta + __this_cpu_read(*p); - - t = __this_cpu_read(pcp->stat_threshold); -@@ -235,6 +236,7 @@ void __mod_zone_page_state(struct zone * - x = 0; - } - __this_cpu_write(*p, x); -+ preempt_enable_rt(); - } - EXPORT_SYMBOL(__mod_zone_page_state); - -@@ -267,6 +269,7 @@ void __inc_zone_state(struct zone *zone, - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_inc_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v > t)) { -@@ -275,6 +278,7 @@ void __inc_zone_state(struct zone *zone, - zone_page_state_add(v + overstep, zone, item); - __this_cpu_write(*p, -overstep); - } -+ preempt_enable_rt(); - } - - void __inc_zone_page_state(struct page *page, enum zone_stat_item item) -@@ -289,6 +293,7 @@ void __dec_zone_state(struct zone *zone, - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_dec_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v < - t)) { -@@ -297,6 +302,7 @@ void __dec_zone_state(struct zone *zone, - zone_page_state_add(v - overstep, zone, item); - __this_cpu_write(*p, overstep); - } -+ preempt_enable_rt(); - } - - void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch deleted file mode 100644 index c671d67b2..000000000 --- a/debian/patches/features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ /dev/null @@ -1,69 +0,0 @@ -From: Yang Shi -Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context -Date: Wed, 30 Oct 2013 11:48:33 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The following trace is triggered when running ltp oom test cases: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 -in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03 -Preemption disabled at:[] mem_cgroup_reclaim+0x90/0xe0 - -CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2 -Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 -ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70 -ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0 -ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0 -Call Trace: -[] dump_stack+0x19/0x1b -[] __might_sleep+0xf1/0x170 -[] 
rt_spin_lock+0x20/0x50
-[] queue_work_on+0x61/0x100
-[] drain_all_stock+0xe1/0x1c0
-[] mem_cgroup_reclaim+0x90/0xe0
-[] __mem_cgroup_try_charge+0x41a/0xc40
-[] ? release_pages+0x1b1/0x1f0
-[] ? sched_exec+0x40/0xb0
-[] mem_cgroup_charge_common+0x37/0x70
-[] mem_cgroup_newpage_charge+0x26/0x30
-[] handle_pte_fault+0x618/0x840
-[] ? unpin_current_cpu+0x16/0x70
-[] ? migrate_enable+0xd4/0x200
-[] handle_mm_fault+0x145/0x1e0
-[] __do_page_fault+0x1a1/0x4c0
-[] ? preempt_schedule_irq+0x4b/0x70
-[] ? retint_kernel+0x37/0x40
-[] do_page_fault+0xe/0x10
-[] page_fault+0x22/0x30
-
-So, to prevent schedule_work_on from being called in preempt-disabled context,
-replace the pair of get/put_cpu() with get/put_cpu_light().
-
-
-Signed-off-by: Yang Shi
-Signed-off-by: Sebastian Andrzej Siewior
----
-
- mm/memcontrol.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/mm/memcontrol.c
-+++ b/mm/memcontrol.c
-@@ -1957,7 +1957,7 @@ static void drain_all_stock(struct mem_c
- 		return;
- 	/* Notify other cpus that system-wide "drain" is running */
- 	get_online_cpus();
--	curcpu = get_cpu();
-+	curcpu = get_cpu_light();
- 	for_each_online_cpu(cpu) {
- 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
- 		struct mem_cgroup *memcg;
-@@ -1974,7 +1974,7 @@ static void drain_all_stock(struct mem_c
- 			schedule_work_on(cpu, &stock->work);
- 		}
- 	}
--	put_cpu();
-+	put_cpu_light();
- 	put_online_cpus();
- 	mutex_unlock(&percpu_charge_mutex);
- }
diff --git a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
deleted file mode 100644
index ac84808d5..000000000
--- a/debian/patches/features/all/rt/mm-memcontrol-do_not_disable_irq.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-From: Sebastian Andrzej Siewior
-Subject: mm/memcontrol: Replace local_irq_disable with local locks
-Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-There are a few local_irq_disable() which then take sleeping locks. This
-patch converts them to local locks.
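-
-The conversion pattern, sketched with the event_lock that the hunks
-below introduce:
-
-	static DEFINE_LOCAL_IRQ_LOCK(event_lock);
-
-	local_lock_irq(event_lock);	/* IRQs off on !RT, per-cpu sleeping lock on RT */
-	mem_cgroup_charge_statistics(to, page, nr_pages);
-	memcg_check_events(to, page);
-	local_unlock_irq(event_lock);
-
-On !RT this behaves exactly like local_irq_disable()/local_irq_enable();
-on RT the section stays preemptible and may take further sleeping locks.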
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/swap.h | 1 + - mm/compaction.c | 6 ++++-- - mm/memcontrol.c | 20 ++++++++++++++------ - mm/swap.c | 2 +- - 4 files changed, 20 insertions(+), 9 deletions(-) - ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -298,6 +298,7 @@ extern unsigned long nr_free_pagecache_p - - - /* linux/mm/swap.c */ -+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); - extern void lru_cache_add(struct page *); - extern void lru_cache_add_anon(struct page *page); - extern void lru_cache_add_file(struct page *page); ---- a/mm/compaction.c -+++ b/mm/compaction.c -@@ -1443,10 +1443,12 @@ static int compact_zone(struct zone *zon - cc->migrate_pfn & ~((1UL << cc->order) - 1); - - if (cc->last_migrated_pfn < current_block_start) { -- cpu = get_cpu(); -+ cpu = get_cpu_light(); -+ local_lock_irq(swapvec_lock); - lru_add_drain_cpu(cpu); -+ local_unlock_irq(swapvec_lock); - drain_local_pages(zone); -- put_cpu(); -+ put_cpu_light(); - /* No more flushing until we migrate again */ - cc->last_migrated_pfn = 0; - } ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -67,6 +67,8 @@ - #include - #include - #include -+#include -+ - #include "slab.h" - - #include -@@ -87,6 +89,7 @@ int do_swap_account __read_mostly; - #define do_swap_account 0 - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(event_lock); - static const char * const mem_cgroup_stat_names[] = { - "cache", - "rss", -@@ -4615,12 +4618,12 @@ static int mem_cgroup_move_account(struc - - ret = 0; - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(to, page, nr_pages); - memcg_check_events(to, page); - mem_cgroup_charge_statistics(from, page, -nr_pages); - memcg_check_events(from, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - out_unlock: - unlock_page(page); - out: -@@ -5373,10 +5376,10 @@ void mem_cgroup_commit_charge(struct pag - VM_BUG_ON_PAGE(!PageTransHuge(page), page); - } - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(memcg, page, nr_pages); - memcg_check_events(memcg, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - - if (do_swap_account && PageSwapCache(page)) { - swp_entry_t entry = { .val = page_private(page) }; -@@ -5432,14 +5435,14 @@ static void uncharge_batch(struct mem_cg - memcg_oom_recover(memcg); - } - -- local_irq_save(flags); -+ local_lock_irqsave(event_lock, flags); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); - __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); - __this_cpu_add(memcg->stat->nr_page_events, nr_pages); - memcg_check_events(memcg, dummy_page); -- local_irq_restore(flags); -+ local_unlock_irqrestore(event_lock, flags); - - if (!mem_cgroup_is_root(memcg)) - css_put_many(&memcg->css, nr_pages); -@@ -5631,6 +5634,7 @@ void mem_cgroup_swapout(struct page *pag - { - struct mem_cgroup *memcg; - unsigned short oldid; -+ unsigned long flags; - - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(page_count(page), page); -@@ -5659,9 +5663,13 @@ void mem_cgroup_swapout(struct page *pag - * important here to have the interrupts disabled because it is the - * only synchronisation we have for udpating the per-CPU variables. 
- */ -+ local_lock_irqsave(event_lock, flags); -+#ifndef CONFIG_PREEMPT_RT_BASE - VM_BUG_ON(!irqs_disabled()); -+#endif - mem_cgroup_charge_statistics(memcg, page, -1); - memcg_check_events(memcg, page); -+ local_unlock_irqrestore(event_lock, flags); - } - - /** ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -48,7 +48,7 @@ static DEFINE_PER_CPU(struct pagevec, lr - static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); - - static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); --static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); -+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); - - /* - * This path almost never happens for VM activity - pages are normally diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch deleted file mode 100644 index 2ce219499..000000000 --- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch +++ /dev/null @@ -1,28 +0,0 @@ -Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock -From: Thomas Gleixner -Date: Thu, 27 Sep 2012 11:11:46 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The plain spinlock while sufficient does not update the local_lock -internals. Use a proper local_lock function instead to ease debugging. - -Signed-off-by: Thomas Gleixner - ---- - mm/page_alloc.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -269,9 +269,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); - - #ifdef CONFIG_PREEMPT_RT_BASE - # define cpu_lock_irqsave(cpu, flags) \ -- spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) -+ local_lock_irqsave_on(pa_lock, flags, cpu) - # define cpu_unlock_irqrestore(cpu, flags) \ -- spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) -+ local_unlock_irqrestore_on(pa_lock, flags, cpu) - #else - # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) - # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch deleted file mode 100644 index 26679507b..000000000 --- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch +++ /dev/null @@ -1,198 +0,0 @@ -From: Peter Zijlstra -Date: Fri Jul 3 08:44:37 2009 -0500 -Subject: mm: page_alloc: Reduce lock sections further -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Split out the pages which are to be freed into a separate list and -call free_pages_bulk() outside of the percpu page allocator locks. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Thomas Gleixner ---- - mm/page_alloc.c | 89 +++++++++++++++++++++++++++++++++++++++----------------- - 1 file changed, 63 insertions(+), 26 deletions(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -777,7 +777,7 @@ static inline int free_pages_check(struc - } - - /* -- * Frees a number of pages from the PCP lists -+ * Frees a number of pages which have been collected from the pcp lists. - * Assumes all pages on list are in same zone, and of same order. - * count is the number of pages to free. - * -@@ -788,18 +788,53 @@ static inline int free_pages_check(struc - * pinned" detection logic. 
- */ - static void free_pcppages_bulk(struct zone *zone, int count, -- struct per_cpu_pages *pcp) -+ struct list_head *list) - { -- int migratetype = 0; -- int batch_free = 0; - int to_free = count; - unsigned long nr_scanned; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&zone->lock, flags); - -- spin_lock(&zone->lock); - nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); - if (nr_scanned) - __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); - -+ while (!list_empty(list)) { -+ struct page *page = list_first_entry(list, struct page, lru); -+ int mt; /* migratetype of the to-be-freed page */ -+ -+ /* must delete as __free_one_page list manipulates */ -+ list_del(&page->lru); -+ -+ mt = get_pcppage_migratetype(page); -+ /* MIGRATE_ISOLATE page should not go to pcplists */ -+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -+ /* Pageblock could have been isolated meanwhile */ -+ if (unlikely(has_isolate_pageblock(zone))) -+ mt = get_pageblock_migratetype(page); -+ -+ __free_one_page(page, page_to_pfn(page), zone, 0, mt); -+ trace_mm_page_pcpu_drain(page, 0, mt); -+ to_free--; -+ } -+ WARN_ON(to_free != 0); -+ spin_unlock_irqrestore(&zone->lock, flags); -+} -+ -+/* -+ * Moves a number of pages from the PCP lists to free list which -+ * is freed outside of the locked region. -+ * -+ * Assumes all pages on list are in same zone, and of same order. -+ * count is the number of pages to free. -+ */ -+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, -+ struct list_head *dst) -+{ -+ int migratetype = 0; -+ int batch_free = 0; -+ - while (to_free) { - struct page *page; - struct list_head *list; -@@ -815,7 +850,7 @@ static void free_pcppages_bulk(struct zo - batch_free++; - if (++migratetype == MIGRATE_PCPTYPES) - migratetype = 0; -- list = &pcp->lists[migratetype]; -+ list = &src->lists[migratetype]; - } while (list_empty(list)); - - /* This is the only non-empty list. Free them all. 
*/ -@@ -823,24 +858,12 @@ static void free_pcppages_bulk(struct zo - batch_free = to_free; - - do { -- int mt; /* migratetype of the to-be-freed page */ -- -- page = list_entry(list->prev, struct page, lru); -- /* must delete as __free_one_page list manipulates */ -+ page = list_last_entry(list, struct page, lru); - list_del(&page->lru); - -- mt = get_pcppage_migratetype(page); -- /* MIGRATE_ISOLATE page should not go to pcplists */ -- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -- /* Pageblock could have been isolated meanwhile */ -- if (unlikely(has_isolate_pageblock(zone))) -- mt = get_pageblock_migratetype(page); -- -- __free_one_page(page, page_to_pfn(page), zone, 0, mt); -- trace_mm_page_pcpu_drain(page, 0, mt); -+ list_add(&page->lru, dst); - } while (--to_free && --batch_free && !list_empty(list)); - } -- spin_unlock(&zone->lock); - } - - static void free_one_page(struct zone *zone, -@@ -849,7 +872,9 @@ static void free_one_page(struct zone *z - int migratetype) - { - unsigned long nr_scanned; -- spin_lock(&zone->lock); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&zone->lock, flags); - nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); - if (nr_scanned) - __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); -@@ -859,7 +884,7 @@ static void free_one_page(struct zone *z - migratetype = get_pfnblock_migratetype(page, pfn); - } - __free_one_page(page, pfn, zone, order, migratetype); -- spin_unlock(&zone->lock); -+ spin_unlock_irqrestore(&zone->lock, flags); - } - - static int free_tail_pages_check(struct page *head_page, struct page *page) -@@ -1870,16 +1895,18 @@ static int rmqueue_bulk(struct zone *zon - void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) - { - unsigned long flags; -+ LIST_HEAD(dst); - int to_drain, batch; - - local_lock_irqsave(pa_lock, flags); - batch = READ_ONCE(pcp->batch); - to_drain = min(pcp->count, batch); - if (to_drain > 0) { -- free_pcppages_bulk(zone, to_drain, pcp); -+ isolate_pcp_pages(to_drain, pcp, &dst); - pcp->count -= to_drain; - } - local_unlock_irqrestore(pa_lock, flags); -+ free_pcppages_bulk(zone, to_drain, &dst); - } - #endif - -@@ -1895,16 +1922,21 @@ static void drain_pages_zone(unsigned in - unsigned long flags; - struct per_cpu_pageset *pset; - struct per_cpu_pages *pcp; -+ LIST_HEAD(dst); -+ int count; - - cpu_lock_irqsave(cpu, flags); - pset = per_cpu_ptr(zone->pageset, cpu); - - pcp = &pset->pcp; -- if (pcp->count) { -- free_pcppages_bulk(zone, pcp->count, pcp); -+ count = pcp->count; -+ if (count) { -+ isolate_pcp_pages(count, pcp, &dst); - pcp->count = 0; - } - cpu_unlock_irqrestore(cpu, flags); -+ if (count) -+ free_pcppages_bulk(zone, count, &dst); - } - - /* -@@ -2082,8 +2114,13 @@ void free_hot_cold_page(struct page *pag - pcp->count++; - if (pcp->count >= pcp->high) { - unsigned long batch = READ_ONCE(pcp->batch); -- free_pcppages_bulk(zone, batch, pcp); -+ LIST_HEAD(dst); -+ -+ isolate_pcp_pages(batch, pcp, &dst); - pcp->count -= batch; -+ local_unlock_irqrestore(pa_lock, flags); -+ free_pcppages_bulk(zone, batch, &dst); -+ return; - } - - out: diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch deleted file mode 100644 index 6af2d2868..000000000 --- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ /dev/null @@ -1,207 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:37 -0500 -Subject: mm: page_alloc: rt-friendly per-cpu pages -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -rt-friendly per-cpu pages: convert the irqs-off per-cpu locking -method into a preemptible, explicit-per-cpu-locks method. - -Contains fixes from: - Peter Zijlstra - Thomas Gleixner - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - mm/page_alloc.c | 57 ++++++++++++++++++++++++++++++++++++++++---------------- - 1 file changed, 41 insertions(+), 16 deletions(-) - ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -60,6 +60,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -264,6 +265,18 @@ EXPORT_SYMBOL(nr_node_ids); - EXPORT_SYMBOL(nr_online_nodes); - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(pa_lock); -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define cpu_lock_irqsave(cpu, flags) \ -+ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) -+# define cpu_unlock_irqrestore(cpu, flags) \ -+ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) -+#else -+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) -+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) -+#endif -+ - int page_group_by_mobility_disabled __read_mostly; - - #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -@@ -997,10 +1010,10 @@ static void __free_pages_ok(struct page - return; - - migratetype = get_pfnblock_migratetype(page, pfn); -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, pfn, order, migratetype); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - static void __init __free_pages_boot_core(struct page *page, -@@ -1859,14 +1872,14 @@ void drain_zone_pages(struct zone *zone, - unsigned long flags; - int to_drain, batch; - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - batch = READ_ONCE(pcp->batch); - to_drain = min(pcp->count, batch); - if (to_drain > 0) { - free_pcppages_bulk(zone, to_drain, pcp); - pcp->count -= to_drain; - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - #endif - -@@ -1883,7 +1896,7 @@ static void drain_pages_zone(unsigned in - struct per_cpu_pageset *pset; - struct per_cpu_pages *pcp; - -- local_irq_save(flags); -+ cpu_lock_irqsave(cpu, flags); - pset = per_cpu_ptr(zone->pageset, cpu); - - pcp = &pset->pcp; -@@ -1891,7 +1904,7 @@ static void drain_pages_zone(unsigned in - free_pcppages_bulk(zone, pcp->count, pcp); - pcp->count = 0; - } -- local_irq_restore(flags); -+ cpu_unlock_irqrestore(cpu, flags); - } - - /* -@@ -1977,8 +1990,17 @@ void drain_all_pages(struct zone *zone) - else - cpumask_clear_cpu(cpu, &cpus_with_pcps); - } -+#ifndef CONFIG_PREEMPT_RT_BASE - on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, - zone, 1); -+#else -+ for_each_cpu(cpu, &cpus_with_pcps) { -+ if (zone) -+ drain_pages_zone(cpu, zone); -+ else -+ drain_pages(cpu); -+ } -+#endif - } - - #ifdef CONFIG_HIBERNATION -@@ -2034,7 +2056,7 @@ void free_hot_cold_page(struct page *pag - - migratetype = get_pfnblock_migratetype(page, pfn); - set_pcppage_migratetype(page, migratetype); -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - __count_vm_event(PGFREE); - - /* -@@ -2065,7 +2087,7 @@ void free_hot_cold_page(struct page *pag - } - - out: -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - /* -@@ -2200,7 +2222,7 @@ struct page *buffered_rmqueue(struct zon - struct per_cpu_pages *pcp; - struct list_head *list; - -- local_irq_save(flags); -+ 
local_lock_irqsave(pa_lock, flags); - pcp = &this_cpu_ptr(zone->pageset)->pcp; - list = &pcp->lists[migratetype]; - if (list_empty(list)) { -@@ -2232,7 +2254,7 @@ struct page *buffered_rmqueue(struct zon - */ - WARN_ON_ONCE(order > 1); - } -- spin_lock_irqsave(&zone->lock, flags); -+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); - - page = NULL; - if (alloc_flags & ALLOC_HARDER) { -@@ -2242,11 +2264,13 @@ struct page *buffered_rmqueue(struct zon - } - if (!page) - page = __rmqueue(zone, order, migratetype, gfp_flags); -- spin_unlock(&zone->lock); -- if (!page) -+ if (!page) { -+ spin_unlock(&zone->lock); - goto failed; -+ } - __mod_zone_freepage_state(zone, -(1 << order), - get_pcppage_migratetype(page)); -+ spin_unlock(&zone->lock); - } - - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); -@@ -2256,13 +2280,13 @@ struct page *buffered_rmqueue(struct zon - - __count_zone_vm_events(PGALLOC, zone, 1 << order); - zone_statistics(preferred_zone, zone, gfp_flags); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - - VM_BUG_ON_PAGE(bad_range(zone, page), page); - return page; - - failed: -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - return NULL; - } - -@@ -5928,6 +5952,7 @@ static int page_alloc_cpu_notify(struct - void __init page_alloc_init(void) - { - hotcpu_notifier(page_alloc_cpu_notify, 0); -+ local_irq_lock_init(pa_lock); - } - - /* -@@ -6822,7 +6847,7 @@ void zone_pcp_reset(struct zone *zone) - struct per_cpu_pageset *pset; - - /* avoid races with drain_pages() */ -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - if (zone->pageset != &boot_pageset) { - for_each_online_cpu(cpu) { - pset = per_cpu_ptr(zone->pageset, cpu); -@@ -6831,7 +6856,7 @@ void zone_pcp_reset(struct zone *zone) - free_percpu(zone->pageset); - zone->pageset = &boot_pageset; - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch deleted file mode 100644 index fc4cb7f67..000000000 --- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch +++ /dev/null @@ -1,72 +0,0 @@ -From: Yong Zhang -Date: Tue, 15 May 2012 13:53:56 +0800 -Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -User preempt_*_rt instead of local_irq_*_rt or otherwise there will be -warning on ARM like below: - -WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() -Modules linked in: -[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) -[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) -[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) -[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) -[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) -[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) -[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) -[] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) -[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) -[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) -[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) ----[ end trace 0000000000000002 ]--- - -The reason is that ARM 
need irq enabled when doing activate_mm(). -According to mm-protect-activate-switch-mm.patch, actually -preempt_[disable|enable]_rt() is sufficient. - -Inspired-by: Steven Rostedt -Signed-off-by: Yong Zhang -Cc: Steven Rostedt -Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner ---- - fs/exec.c | 2 ++ - mm/mmu_context.c | 2 ++ - 2 files changed, 4 insertions(+) - ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -865,12 +865,14 @@ static int exec_mmap(struct mm_struct *m - } - } - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - tsk->mm = mm; - tsk->active_mm = mm; - activate_mm(active_mm, mm); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - if (old_mm) { - up_read(&old_mm->mmap_sem); ---- a/mm/mmu_context.c -+++ b/mm/mmu_context.c -@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) - struct task_struct *tsk = current; - - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - if (active_mm != mm) { - atomic_inc(&mm->mm_count); -@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) - } - tsk->mm = mm; - switch_mm(active_mm, mm, tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - #ifdef finish_arch_post_lock_switch - finish_arch_post_lock_switch(); diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch deleted file mode 100644 index 5a953cd7a..000000000 --- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch +++ /dev/null @@ -1,289 +0,0 @@ -Subject: mm, rt: kmap_atomic scheduling -From: Peter Zijlstra -Date: Thu, 28 Jul 2011 10:43:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In fact, with migrate_disable() existing one could play games with -kmap_atomic. You could save/restore the kmap_atomic slots on context -switch (if there are any in use of course), this should be esp easy now -that we have a kmap_atomic stack. - -Something like the below.. it wants replacing all the preempt_disable() -stuff with pagefault_disable() && migrate_disable() of course, but then -you can flip kmaps around like below. - -Signed-off-by: Peter Zijlstra -[dvhart@linux.intel.com: build fix] -Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins - -[tglx@linutronix.de: Get rid of the per cpu variable and store the idx - and the pte content right away in the task struct. - Shortens the context switch code. 
] ---- - arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++ - arch/x86/mm/highmem_32.c | 13 ++++++++++--- - arch/x86/mm/iomap_32.c | 9 ++++++++- - include/linux/highmem.h | 27 +++++++++++++++++++++++---- - include/linux/sched.h | 7 +++++++ - include/linux/uaccess.h | 2 ++ - mm/highmem.c | 6 ++++-- - 7 files changed, 86 insertions(+), 10 deletions(-) - ---- a/arch/x86/kernel/process_32.c -+++ b/arch/x86/kernel/process_32.c -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -210,6 +211,35 @@ start_thread(struct pt_regs *regs, unsig - } - EXPORT_SYMBOL_GPL(start_thread); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ pte_t *ptep = kmap_pte - idx; -+ -+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); -+ } -+} -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - - /* - * switch_to(x,y) should switch tasks from x to y. -@@ -286,6 +316,8 @@ EXPORT_SYMBOL_GPL(start_thread); - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); - -+ switch_kmaps(prev_p, next_p); -+ - /* - * Leave lazy mode, flushing any hypercalls made here. - * This must be done before restoring TLS segments so ---- a/arch/x86/mm/highmem_32.c -+++ b/arch/x86/mm/highmem_32.c -@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap); - */ - void *kmap_atomic_prot(struct page *page, pgprot_t prot) - { -+ pte_t pte = mk_pte(page, prot); - unsigned long vaddr; - int idx, type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - - if (!PageHighMem(page)) -@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); -- set_pte(kmap_pte-idx, mk_pte(page, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte-idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - arch_flush_lazy_mmu_mode(); -@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr) - #endif - - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - ---- a/arch/x86/mm/iomap_32.c -+++ b/arch/x86/mm/iomap_32.c -@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); - - void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - { -+ pte_t pte = pfn_pte(pfn, prot); - unsigned long vaddr; - int idx, type; - -@@ -65,7 +66,10 @@ void *kmap_atomic_prot_pfn(unsigned long - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte - idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -113,6 +117,9 @@ iounmap_atomic(void __iomem *kvaddr) - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - } ---- a/include/linux/highmem.h -+++ b/include/linux/highmem.h -@@ -86,32 +86,51 @@ static inline void __kunmap_atomic(void - - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -+#ifndef CONFIG_PREEMPT_RT_FULL - DECLARE_PER_CPU(int, __kmap_atomic_idx); -+#endif - - static inline int kmap_atomic_idx_push(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - --#ifdef CONFIG_DEBUG_HIGHMEM -+# ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); --#endif -+# endif - return idx; -+#else -+ current->kmap_idx++; -+ BUG_ON(current->kmap_idx > KM_TYPE_NR); -+ return current->kmap_idx - 1; -+#endif - } - - static inline int kmap_atomic_idx(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - return __this_cpu_read(__kmap_atomic_idx) - 1; -+#else -+ return current->kmap_idx - 1; -+#endif - } - - static inline void kmap_atomic_idx_pop(void) - { --#ifdef CONFIG_DEBUG_HIGHMEM -+#ifndef CONFIG_PREEMPT_RT_FULL -+# ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); --#else -+# else - __this_cpu_dec(__kmap_atomic_idx); -+# endif -+#else -+ current->kmap_idx--; -+# ifdef CONFIG_DEBUG_HIGHMEM -+ BUG_ON(current->kmap_idx < 0); -+# endif - #endif - } - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -26,6 +26,7 @@ struct sched_param { - #include - #include - #include -+#include - - #include - #include -@@ -1849,6 +1850,12 @@ struct task_struct { - int softirq_nestcnt; - unsigned int softirqs_raised; - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 -+ int kmap_idx; -+ pte_t kmap_pte[KM_TYPE_NR]; -+# endif -+#endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; - #endif ---- a/include/linux/uaccess.h -+++ b/include/linux/uaccess.h -@@ -24,6 +24,7 @@ static __always_inline void pagefault_di - */ - static inline void pagefault_disable(void) - { -+ migrate_disable(); - pagefault_disabled_inc(); - /* - * make sure to have issued the store before a pagefault -@@ -40,6 +41,7 @@ static inline void pagefault_enable(void - */ - barrier(); - pagefault_disabled_dec(); -+ 
migrate_enable(); - } - - /* ---- a/mm/highmem.c -+++ b/mm/highmem.c -@@ -29,10 +29,11 @@ - #include - #include - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - DEFINE_PER_CPU(int, __kmap_atomic_idx); - #endif -+#endif - - /* - * Virtual_count is not a pure "count". -@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkm - unsigned long totalhigh_pages __read_mostly; - EXPORT_SYMBOL(totalhigh_pages); - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); -+#endif - - unsigned int nr_free_highpages (void) - { diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch deleted file mode 100644 index eafe1fa4c..000000000 --- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch +++ /dev/null @@ -1,44 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:34 -0500 -Subject: mm/scatterlist: Do not disable irqs on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The local_irq_save() is not only used to get things done "fast" but -also to ensure that in case of SG_MITER_ATOMIC we are in "atomic" -context for kmap_atomic(). For -RT it is enough to keep pagefault -disabled (which is currently handled by kmap_atomic()). - -Signed-off-by: Thomas Gleixner ---- - lib/scatterlist.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/lib/scatterlist.c -+++ b/lib/scatterlist.c -@@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_ite - flush_kernel_dcache_page(miter->page); - - if (miter->__flags & SG_MITER_ATOMIC) { -- WARN_ON_ONCE(preemptible()); -+ WARN_ON_ONCE(!pagefault_disabled()); - kunmap_atomic(miter->addr); - } else - kunmap(miter->page); -@@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist - if (!sg_miter_skip(&miter, skip)) - return false; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - while (sg_miter_next(&miter) && offset < buflen) { - unsigned int len; -@@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist - - sg_miter_stop(&miter); - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return offset; - } - EXPORT_SYMBOL(sg_copy_buffer); diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch deleted file mode 100644 index 7a66138bf..000000000 --- a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch +++ /dev/null @@ -1,66 +0,0 @@ -Subject: mm/vmalloc: Another preempt disable region which sucks -From: Thomas Gleixner -Date: Tue, 12 Jul 2011 11:39:36 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Avoid the preempt disable version of get_cpu_var(). The inner-lock should -provide enough serialisation. 
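-
-The shape of the change (sketch; both hunks below follow it):
-
-	cpu = get_cpu_light();		/* pins the task, stays preemptible on RT */
-	vbq = this_cpu_ptr(&vmap_block_queue);
-	spin_lock(&vbq->lock);		/* the inner lock does the serialisation */
-	list_add_tail_rcu(&vb->free_list, &vbq->free);
-	spin_unlock(&vbq->lock);
-	put_cpu_light();
-
-get_cpu_var() would embed a preempt_disable(), under which the
-spin_lock() above (a sleeping lock on RT) must not be taken.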
- -Signed-off-by: Thomas Gleixner ---- - mm/vmalloc.c | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) - ---- a/mm/vmalloc.c -+++ b/mm/vmalloc.c -@@ -821,7 +821,7 @@ static void *new_vmap_block(unsigned int - struct vmap_block *vb; - struct vmap_area *va; - unsigned long vb_idx; -- int node, err; -+ int node, err, cpu; - void *vaddr; - - node = numa_node_id(); -@@ -864,11 +864,12 @@ static void *new_vmap_block(unsigned int - BUG_ON(err); - radix_tree_preload_end(); - -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - spin_lock(&vbq->lock); - list_add_tail_rcu(&vb->free_list, &vbq->free); - spin_unlock(&vbq->lock); -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - - return vaddr; - } -@@ -937,6 +938,7 @@ static void *vb_alloc(unsigned long size - struct vmap_block *vb; - void *vaddr = NULL; - unsigned int order; -+ int cpu; - - BUG_ON(offset_in_page(size)); - BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -951,7 +953,8 @@ static void *vb_alloc(unsigned long size - order = get_order(size); - - rcu_read_lock(); -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { - unsigned long pages_off; - -@@ -974,7 +977,7 @@ static void *vb_alloc(unsigned long size - break; - } - -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - rcu_read_unlock(); - - /* Allocate new block if nothing was found */ diff --git a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch deleted file mode 100644 index 70f0361a0..000000000 --- a/debian/patches/features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch +++ /dev/null @@ -1,151 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Jan 2015 17:19:44 +0100 -Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -workingset_shadow_nodes is protected by local_irq_disable(). Some users -use spin_lock_irq(). -Replace the irq/on with a local_lock(). Rename workingset_shadow_nodes -so I catch users of it which will be introduced later. 
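-
-Sketch of a converted call site (the names are the ones introduced by
-the hunks below):
-
-	DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
-
-	local_lock(workingset_shadow_lock);
-	list_lru_add(&__workingset_shadow_nodes, &node->private_list);
-	local_unlock(workingset_shadow_lock);
-
-The rename to __workingset_shadow_nodes is deliberate: any later user of
-the old name fails to build and gets its locking reviewed instead of
-silently racing.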
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/swap.h | 4 +++- - mm/filemap.c | 11 ++++++++--- - mm/truncate.c | 7 +++++-- - mm/workingset.c | 23 ++++++++++++----------- - 4 files changed, 28 insertions(+), 17 deletions(-) - ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - struct notifier_block; -@@ -252,7 +253,8 @@ struct swap_info_struct { - void *workingset_eviction(struct address_space *mapping, struct page *page); - bool workingset_refault(void *shadow); - void workingset_activation(struct page *page); --extern struct list_lru workingset_shadow_nodes; -+extern struct list_lru __workingset_shadow_nodes; -+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); - - static inline unsigned int workingset_node_pages(struct radix_tree_node *node) - { ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -168,7 +168,9 @@ static void page_cache_tree_delete(struc - if (!workingset_node_pages(node) && - list_empty(&node->private_list)) { - node->private_data = mapping; -- list_lru_add(&workingset_shadow_nodes, &node->private_list); -+ local_lock(workingset_shadow_lock); -+ list_lru_add(&__workingset_shadow_nodes, &node->private_list); -+ local_unlock(workingset_shadow_lock); - } - } - -@@ -597,9 +599,12 @@ static int page_cache_tree_insert(struct - * node->private_list is protected by - * mapping->tree_lock. - */ -- if (!list_empty(&node->private_list)) -- list_lru_del(&workingset_shadow_nodes, -+ if (!list_empty(&node->private_list)) { -+ local_lock(workingset_shadow_lock); -+ list_lru_del(&__workingset_shadow_nodes, - &node->private_list); -+ local_unlock(workingset_shadow_lock); -+ } - } - return 0; - } ---- a/mm/truncate.c -+++ b/mm/truncate.c -@@ -56,8 +56,11 @@ static void clear_exceptional_entry(stru - * protected by mapping->tree_lock. - */ - if (!workingset_node_shadows(node) && -- !list_empty(&node->private_list)) -- list_lru_del(&workingset_shadow_nodes, &node->private_list); -+ !list_empty(&node->private_list)) { -+ local_lock(workingset_shadow_lock); -+ list_lru_del(&__workingset_shadow_nodes, &node->private_list); -+ local_unlock(workingset_shadow_lock); -+ } - __radix_tree_delete_node(&mapping->page_tree, node); - unlock: - spin_unlock_irq(&mapping->tree_lock); ---- a/mm/workingset.c -+++ b/mm/workingset.c -@@ -264,7 +264,8 @@ void workingset_activation(struct page * - * point where they would still be useful. 
- */ - --struct list_lru workingset_shadow_nodes; -+struct list_lru __workingset_shadow_nodes; -+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock); - - static unsigned long count_shadow_nodes(struct shrinker *shrinker, - struct shrink_control *sc) -@@ -274,9 +275,9 @@ static unsigned long count_shadow_nodes( - unsigned long pages; - - /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ -- local_irq_disable(); -- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); -- local_irq_enable(); -+ local_lock_irq(workingset_shadow_lock); -+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc); -+ local_unlock_irq(workingset_shadow_lock); - - pages = node_present_pages(sc->nid); - /* -@@ -363,9 +364,9 @@ static enum lru_status shadow_lru_isolat - spin_unlock(&mapping->tree_lock); - ret = LRU_REMOVED_RETRY; - out: -- local_irq_enable(); -+ local_unlock_irq(workingset_shadow_lock); - cond_resched(); -- local_irq_disable(); -+ local_lock_irq(workingset_shadow_lock); - spin_lock(lru_lock); - return ret; - } -@@ -376,10 +377,10 @@ static unsigned long scan_shadow_nodes(s - unsigned long ret; - - /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ -- local_irq_disable(); -- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, -+ local_lock_irq(workingset_shadow_lock); -+ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc, - shadow_lru_isolate, NULL); -- local_irq_enable(); -+ local_unlock_irq(workingset_shadow_lock); - return ret; - } - -@@ -400,7 +401,7 @@ static int __init workingset_init(void) - { - int ret; - -- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key); -+ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key); - if (ret) - goto err; - ret = register_shrinker(&workingset_shadow_shrinker); -@@ -408,7 +409,7 @@ static int __init workingset_init(void) - goto err_list_lru; - return 0; - err_list_lru: -- list_lru_destroy(&workingset_shadow_nodes); -+ list_lru_destroy(&__workingset_shadow_nodes); - err: - return ret; - } diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch deleted file mode 100644 index 3d4490cb7..000000000 --- a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch +++ /dev/null @@ -1,40 +0,0 @@ -Subject: mmci: Remove bogus local_irq_save() -From: Thomas Gleixner -Date: Wed, 09 Jan 2013 12:11:12 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On !RT interrupt runs with interrupts disabled. On RT it's in a -thread, so no need to disable interrupts at all. - -Signed-off-by: Thomas Gleixner ---- - drivers/mmc/host/mmci.c | 5 ----- - 1 file changed, 5 deletions(-) - ---- a/drivers/mmc/host/mmci.c -+++ b/drivers/mmc/host/mmci.c -@@ -1155,15 +1155,12 @@ static irqreturn_t mmci_pio_irq(int irq, - struct sg_mapping_iter *sg_miter = &host->sg_miter; - struct variant_data *variant = host->variant; - void __iomem *base = host->base; -- unsigned long flags; - u32 status; - - status = readl(base + MMCISTATUS); - - dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); - -- local_irq_save(flags); -- - do { - unsigned int remain, len; - char *buffer; -@@ -1203,8 +1200,6 @@ static irqreturn_t mmci_pio_irq(int irq, - - sg_miter_stop(sg_miter); - -- local_irq_restore(flags); -- - /* - * If we have less than the fifo 'half-full' threshold to transfer, - * trigger a PIO interrupt as soon as any data is available. 
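-
-For reference, a sketch of why the lock was bogus (the request_irq()
-call shown is illustrative, not a quote of the driver):
-
-	/* registered as an ordinary interrupt handler ... */
-	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
-			  DRIVER_NAME " (pio)", host);
-
-On !RT such a handler already runs with interrupts disabled on the local
-CPU; on RT forced interrupt threading moves it into a kthread, where the
-extra local_irq_save() only added latency without protecting anything
-the sg_miter code needs.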
diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch deleted file mode 100644 index 764691e87..000000000 --- a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch +++ /dev/null @@ -1,89 +0,0 @@ -Date: Wed, 26 Jun 2013 15:28:11 -0400 -From: Steven Rostedt -Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The ntp code for notify_cmos_timer() is called from a hard interrupt -context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks -that have been converted to mutexes, thus calling schedule_delayed_work() -from interrupt is not safe. - -Add a helper thread that does the call to schedule_delayed_work and wake -up that thread instead of calling schedule_delayed_work() directly. -This is only for CONFIG_PREEMPT_RT_FULL, otherwise the code still calls -schedule_delayed_work() directly in irq context. - -Note: There's a few places in the kernel that do this. Perhaps the RT -code should have a dedicated thread that does the checks. Just register -a notifier on boot up for your check and wake up the thread when -needed. This will be a todo. - -Signed-off-by: Steven Rostedt - ---- - kernel/time/ntp.c | 43 +++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 43 insertions(+) - ---- a/kernel/time/ntp.c -+++ b/kernel/time/ntp.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -562,10 +563,52 @@ static void sync_cmos_clock(struct work_ - &sync_cmos_work, timespec64_to_jiffies(&next)); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * RT can not call schedule_delayed_work from real interrupt context. -+ * Need to make a thread to do the real work. -+ */ -+static struct task_struct *cmos_delay_thread; -+static bool do_cmos_delay; -+ -+static int run_cmos_delay(void *ignore) -+{ -+ while (!kthread_should_stop()) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ if (do_cmos_delay) { -+ do_cmos_delay = false; -+ queue_delayed_work(system_power_efficient_wq, -+ &sync_cmos_work, 0); -+ } -+ schedule(); -+ } -+ __set_current_state(TASK_RUNNING); -+ return 0; -+} -+ -+void ntp_notify_cmos_timer(void) -+{ -+ do_cmos_delay = true; -+ /* Make visible before waking up process */ -+ smp_wmb(); -+ wake_up_process(cmos_delay_thread); -+} -+ -+static __init int create_cmos_delay_thread(void) -+{ -+ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); -+ BUG_ON(!cmos_delay_thread); -+ return 0; -+} -+early_initcall(create_cmos_delay_thread); -+ -+#else -+ - void ntp_notify_cmos_timer(void) - { - queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); - } -+#endif /* CONFIG_PREEMPT_RT_FULL */ - - #else - void ntp_notify_cmos_timer(void) { } diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch deleted file mode 100644 index a3760000c..000000000 --- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:51:45 +0200 -Subject: locking: Disable spin on owner for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Drop spin on owner for mutex / rwsem. 
We are most likely not using it -but… - -Signed-off-by: Thomas Gleixner ---- - kernel/Kconfig.locks | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/Kconfig.locks -+++ b/kernel/Kconfig.locks -@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW - - config MUTEX_SPIN_ON_OWNER - def_bool y -- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config RWSEM_SPIN_ON_OWNER - def_bool y -- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config LOCK_SPIN_ON_OWNER - def_bool y diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch deleted file mode 100644 index 2bfde666a..000000000 --- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch +++ /dev/null @@ -1,59 +0,0 @@ -From: Thomas Gleixner -Date: Wed, 26 Sep 2012 16:21:08 +0200 -Subject: net: Another local_irq_disable/kmalloc headache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Replace it by a local lock. Though that's pretty inefficient :( - -Signed-off-by: Thomas Gleixner ---- - net/core/skbuff.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -349,6 +350,7 @@ EXPORT_SYMBOL(build_skb); - - static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); - static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache); -+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -356,10 +358,10 @@ static void *__netdev_alloc_frag(unsigne - unsigned long flags; - void *data; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - data = __alloc_page_frag(nc, fragsz, gfp_mask); -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - return data; - } - -@@ -427,13 +429,13 @@ struct sk_buff *__netdev_alloc_skb(struc - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - - nc = this_cpu_ptr(&netdev_alloc_cache); - data = __alloc_page_frag(nc, len, gfp_mask); - pfmemalloc = nc->pfmemalloc; - -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - - if (unlikely(!data)) - return NULL; diff --git a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch deleted file mode 100644 index b9f4f7705..000000000 --- a/debian/patches/features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch +++ /dev/null @@ -1,47 +0,0 @@ -Subject: net/core/cpuhotplug: Drain input_pkt_queue lockless -From: Grygorii Strashko -Date: Fri, 9 Oct 2015 09:25:49 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -I can constantly see below error report with 4.1 RT-kernel on TI ARM dra7-evm -if I'm trying to unplug cpu1: - -[ 57.737589] CPU1: shutdown -[ 57.767537] BUG: spinlock bad magic on CPU#0, sh/137 -[ 
57.767546] lock: 0xee994730, .magic: 00000000, .owner: /-1, .owner_cpu: 0 -[ 57.767552] CPU: 0 PID: 137 Comm: sh Not tainted 4.1.10-rt8-01700-g2c38702-dirty #55 -[ 57.767555] Hardware name: Generic DRA74X (Flattened Device Tree) -[ 57.767568] [] (unwind_backtrace) from [] (show_stack+0x20/0x24) -[ 57.767579] [] (show_stack) from [] (dump_stack+0x84/0xa0) -[ 57.767593] [] (dump_stack) from [] (spin_dump+0x84/0xac) -[ 57.767603] [] (spin_dump) from [] (spin_bug+0x34/0x38) -[ 57.767614] [] (spin_bug) from [] (do_raw_spin_lock+0x168/0x1c0) -[ 57.767624] [] (do_raw_spin_lock) from [] (_raw_spin_lock+0x4c/0x54) -[ 57.767631] [] (_raw_spin_lock) from [] (rt_spin_lock_slowlock+0x5c/0x374) -[ 57.767638] [] (rt_spin_lock_slowlock) from [] (rt_spin_lock+0x38/0x70) -[ 57.767649] [] (rt_spin_lock) from [] (skb_dequeue+0x28/0x7c) -[ 57.767662] [] (skb_dequeue) from [] (dev_cpu_callback+0x1b8/0x240) -[ 57.767673] [] (dev_cpu_callback) from [] (notifier_call_chain+0x3c/0xb4) - -The reason is that skb_dequeue is taking skb->lock, but RT changed the -core code to use a raw spinlock. The non-raw lock is not initialized -on purpose to catch exactly this kind of problem. - -Fixes: 91df05da13a6 'net: Use skbufhead with raw lock' -Signed-off-by: Thomas Gleixner -Cc: stable-rt@vger.kernel.org ---- - net/core/dev.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -7468,7 +7468,7 @@ static int dev_cpu_callback(struct notif - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { -+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } diff --git a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch deleted file mode 100644 index aa199ab70..000000000 --- a/debian/patches/features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 15 Jan 2016 16:33:34 +0100 -Subject: net/core: protect users of napi_alloc_cache against - reentrance -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On -RT the code running in BH can not be moved to another CPU so CPU -local variable remain local. However the code can be preempted -and another task may enter BH accessing the same CPU using the same -napi_alloc_cache variable. -This patch ensures that each user of napi_alloc_cache uses a local lock. 
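-
-The guarded-access pattern (sketch; it matches the hunks below):
-
-	static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
-
-	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-	data = __alloc_page_frag(nc, fragsz, gfp_mask);
-	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-
-get_locked_var() takes the per-cpu lock and hands back the per-cpu
-variable, so two BH sections that preempt each other on one CPU
-serialise instead of corrupting the shared page_frag_cache.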
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - net/core/skbuff.c | 18 ++++++++++++++---- - 1 file changed, 14 insertions(+), 4 deletions(-) - ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -351,6 +351,7 @@ EXPORT_SYMBOL(build_skb); - static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); - static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache); - static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); -+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -380,9 +381,13 @@ EXPORT_SYMBOL(netdev_alloc_frag); - - static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct page_frag_cache *nc; -+ void *data; - -- return __alloc_page_frag(nc, fragsz, gfp_mask); -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ data = __alloc_page_frag(nc, fragsz, gfp_mask); -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ return data; - } - - void *napi_alloc_frag(unsigned int fragsz) -@@ -476,9 +481,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb); - struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - gfp_t gfp_mask) - { -- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct page_frag_cache *nc; - struct sk_buff *skb; - void *data; -+ bool pfmemalloc; - - len += NET_SKB_PAD + NET_IP_ALIGN; - -@@ -496,7 +502,11 @@ struct sk_buff *__napi_alloc_skb(struct - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - data = __alloc_page_frag(nc, len, gfp_mask); -+ pfmemalloc = nc->pfmemalloc; -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ - if (unlikely(!data)) - return NULL; - -@@ -507,7 +517,7 @@ struct sk_buff *__napi_alloc_skb(struct - } - - /* use OR instead of assignment to avoid clearing of bits in mask */ -- if (nc->pfmemalloc) -+ if (pfmemalloc) - skb->pfmemalloc = 1; - skb->head_frag = 1; - diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch deleted file mode 100644 index d7f0e87a3..000000000 --- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch +++ /dev/null @@ -1,74 +0,0 @@ -Subject: net: netfilter: Serialize xt_write_recseq sections on RT -From: Thomas Gleixner -Date: Sun, 28 Oct 2012 11:18:08 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The netfilter code relies only on the implicit semantics of -local_bh_disable() for serializing wt_write_recseq sections. RT breaks -that and needs explicit serialization here. - -Reported-by: Peter LaDow -Signed-off-by: Thomas Gleixner - ---- - include/linux/netfilter/x_tables.h | 7 +++++++ - net/netfilter/core.c | 6 ++++++ - 2 files changed, 13 insertions(+) - ---- a/include/linux/netfilter/x_tables.h -+++ b/include/linux/netfilter/x_tables.h -@@ -4,6 +4,7 @@ - - #include - #include -+#include - #include - - /** -@@ -282,6 +283,8 @@ void xt_free_table_info(struct xt_table_ - */ - DECLARE_PER_CPU(seqcount_t, xt_recseq); - -+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); -+ - /* xt_tee_enabled - true if x_tables needs to handle reentrancy - * - * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. 
-@@ -302,6 +305,9 @@ static inline unsigned int xt_write_recs - { - unsigned int addend; - -+ /* RT protection */ -+ local_lock(xt_write_lock); -+ - /* - * Low order bit of sequence is set if we already - * called xt_write_recseq_begin(). -@@ -332,6 +338,7 @@ static inline void xt_write_recseq_end(u - /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ - smp_wmb(); - __this_cpu_add(xt_recseq.sequence, addend); -+ local_unlock(xt_write_lock); - } - - /* ---- a/net/netfilter/core.c -+++ b/net/netfilter/core.c -@@ -22,11 +22,17 @@ - #include - #include - #include -+#include - #include - #include - - #include "nf_internals.h" - -+#ifdef CONFIG_PREEMPT_RT_BASE -+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); -+EXPORT_PER_CPU_SYMBOL(xt_write_lock); -+#endif -+ - static DEFINE_MUTEX(afinfo_mutex); - - const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch deleted file mode 100644 index c0785f184..000000000 --- a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 20 Mar 2013 18:06:20 +0100 -Subject: net: Add a mutex around devnet_rename_seq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On RT write_seqcount_begin() disables preemption and device_rename() -allocates memory with GFP_KERNEL and later grabs the sysfs_mutex -mutex. Serialize with a mutex and use the non-preemption-disabling -__write_seqcount_begin(). - -To avoid writer starvation, let the reader grab the mutex and release -it when it detects a writer in progress. This keeps the normal case -(no reader on the fly) fast.
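Condensed, the reader side of that scheme looks as follows (a sketch mirroring the netdev_get_name() hunk below; the RCU name copy is elided):

        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        /* ... copy dev->name under rcu_read_lock() ... */
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                /* A writer is in flight: sleep on its mutex instead of
                 * spinning, then drop it immediately and retry. */
                mutex_lock(&devnet_rename_mutex);
                mutex_unlock(&devnet_rename_mutex);
                goto retry;
        }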
- -[ tglx: Instead of replacing the seqcount by a mutex, add the mutex ] - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner ---- - net/core/dev.c | 34 ++++++++++++++++++++-------------- - 1 file changed, 20 insertions(+), 14 deletions(-) - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -186,6 +186,7 @@ static unsigned int napi_gen_id; - static DEFINE_HASHTABLE(napi_hash, 8); - - static seqcount_t devnet_rename_seq; -+static DEFINE_MUTEX(devnet_rename_mutex); - - static inline void dev_base_seq_inc(struct net *net) - { -@@ -884,7 +885,8 @@ int netdev_get_name(struct net *net, cha - strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { -- cond_resched(); -+ mutex_lock(&devnet_rename_mutex); -+ mutex_unlock(&devnet_rename_mutex); - goto retry; - } - -@@ -1153,20 +1155,17 @@ int dev_change_name(struct net_device *d - if (dev->flags & IFF_UP) - return -EBUSY; - -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - -- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { -- write_seqcount_end(&devnet_rename_seq); -- return 0; -- } -+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) -+ goto outunlock; - - memcpy(oldname, dev->name, IFNAMSIZ); - - err = dev_get_valid_name(net, dev, newname); -- if (err < 0) { -- write_seqcount_end(&devnet_rename_seq); -- return err; -- } -+ if (err < 0) -+ goto outunlock; - - if (oldname[0] && !strchr(oldname, '%')) - netdev_info(dev, "renamed from %s\n", oldname); -@@ -1179,11 +1178,12 @@ int dev_change_name(struct net_device *d - if (ret) { - memcpy(dev->name, oldname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -- write_seqcount_end(&devnet_rename_seq); -- return ret; -+ err = ret; -+ goto outunlock; - } - -- write_seqcount_end(&devnet_rename_seq); -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); - - netdev_adjacent_rename_links(dev, oldname); - -@@ -1204,7 +1204,8 @@ int dev_change_name(struct net_device *d - /* err >= 0 after dev_alloc_name() or stores the first errno */ - if (err >= 0) { - err = ret; -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - memcpy(dev->name, oldname, IFNAMSIZ); - memcpy(oldname, newname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -@@ -1217,6 +1218,11 @@ int dev_change_name(struct net_device *d - } - - return err; -+ -+outunlock: -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); -+ return err; - } - - /** diff --git a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch deleted file mode 100644 index 56094064d..000000000 --- a/debian/patches/features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ /dev/null @@ -1,126 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 13 Jan 2016 15:55:02 +0100 -Subject: net: move xmit_recursion to per-task variable on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -A softirq on -RT can be preempted. That means one task is in -__dev_queue_xmit(), gets preempted and another task may enter -__dev_queue_xmit() aw well. netperf together with a bridge device -will then trigger the `recursion alert` because each task increments -the xmit_recursion variable which is per-CPU. 
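In sketch form, the context-aware check (a hypothetical helper folding the patch's xmit_rec_read() accessor into its only user):

static inline bool xmit_is_recursing(void)
{
#ifdef CONFIG_PREEMPT_RT_FULL
        /* Per task: two tasks preempting each other inside
         * __dev_queue_xmit() on one CPU each keep their own count. */
        return current->xmit_recursion > RECURSION_LIMIT;
#else
        /* Per CPU suffices when softirqs cannot be preempted. */
        return __this_cpu_read(xmit_recursion) > RECURSION_LIMIT;
#endif
}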
-A virtual device like br0 is required to trigger this warning. - -This patch moves the counter to per task instead per-CPU so it counts -the recursion properly on -RT. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/netdevice.h | 9 +++++++++ - include/linux/sched.h | 3 +++ - net/core/dev.c | 41 ++++++++++++++++++++++++++++++++++++++--- - 3 files changed, 50 insertions(+), 3 deletions(-) - ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -2249,11 +2249,20 @@ void netdev_freemem(struct net_device *d - void synchronize_net(void); - int init_dummy_netdev(struct net_device *dev); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int dev_recursion_level(void) -+{ -+ return current->xmit_recursion; -+} -+ -+#else -+ - DECLARE_PER_CPU(int, xmit_recursion); - static inline int dev_recursion_level(void) - { - return this_cpu_read(xmit_recursion); - } -+#endif - - struct net_device *dev_get_by_index(struct net *net, int ifindex); - struct net_device *__dev_get_by_index(struct net *net, int ifindex); ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1852,6 +1852,9 @@ struct task_struct { - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int xmit_recursion; -+#endif - int pagefault_disabled; - /* CPU-specific state of this task */ - struct thread_struct thread; ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -2945,9 +2945,44 @@ static void skb_update_prio(struct sk_bu - #define skb_update_prio(skb) - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+static inline int xmit_rec_read(void) -+{ -+ return current->xmit_recursion; -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ current->xmit_recursion++; -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ current->xmit_recursion--; -+} -+ -+#else -+ - DEFINE_PER_CPU(int, xmit_recursion); - EXPORT_SYMBOL(xmit_recursion); - -+static inline int xmit_rec_read(void) -+{ -+ return __this_cpu_read(xmit_recursion); -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ __this_cpu_inc(xmit_recursion); -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ __this_cpu_dec(xmit_recursion); -+} -+#endif -+ - #define RECURSION_LIMIT 10 - - /** -@@ -3140,7 +3175,7 @@ static int __dev_queue_xmit(struct sk_bu - - if (txq->xmit_lock_owner != cpu) { - -- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) -+ if (xmit_rec_read() > RECURSION_LIMIT) - goto recursion_alert; - - skb = validate_xmit_skb(skb, dev); -@@ -3150,9 +3185,9 @@ static int __dev_queue_xmit(struct sk_bu - HARD_TX_LOCK(dev, txq, cpu); - - if (!netif_xmit_stopped(txq)) { -- __this_cpu_inc(xmit_recursion); -+ xmit_rec_inc(); - skb = dev_hard_start_xmit(skb, dev, txq, &rc); -- __this_cpu_dec(xmit_recursion); -+ xmit_rec_dec(); - if (dev_xmit_complete(rc)) { - HARD_TX_UNLOCK(dev, txq); - goto out; diff --git a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch b/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch deleted file mode 100644 index 7a97713bc..000000000 --- a/debian/patches/features/all/rt/net-prevent-abba-deadlock.patch +++ /dev/null @@ -1,112 +0,0 @@ -Subject: net-flip-lock-dep-thingy.patch -From: Thomas Gleixner -Date: Tue, 28 Jun 2011 10:59:58 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -======================================================= -[ INFO: possible circular locking dependency detected ] -3.0.0-rc3+ #26 -------------------------------------------------------- 
-ip/1104 is trying to acquire lock: - (local_softirq_lock){+.+...}, at: [] __local_lock+0x25/0x68 - -but task is already holding lock: - (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 - -which lock already depends on the new lock. - - -the existing dependency chain (in reverse order) is: - --> #1 (sk_lock-AF_INET){+.+...}: - [] lock_acquire+0x103/0x12e - [] lock_sock_nested+0x82/0x92 - [] lock_sock+0x10/0x12 - [] tcp_close+0x1b/0x355 - [] inet_release+0xc3/0xcd - [] sock_release+0x1f/0x74 - [] sock_close+0x27/0x2b - [] fput+0x11d/0x1e3 - [] filp_close+0x70/0x7b - [] sys_close+0xf8/0x13d - [] system_call_fastpath+0x16/0x1b - --> #0 (local_softirq_lock){+.+...}: - [] __lock_acquire+0xacc/0xdc8 - [] lock_acquire+0x103/0x12e - [] _raw_spin_lock+0x3b/0x4a - [] __local_lock+0x25/0x68 - [] local_bh_disable+0x36/0x3b - [] _raw_write_lock_bh+0x16/0x4f - [] tcp_close+0x159/0x355 - [] inet_release+0xc3/0xcd - [] sock_release+0x1f/0x74 - [] sock_close+0x27/0x2b - [] fput+0x11d/0x1e3 - [] filp_close+0x70/0x7b - [] sys_close+0xf8/0x13d - [] system_call_fastpath+0x16/0x1b - -other info that might help us debug this: - - Possible unsafe locking scenario: - - CPU0 CPU1 - ---- ---- - lock(sk_lock-AF_INET); - lock(local_softirq_lock); - lock(sk_lock-AF_INET); - lock(local_softirq_lock); - - *** DEADLOCK *** - -1 lock held by ip/1104: - #0: (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 - -stack backtrace: -Pid: 1104, comm: ip Not tainted 3.0.0-rc3+ #26 -Call Trace: - [] print_circular_bug+0x1f8/0x209 - [] __lock_acquire+0xacc/0xdc8 - [] ? __local_lock+0x25/0x68 - [] lock_acquire+0x103/0x12e - [] ? __local_lock+0x25/0x68 - [] ? get_parent_ip+0x11/0x41 - [] _raw_spin_lock+0x3b/0x4a - [] ? __local_lock+0x25/0x68 - [] ? get_parent_ip+0x28/0x41 - [] __local_lock+0x25/0x68 - [] local_bh_disable+0x36/0x3b - [] ? lock_sock+0x10/0x12 - [] _raw_write_lock_bh+0x16/0x4f - [] tcp_close+0x159/0x355 - [] inet_release+0xc3/0xcd - [] sock_release+0x1f/0x74 - [] sock_close+0x27/0x2b - [] fput+0x11d/0x1e3 - [] filp_close+0x70/0x7b - [] sys_close+0xf8/0x13d - [] system_call_fastpath+0x16/0x1b - - -Signed-off-by: Thomas Gleixner ---- - net/core/sock.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -2435,12 +2435,11 @@ void lock_sock_nested(struct sock *sk, i - if (sk->sk_lock.owned) - __lock_sock(sk); - sk->sk_lock.owned = 1; -- spin_unlock(&sk->sk_lock.slock); -+ spin_unlock_bh(&sk->sk_lock.slock); - /* - * The sk_lock has mutex_lock() semantics here: - */ - mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); -- local_bh_enable(); - } - EXPORT_SYMBOL(lock_sock_nested); - diff --git a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch deleted file mode 100644 index 24dd4358a..000000000 --- a/debian/patches/features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 20 Jan 2016 15:39:05 +0100 -Subject: net: provide a way to delegate processing a softirq to - ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If the NET_RX uses up all of his budget it moves the following NAPI -invocations into the `ksoftirqd`. On -RT it does not do so. Instead it -rises the NET_RX softirq in its current context again. 
- -In order to get closer to mainline's behaviour this patch provides -__raise_softirq_irqoff_ksoft() which raises the softirq in ksoftirqd. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/interrupt.h | 8 ++++++++ - kernel/softirq.c | 21 +++++++++++++++++++++ - net/core/dev.c | 2 +- - 3 files changed, 30 insertions(+), 1 deletion(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -465,6 +465,14 @@ extern void thread_do_softirq(void); - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); - extern void __raise_softirq_irqoff(unsigned int nr); -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void __raise_softirq_irqoff_ksoft(unsigned int nr); -+#else -+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+} -+#endif - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -673,6 +673,27 @@ void __raise_softirq_irqoff(unsigned int - } - - /* -+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd -+ */ -+void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ unsigned int mask; -+ -+ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || -+ !__this_cpu_read(ktimer_softirqd))) -+ return; -+ mask = 1UL << nr; -+ -+ trace_softirq_raise(nr); -+ or_softirq_pending(mask); -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+ wakeup_proper_softirq(nr); -+} -+ -+/* - * This function must run with irqs disabled! - */ - void raise_softirq_irqoff(unsigned int nr) ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -4924,7 +4924,7 @@ static void net_rx_action(struct softirq - list_splice_tail(&repoll, &list); - list_splice(&list, &sd->poll_list); - if (!list_empty(&sd->poll_list)) -- __raise_softirq_irqoff(NET_RX_SOFTIRQ); -+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); - - net_rps_action_and_irq_enable(sd); - } diff --git a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch deleted file mode 100644 index 289203405..000000000 --- a/debian/patches/features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Marc Kleine-Budde -Date: Wed, 5 Mar 2014 00:49:47 +0100 -Subject: net: sched: Use msleep() instead of yield() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On PREEMPT_RT enabled systems interrupt handlers run as threads at prio 50 -(by default). If a high-priority userspace process tries to shut down a busy -network interface it might spin in a yield loop waiting for the device to -become idle. With the interrupt thread having a lower priority than the -looping process it might never be scheduled and so result in a deadlock on UP -systems.
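As a sketch, the loop being fixed and its replacement (names as in the sch_generic.c hunk below):

        /* Deadlock-prone on PREEMPT_RT: if this task outranks the irq
         * thread, yield() never lets the qdisc become idle. */
        while (some_qdisc_is_busy(dev))
                yield();

        /* The fix: really sleep, so the irq thread can run. */
        while (some_qdisc_is_busy(dev))
                msleep(1);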
- -With Magic SysRq the following backtrace can be produced: - -> test_app R running 0 174 168 0x00000000 -> [] (__schedule+0x220/0x3fc) from [] (preempt_schedule_irq+0x48/0x80) -> [] (preempt_schedule_irq+0x48/0x80) from [] (svc_preempt+0x8/0x20) -> [] (svc_preempt+0x8/0x20) from [] (local_bh_enable+0x18/0x88) -> [] (local_bh_enable+0x18/0x88) from [] (dev_deactivate_many+0x220/0x264) -> [] (dev_deactivate_many+0x220/0x264) from [] (__dev_close_many+0x64/0xd4) -> [] (__dev_close_many+0x64/0xd4) from [] (__dev_close+0x28/0x3c) -> [] (__dev_close+0x28/0x3c) from [] (__dev_change_flags+0x88/0x130) -> [] (__dev_change_flags+0x88/0x130) from [] (dev_change_flags+0x10/0x48) -> [] (dev_change_flags+0x10/0x48) from [] (do_setlink+0x370/0x7ec) -> [] (do_setlink+0x370/0x7ec) from [] (rtnl_newlink+0x2b4/0x450) -> [] (rtnl_newlink+0x2b4/0x450) from [] (rtnetlink_rcv_msg+0x158/0x1f4) -> [] (rtnetlink_rcv_msg+0x158/0x1f4) from [] (netlink_rcv_skb+0xac/0xc0) -> [] (netlink_rcv_skb+0xac/0xc0) from [] (rtnetlink_rcv+0x18/0x24) -> [] (rtnetlink_rcv+0x18/0x24) from [] (netlink_unicast+0x13c/0x198) -> [] (netlink_unicast+0x13c/0x198) from [] (netlink_sendmsg+0x264/0x2e0) -> [] (netlink_sendmsg+0x264/0x2e0) from [] (sock_sendmsg+0x78/0x98) -> [] (sock_sendmsg+0x78/0x98) from [] (___sys_sendmsg.part.25+0x268/0x278) -> [] (___sys_sendmsg.part.25+0x268/0x278) from [] (__sys_sendmsg+0x48/0x78) -> [] (__sys_sendmsg+0x48/0x78) from [] (ret_fast_syscall+0x0/0x2c) - -This patch works around the problem by replacing yield() by msleep(1), giving -the interrupt thread time to finish, similar to other changes contained in the -rt patch set. Using wait_for_completion() instead would probably be a better -solution. - - -Signed-off-by: Marc Kleine-Budde -Signed-off-by: Sebastian Andrzej Siewior ---- - net/sched/sch_generic.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -890,7 +890,7 @@ void dev_deactivate_many(struct list_hea - /* Wait for outstanding qdisc_run calls. */ - list_for_each_entry(dev, head, close_list) - while (some_qdisc_is_busy(dev)) -- yield(); -+ msleep(1); - } - - void dev_deactivate(struct net_device *dev) diff --git a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch deleted file mode 100644 index f7587f2d5..000000000 --- a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch +++ /dev/null @@ -1,93 +0,0 @@ -Subject: net: Avoid livelock in net_tx_action() on RT -From: Steven Rostedt -Date: Thu, 06 Oct 2011 10:48:39 -0400 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -qdisc_lock is taken w/o disabling interrupts or bottom halfs. So code -holding a qdisc_lock() can be interrupted and softirqs can run on the -return of interrupt in !RT. - -The spin_trylock() in net_tx_action() makes sure, that the softirq -does not deadlock. When the lock can't be acquired q is requeued and -the NET_TX softirq is raised. That causes the softirq to run over and -over. - -That works in mainline as do_softirq() has a retry loop limit and -leaves the softirq processing in the interrupt return path and -schedules ksoftirqd. The task which holds qdisc_lock cannot be -preempted, so the lock is released and either ksoftirqd or the next -softirq in the return from interrupt path can proceed. 
Though it's a -bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it -decides to bail out even if it's clear in the first iteration :) - -On RT all softirq processing is done in a FIFO thread and we don't -have a loop limit, so ksoftirqd preempts the lock holder forever and -unqueues and requeues until the reset button is hit. - -Due to the forced threading of ksoftirqd on RT we actually cannot -deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to -replace the spin_trylock() with a spin_lock(). When contended, -ksoftirqd is scheduled out and the lock holder can proceed. - -[ tglx: Massaged changelog and code comments ] - -Solved-by: Thomas Gleixner -Signed-off-by: Steven Rostedt -Tested-by: Carsten Emde -Cc: Clark Williams -Cc: John Kacur -Cc: Luis Claudio R. Goncalves -Signed-off-by: Thomas Gleixner - ---- - net/core/dev.c | 32 +++++++++++++++++++++++++++++++- - 1 file changed, 31 insertions(+), 1 deletion(-) - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -3638,6 +3638,36 @@ int netif_rx_ni(struct sk_buff *skb) - } - EXPORT_SYMBOL(netif_rx_ni); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * RT runs ksoftirqd as a real time thread and the root_lock is a -+ * "sleeping spinlock". If the trylock fails then we can go into an -+ * infinite loop when ksoftirqd preempted the task which actually -+ * holds the lock, because we requeue q and raise NET_TX softirq -+ * causing ksoftirqd to loop forever. -+ * -+ * It's safe to use spin_lock on RT here as softirqs run in thread -+ * context and cannot deadlock against the thread which is holding -+ * root_lock. -+ * -+ * On !RT the trylock might fail, but there we bail out from the -+ * softirq loop after 10 attempts which we can't do on RT. And the -+ * task holding root_lock cannot be preempted, so the only downside of -+ * that trylock is that we need 10 loops to decide that we should have -+ * given up in the first one :) -+ */ -+static inline int take_root_lock(spinlock_t *lock) -+{ -+ spin_lock(lock); -+ return 1; -+} -+#else -+static inline int take_root_lock(spinlock_t *lock) -+{ -+ return spin_trylock(lock); -+} -+#endif -+ - static void net_tx_action(struct softirq_action *h) - { - struct softnet_data *sd = this_cpu_ptr(&softnet_data); -@@ -3679,7 +3709,7 @@ static void net_tx_action(struct softirq - head = head->next_sched; - - root_lock = qdisc_lock(q); -- if (spin_trylock(root_lock)) { -+ if (take_root_lock(root_lock)) { - smp_mb__before_atomic(); - clear_bit(__QDISC_STATE_SCHED, - &q->state); diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch deleted file mode 100644 index ca1b0bd02..000000000 --- a/debian/patches/features/all/rt/net-use-cpu-chill.patch +++ /dev/null @@ -1,63 +0,0 @@ -Subject: net: Use cpu_chill() instead of cpu_relax() -From: Thomas Gleixner -Date: Wed, 07 Mar 2012 21:10:04 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Retry loops on RT might loop forever when the modifying side was -preempted. Use cpu_chill() instead of cpu_relax() to let the system -make progress. 
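Sketched, the transformation the hunks below apply (cpu_chill() is the -rt helper that sleeps for a short period on RT instead of spinning):

        while (atomic_read(&pkc->blk_fill_in_prog)) {
                /* Waiting for skb_copy_bits to finish... */
                cpu_chill();    /* was cpu_relax(); spinning livelocks
                                 * once the writer is preempted on RT */
        }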
- -Signed-off-by: Thomas Gleixner - ---- - net/packet/af_packet.c | 5 +++-- - net/rds/ib_rdma.c | 3 ++- - 2 files changed, 5 insertions(+), 3 deletions(-) - ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expi - if (BLOCK_NUM_PKTS(pbd)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ -- cpu_relax(); -+ cpu_chill(); - } - } - -@@ -957,7 +958,7 @@ static void prb_retire_current_block(str - if (!(status & TP_STATUS_BLK_TMO)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ -- cpu_relax(); -+ cpu_chill(); - } - } - prb_close_block(pkc, pbd, po, status); ---- a/net/rds/ib_rdma.c -+++ b/net/rds/ib_rdma.c -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - #include "rds.h" - #include "ib.h" -@@ -313,7 +314,7 @@ static inline void wait_clean_list_grace - for_each_online_cpu(cpu) { - flag = &per_cpu(clean_list_grace, cpu); - while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) -- cpu_relax(); -+ cpu_chill(); - } - } - diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch deleted file mode 100644 index 76fdcf86a..000000000 --- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch +++ /dev/null @@ -1,24 +0,0 @@ -Subject: net/wireless: Use WARN_ON_NORT() -From: Thomas Gleixner -Date: Thu, 21 Jul 2011 21:05:33 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The softirq counter is meaningless on RT, so the check triggers a -false positive. - -Signed-off-by: Thomas Gleixner ---- - net/mac80211/rx.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/net/mac80211/rx.c -+++ b/net/mac80211/rx.c -@@ -3554,7 +3554,7 @@ void ieee80211_rx_napi(struct ieee80211_ - struct ieee80211_supported_band *sband; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - -- WARN_ON_ONCE(softirq_count() == 0); -+ WARN_ON_ONCE_NONRT(softirq_count() == 0); - - if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) - goto drop; diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch deleted file mode 100644 index e4fb7cb73..000000000 --- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch +++ /dev/null @@ -1,143 +0,0 @@ -From: Oleg Nesterov -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: signal/x86: Delay calling signals in atomic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On x86_64 we must disable preemption before we enable interrupts -for stack faults, int3 and debugging, because the current task is using -a per CPU debug stack defined by the IST. If we schedule out, another task -can come in and use the same stack and cause the stack to be corrupted -and crash the kernel on return. - -When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and -one of these is the spin lock used in signal handling. - -Some of the debug code (int3) causes do_trap() to send a signal. -This function calls a spin lock that has been converted to a mutex -and has the possibility to sleep. If this happens, the above issues with -the corrupted stack is possible. 
- -Instead of calling the signal right away, for PREEMPT_RT and x86_64, -the signal information is stored on the stacks task_struct and -TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume -code will send the signal when preemption is enabled. - -[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to - ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ] - - -Signed-off-by: Oleg Nesterov -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner ---- - - arch/x86/entry/common.c | 7 +++++++ - arch/x86/include/asm/signal.h | 13 +++++++++++++ - include/linux/sched.h | 4 ++++ - kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++-- - 4 files changed, 59 insertions(+), 2 deletions(-) - ---- a/arch/x86/entry/common.c -+++ b/arch/x86/entry/common.c -@@ -239,6 +239,13 @@ static void exit_to_usermode_loop(struct - if (cached_flags & _TIF_NEED_RESCHED) - schedule(); - -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (unlikely(current->forced_info.si_signo)) { -+ struct task_struct *t = current; -+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); -+ t->forced_info.si_signo = 0; -+ } -+#endif - if (cached_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - ---- a/arch/x86/include/asm/signal.h -+++ b/arch/x86/include/asm/signal.h -@@ -23,6 +23,19 @@ typedef struct { - unsigned long sig[_NSIG_WORDS]; - } sigset_t; - -+/* -+ * Because some traps use the IST stack, we must keep preemption -+ * disabled while calling do_trap(), but do_trap() may call -+ * force_sig_info() which will grab the signal spin_locks for the -+ * task, which in PREEMPT_RT_FULL are mutexes. By defining -+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set -+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the -+ * trap. -+ */ -+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -+#define ARCH_RT_DELAYS_SIGNAL_SEND -+#endif -+ - #ifndef CONFIG_COMPAT - typedef sigset_t compat_sigset_t; - #endif ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1579,6 +1579,10 @@ struct task_struct { - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ - struct sigpending pending; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* TODO: move me into ->restart_block ? */ -+ struct siginfo forced_info; -+#endif - - unsigned long sas_ss_sp; - size_t sas_ss_size; ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1216,8 +1216,8 @@ int do_send_sig_info(int sig, struct sig - * We don't want to have recursive SIGSEGV's etc, for example, - * that is why we also clear SIGNAL_UNKILLABLE. - */ --int --force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+static int -+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - { - unsigned long int flags; - int ret, blocked, ignored; -@@ -1242,6 +1242,39 @@ force_sig_info(int sig, struct siginfo * - return ret; - } - -+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+{ -+/* -+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap -+ * since it can not enable preemption, and the signal code's spin_locks -+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will -+ * send the signal on exit of the trap. 
-+ */ -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (in_atomic()) { -+ if (WARN_ON_ONCE(t != current)) -+ return 0; -+ if (WARN_ON_ONCE(t->forced_info.si_signo)) -+ return 0; -+ -+ if (is_si_special(info)) { -+ WARN_ON_ONCE(info != SEND_SIG_PRIV); -+ t->forced_info.si_signo = sig; -+ t->forced_info.si_errno = 0; -+ t->forced_info.si_code = SI_KERNEL; -+ t->forced_info.si_pid = 0; -+ t->forced_info.si_uid = 0; -+ } else { -+ t->forced_info = *info; -+ } -+ -+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); -+ return 0; -+ } -+#endif -+ return do_force_sig_info(sig, info, t); -+} -+ - /* - * Nuke all other threads in the group. - */ diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch deleted file mode 100644 index 9d700fabe..000000000 --- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Disable on -RT. If this is invoked from irq-context we will have problems -to acquire the sleeping lock. - -Signed-off-by: Thomas Gleixner ---- - kernel/panic.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -401,9 +401,11 @@ static u64 oops_id; - - static int init_oops_id(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (!oops_id) - get_random_bytes(&oops_id, sizeof(oops_id)); - else -+#endif - oops_id++; - - return 0; diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch deleted file mode 100644 index 2fcba0e1c..000000000 --- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch +++ /dev/null @@ -1,109 +0,0 @@ -Subject: rcu: Make ksoftirqd do RCU quiescent states -From: "Paul E. McKenney" -Date: Wed, 5 Oct 2011 11:45:18 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable -to network-based denial-of-service attacks. This patch therefore -makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() -is running in ksoftirqd context. A wrapper layer in interposed so that -other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying -function __do_softirq_common() does the actual work. - -The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is -that there might be a local_bh_enable() inside an RCU-preempt read-side -critical section. This local_bh_enable() can invoke __do_softirq() -directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just -calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be -an illegal RCU-preempt quiescent state in the middle of an RCU-preempt -read-side critical section. Therefore, quiescent states can only happen -in cases where __do_softirq() is invoked directly from ksoftirqd. - -Signed-off-by: Paul E. 
McKenney -Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com -Signed-off-by: Thomas Gleixner - ---- - include/linux/rcupdate.h | 4 ---- - kernel/rcu/tree.c | 9 ++++++++- - kernel/rcu/tree_plugin.h | 8 +++++++- - 3 files changed, 15 insertions(+), 6 deletions(-) - ---- a/include/linux/rcupdate.h -+++ b/include/linux/rcupdate.h -@@ -334,11 +334,7 @@ static inline int rcu_preempt_depth(void - void rcu_init(void); - void rcu_end_inkernel_boot(void); - void rcu_sched_qs(void); --#ifdef CONFIG_PREEMPT_RT_FULL --static inline void rcu_bh_qs(void) { } --#else - void rcu_bh_qs(void); --#endif - void rcu_check_callbacks(int user); - struct notifier_block; - int rcu_cpu_notify(struct notifier_block *self, ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -266,7 +266,14 @@ void rcu_sched_qs(void) - } - } - --#ifndef CONFIG_PREEMPT_RT_FULL -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void rcu_preempt_qs(void); -+ -+void rcu_bh_qs(void) -+{ -+ rcu_preempt_qs(); -+} -+#else - void rcu_bh_qs(void) - { - if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include "../time/tick-internal.h" - - #ifdef CONFIG_RCU_BOOST -@@ -1346,7 +1347,7 @@ static void rcu_prepare_kthreads(int cpu - - #endif /* #else #ifdef CONFIG_RCU_BOOST */ - --#if !defined(CONFIG_RCU_FAST_NO_HZ) -+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) - - /* - * Check to see if any future RCU-related work will need to be done -@@ -1363,7 +1364,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex - return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) - ? 0 : rcu_cpu_has_callbacks(NULL); - } -+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ - -+#if !defined(CONFIG_RCU_FAST_NO_HZ) - /* - * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up - * after it. -@@ -1459,6 +1462,8 @@ static bool __maybe_unused rcu_try_advan - return cbs_ready; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready - * to invoke. If the CPU has callbacks, try to advance them. Tell the -@@ -1504,6 +1509,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex - *nextevt = basemono + dj * TICK_NSEC; - return 0; - } -+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ - - /* - * Prepare a CPU for idle from an RCU perspective. The first major task diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch deleted file mode 100644 index fd1fe7e99..000000000 --- a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch +++ /dev/null @@ -1,26 +0,0 @@ -Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access() -From: Thomas Gleixner -Date: Thu, 01 Dec 2011 00:07:16 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The waitqueue is protected by the pci_lock, so we can just avoid to -lock the waitqueue lock itself. 
That prevents the -might_sleep()/scheduling while atomic problem on RT - -Signed-off-by: Thomas Gleixner - ---- - drivers/pci/access.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/pci/access.c -+++ b/drivers/pci/access.c -@@ -561,7 +561,7 @@ void pci_cfg_access_unlock(struct pci_de - WARN_ON(!dev->block_cfg_access); - - dev->block_cfg_access = 0; -- wake_up_all(&pci_cfg_wait); -+ wake_up_all_locked(&pci_cfg_wait); - raw_spin_unlock_irqrestore(&pci_lock, flags); - } - EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch b/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch deleted file mode 100644 index 982f2b4fd..000000000 --- a/debian/patches/features/all/rt/percpu_ida-use-locklocks.patch +++ /dev/null @@ -1,102 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 9 Apr 2014 11:58:17 +0200 -Subject: percpu_ida: Use local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -the local_irq_save() + spin_lock() does not work that well on -RT - -Signed-off-by: Sebastian Andrzej Siewior ---- - lib/percpu_ida.c | 20 ++++++++++++-------- - 1 file changed, 12 insertions(+), 8 deletions(-) - ---- a/lib/percpu_ida.c -+++ b/lib/percpu_ida.c -@@ -26,6 +26,9 @@ - #include - #include - #include -+#include -+ -+static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); - - struct percpu_ida_cpu { - /* -@@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida * - unsigned long flags; - int tag; - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - - /* Fastpath */ - tag = alloc_local_tag(tags); - if (likely(tag >= 0)) { -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - return tag; - } - -@@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida * - - if (!tags->nr_free) - alloc_global_tags(pool, tags); -+ - if (!tags->nr_free) - steal_tags(pool, tags); - -@@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida * - } - - spin_unlock(&pool->lock); -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - - if (tag >= 0 || state == TASK_RUNNING) - break; -@@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida * - - schedule(); - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - } - if (state != TASK_RUNNING) -@@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida * - - BUG_ON(tag >= pool->nr_tags); - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - - spin_lock(&tags->lock); -@@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida * - spin_unlock(&pool->lock); - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - } - EXPORT_SYMBOL_GPL(percpu_ida_free); - -@@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct perc - struct percpu_ida_cpu *remote; - unsigned cpu, i, err = 0; - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - for_each_possible_cpu(cpu) { - remote = per_cpu_ptr(pool->tag_cpu, cpu); - spin_lock(&remote->lock); -@@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct perc - } - spin_unlock(&pool->lock); - out: -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - return err; - } - EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch 
b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch deleted file mode 100644 index 65ae7a185..000000000 --- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch +++ /dev/null @@ -1,69 +0,0 @@ -From: Yong Zhang -Date: Wed, 11 Jul 2012 22:05:21 +0000 -Subject: perf: Make swevent hrtimer run in irq instead of softirq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Otherwise we get a deadlock like below: - -[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003 -[ 1044.042752] INFO: lockdep is turned off. -[ 1044.042754] Modules linked in: -[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29 -[ 1044.042759] Call Trace: -[ 1044.042761] [] __schedule_bug+0x65/0x80 -[ 1044.042770] [] __schedule+0x83c/0xa70 -[ 1044.042775] [] ? prepare_to_wait+0x32/0xb0 -[ 1044.042779] [] schedule+0x2e/0xa0 -[ 1044.042782] [] hrtimer_wait_for_timer+0x6d/0xb0 -[ 1044.042786] [] ? wake_up_bit+0x40/0x40 -[ 1044.042790] [] hrtimer_cancel+0x20/0x40 -[ 1044.042794] [] perf_swevent_cancel_hrtimer+0x3c/0x50 -[ 1044.042798] [] task_clock_event_stop+0x11/0x40 -[ 1044.042802] [] task_clock_event_del+0xe/0x10 -[ 1044.042805] [] event_sched_out+0x118/0x1d0 -[ 1044.042809] [] group_sched_out+0x29/0x90 -[ 1044.042813] [] __perf_event_disable+0x18e/0x200 -[ 1044.042817] [] remote_function+0x63/0x70 -[ 1044.042821] [] generic_smp_call_function_single_interrupt+0xce/0x120 -[ 1044.042826] [] smp_call_function_single_interrupt+0x27/0x40 -[ 1044.042831] [] call_function_single_interrupt+0x6c/0x80 -[ 1044.042833] [] ? perf_event_overflow+0x20/0x20 -[ 1044.042840] [] ? _raw_spin_unlock_irq+0x30/0x70 -[ 1044.042844] [] ? _raw_spin_unlock_irq+0x36/0x70 -[ 1044.042848] [] run_hrtimer_softirq+0xc2/0x200 -[ 1044.042853] [] ? perf_event_overflow+0x20/0x20 -[ 1044.042857] [] __do_softirq_common+0xf5/0x3a0 -[ 1044.042862] [] __thread_do_softirq+0x15d/0x200 -[ 1044.042865] [] run_ksoftirqd+0xfa/0x210 -[ 1044.042869] [] ? __thread_do_softirq+0x200/0x200 -[ 1044.042873] [] ? __thread_do_softirq+0x200/0x200 -[ 1044.042877] [] kthread+0xb6/0xc0 -[ 1044.042881] [] ? _raw_spin_unlock_irq+0x3b/0x70 -[ 1044.042886] [] kernel_thread_helper+0x4/0x10 -[ 1044.042889] [] ? finish_task_switch+0x8c/0x110 -[ 1044.042894] [] ? _raw_spin_unlock_irq+0x3b/0x70 -[ 1044.042897] [] ? retint_restore_args+0xe/0xe -[ 1044.042900] [] ? kthreadd+0x1e0/0x1e0 -[ 1044.042902] [] ? 
gs_change+0xb/0xb - -Signed-off-by: Yong Zhang -Cc: Peter Zijlstra -Cc: Steven Rostedt -Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner -Signed-off-by: Steven Rostedt - ---- - kernel/events/core.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -7228,6 +7228,7 @@ static void perf_swevent_init_hrtimer(st - - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swevent_hrtimer; -+ hwc->hrtimer.irqsafe = 1; - - /* - * Since hrtimers have a fixed rate, we can do a static freq->period diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch deleted file mode 100644 index 4f68e88d7..000000000 --- a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch +++ /dev/null @@ -1,167 +0,0 @@ -Subject: rcu: Frob softirq test -From: Peter Zijlstra -Date: Sat Aug 13 00:23:17 CEST 2011 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -With RT_FULL we get the below wreckage: - -[ 126.060484] ======================================================= -[ 126.060486] [ INFO: possible circular locking dependency detected ] -[ 126.060489] 3.0.1-rt10+ #30 -[ 126.060490] ------------------------------------------------------- -[ 126.060492] irq/24-eth0/1235 is trying to acquire lock: -[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060503] -[ 126.060504] but task is already holding lock: -[ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 -[ 126.060511] -[ 126.060511] which lock already depends on the new lock. -[ 126.060513] -[ 126.060514] -[ 126.060514] the existing dependency chain (in reverse order) is: -[ 126.060516] -[ 126.060516] -> #1 (&p->pi_lock){-...-.}: -[ 126.060519] [] lock_acquire+0x145/0x18a -[ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 -[ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f -[ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a -[ 126.060534] [] rt_mutex_lock+0x2d/0x2f -[ 126.060537] [] rcu_boost+0xad/0xde -[ 126.060541] [] rcu_boost_kthread+0x7d/0x9b -[ 126.060544] [] kthread+0x99/0xa1 -[ 126.060547] [] kernel_thread_helper+0x4/0x10 -[ 126.060551] -[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: -[ 126.060555] [] __lock_acquire+0x1157/0x1816 -[ 126.060558] [] lock_acquire+0x145/0x18a -[ 126.060561] [] _raw_spin_lock+0x40/0x73 -[ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060566] [] rt_mutex_unlock+0x27/0x29 -[ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 -[ 126.060573] [] __rcu_read_unlock+0x48/0x89 -[ 126.060576] [] select_task_rq_rt+0xc7/0xd5 -[ 126.060580] [] try_to_wake_up+0x175/0x429 -[ 126.060583] [] wake_up_process+0x15/0x17 -[ 126.060585] [] wakeup_softirqd+0x24/0x26 -[ 126.060590] [] irq_exit+0x49/0x55 -[ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 -[ 126.060597] [] apic_timer_interrupt+0x13/0x20 -[ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 -[ 126.060603] [] irq_thread+0xde/0x1af -[ 126.060606] [] kthread+0x99/0xa1 -[ 126.060608] [] kernel_thread_helper+0x4/0x10 -[ 126.060611] -[ 126.060612] other info that might help us debug this: -[ 126.060614] -[ 126.060615] Possible unsafe locking scenario: -[ 126.060616] -[ 126.060617] CPU0 CPU1 -[ 126.060619] ---- ---- -[ 126.060620] lock(&p->pi_lock); -[ 126.060623] lock(&(lock)->wait_lock); -[ 126.060625] lock(&p->pi_lock); -[ 126.060627] 
lock(&(lock)->wait_lock); -[ 126.060629] -[ 126.060629] *** DEADLOCK *** -[ 126.060630] -[ 126.060632] 1 lock held by irq/24-eth0/1235: -[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 -[ 126.060638] -[ 126.060638] stack backtrace: -[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 -[ 126.060643] Call Trace: -[ 126.060644] [] print_circular_bug+0x289/0x29a -[ 126.060651] [] __lock_acquire+0x1157/0x1816 -[ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 -[ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060661] [] lock_acquire+0x145/0x18a -[ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060668] [] _raw_spin_lock+0x40/0x73 -[ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c -[ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060680] [] ? rcu_read_unlock_special+0x9b/0x1c4 -[ 126.060683] [] rt_mutex_unlock+0x27/0x29 -[ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 -[ 126.060690] [] __rcu_read_unlock+0x48/0x89 -[ 126.060693] [] select_task_rq_rt+0xc7/0xd5 -[ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 -[ 126.060701] [] ? clockevents_program_event+0x8e/0x90 -[ 126.060704] [] try_to_wake_up+0x175/0x429 -[ 126.060708] [] ? tick_program_event+0x1f/0x21 -[ 126.060711] [] wake_up_process+0x15/0x17 -[ 126.060715] [] wakeup_softirqd+0x24/0x26 -[ 126.060718] [] irq_exit+0x49/0x55 -[ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 -[ 126.060724] [] apic_timer_interrupt+0x13/0x20 -[ 126.060726] [] ? migrate_disable+0x75/0x12d -[ 126.060733] [] ? local_bh_disable+0xe/0x1f -[ 126.060736] [] ? local_bh_disable+0x1d/0x1f -[ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 -[ 126.060742] [] ? _raw_spin_unlock_irq+0x3b/0x59 -[ 126.060745] [] irq_thread+0xde/0x1af -[ 126.060748] [] ? irq_thread_fn+0x3a/0x3a -[ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 -[ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 -[ 126.060757] [] kthread+0x99/0xa1 -[ 126.060761] [] kernel_thread_helper+0x4/0x10 -[ 126.060764] [] ? finish_task_switch+0x87/0x10a -[ 126.060768] [] ? retint_restore_args+0xe/0xe -[ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c -[ 126.060774] [] ? gs_change+0xb/0xb - -Because irq_exit() does: - -void irq_exit(void) -{ - account_system_vtime(current); - trace_hardirq_exit(); - sub_preempt_count(IRQ_EXIT_OFFSET); - if (!in_interrupt() && local_softirq_pending()) - invoke_softirq(); - - ... -} - -Which triggers a wakeup, which uses RCU, now if the interrupted task has -t->rcu_read_unlock_special set, the rcu usage from the wakeup will end -up in rcu_read_unlock_special(). rcu_read_unlock_special() will test -for in_irq(), which will fail as we just decremented preempt_count -with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for -PREEMPT_RT_FULL reads: - -int in_serving_softirq(void) -{ - int res; - - preempt_disable(); - res = __get_cpu_var(local_softirq_runner) == current; - preempt_enable(); - return res; -} - -Which will thus also fail, resulting in the above wreckage. - -The 'somewhat' ugly solution is to open-code the preempt_count() test -in rcu_read_unlock_special(). - -Also, we're not at all sure how ->rcu_read_unlock_special gets set -here... so this is very likely a bandaid and more thought is required. - -Cc: Paul E. 
McKenney -Signed-off-by: Peter Zijlstra ---- - kernel/rcu/tree_plugin.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -432,7 +432,7 @@ void rcu_read_unlock_special(struct task - } - - /* Hardware IRQ handlers cannot block, complain if they get here. */ -- if (in_irq() || in_serving_softirq()) { -+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { - lockdep_rcu_suspicious(__FILE__, __LINE__, - "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); - pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch deleted file mode 100644 index 72f8a2c65..000000000 --- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch +++ /dev/null @@ -1,183 +0,0 @@ -Subject: crypto: Convert crypto notifier chain to SRCU -From: Peter Zijlstra -Date: Fri, 05 Oct 2012 09:03:24 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The crypto notifier deadlocks on RT. Though this can be a real deadlock -on mainline as well due to fifo fair rwsems. - -The involved parties here are: - -[ 82.172678] swapper/0 S 0000000000000001 0 1 0 0x00000000 -[ 82.172682] ffff88042f18fcf0 0000000000000046 ffff88042f18fc80 ffffffff81491238 -[ 82.172685] 0000000000011cc0 0000000000011cc0 ffff88042f18c040 ffff88042f18ffd8 -[ 82.172688] 0000000000011cc0 0000000000011cc0 ffff88042f18ffd8 0000000000011cc0 -[ 82.172689] Call Trace: -[ 82.172697] [] ? _raw_spin_unlock_irqrestore+0x6c/0x7a -[ 82.172701] [] schedule+0x64/0x66 -[ 82.172704] [] schedule_timeout+0x27/0xd0 -[ 82.172708] [] ? unpin_current_cpu+0x1a/0x6c -[ 82.172713] [] ? migrate_enable+0x12f/0x141 -[ 82.172716] [] wait_for_common+0xbb/0x11f -[ 82.172719] [] ? try_to_wake_up+0x182/0x182 -[ 82.172722] [] wait_for_completion_interruptible+0x1d/0x2e -[ 82.172726] [] crypto_wait_for_test+0x49/0x6b -[ 82.172728] [] crypto_register_alg+0x53/0x5a -[ 82.172730] [] crypto_register_algs+0x33/0x72 -[ 82.172734] [] ? aes_init+0x12/0x12 -[ 82.172737] [] aesni_init+0x64/0x66 -[ 82.172741] [] do_one_initcall+0x7f/0x13b -[ 82.172744] [] kernel_init+0x199/0x22c -[ 82.172747] [] ? loglevel+0x31/0x31 -[ 82.172752] [] kernel_thread_helper+0x4/0x10 -[ 82.172755] [] ? retint_restore_args+0x13/0x13 -[ 82.172759] [] ? start_kernel+0x3ca/0x3ca -[ 82.172761] [] ? gs_change+0x13/0x13 - -[ 82.174186] cryptomgr_test S 0000000000000001 0 41 2 0x00000000 -[ 82.174189] ffff88042c971980 0000000000000046 ffffffff81d74830 0000000000000292 -[ 82.174192] 0000000000011cc0 0000000000011cc0 ffff88042c96eb80 ffff88042c971fd8 -[ 82.174195] 0000000000011cc0 0000000000011cc0 ffff88042c971fd8 0000000000011cc0 -[ 82.174195] Call Trace: -[ 82.174198] [] schedule+0x64/0x66 -[ 82.174201] [] schedule_timeout+0x27/0xd0 -[ 82.174204] [] ? unpin_current_cpu+0x1a/0x6c -[ 82.174206] [] ? migrate_enable+0x12f/0x141 -[ 82.174209] [] wait_for_common+0xbb/0x11f -[ 82.174212] [] ? try_to_wake_up+0x182/0x182 -[ 82.174215] [] wait_for_completion_interruptible+0x1d/0x2e -[ 82.174218] [] cryptomgr_notify+0x280/0x385 -[ 82.174221] [] notifier_call_chain+0x6b/0x98 -[ 82.174224] [] ? 
rt_down_read+0x10/0x12 -[ 82.174227] [] __blocking_notifier_call_chain+0x70/0x8d -[ 82.174230] [] blocking_notifier_call_chain+0x14/0x16 -[ 82.174234] [] crypto_probing_notify+0x24/0x50 -[ 82.174236] [] crypto_alg_mod_lookup+0x3e/0x74 -[ 82.174238] [] crypto_alloc_base+0x36/0x8f -[ 82.174241] [] cryptd_alloc_ablkcipher+0x6e/0xb5 -[ 82.174243] [] ? kzalloc.clone.5+0xe/0x10 -[ 82.174246] [] ablk_init_common+0x1d/0x38 -[ 82.174249] [] ablk_ecb_init+0x15/0x17 -[ 82.174251] [] __crypto_alloc_tfm+0xc7/0x114 -[ 82.174254] [] ? crypto_lookup_skcipher+0x1f/0xe4 -[ 82.174256] [] crypto_alloc_ablkcipher+0x60/0xa5 -[ 82.174258] [] alg_test_skcipher+0x24/0x9b -[ 82.174261] [] ? finish_task_switch+0x3f/0xfa -[ 82.174263] [] alg_test+0x16f/0x1d7 -[ 82.174267] [] ? cryptomgr_probe+0xac/0xac -[ 82.174269] [] cryptomgr_test+0x2c/0x47 -[ 82.174272] [] kthread+0x7e/0x86 -[ 82.174275] [] ? finish_task_switch+0xaf/0xfa -[ 82.174278] [] kernel_thread_helper+0x4/0x10 -[ 82.174281] [] ? retint_restore_args+0x13/0x13 -[ 82.174284] [] ? __init_kthread_worker+0x8c/0x8c -[ 82.174287] [] ? gs_change+0x13/0x13 - -[ 82.174329] cryptomgr_probe D 0000000000000002 0 47 2 0x00000000 -[ 82.174332] ffff88042c991b70 0000000000000046 ffff88042c991bb0 0000000000000006 -[ 82.174335] 0000000000011cc0 0000000000011cc0 ffff88042c98ed00 ffff88042c991fd8 -[ 82.174338] 0000000000011cc0 0000000000011cc0 ffff88042c991fd8 0000000000011cc0 -[ 82.174338] Call Trace: -[ 82.174342] [] schedule+0x64/0x66 -[ 82.174344] [] __rt_mutex_slowlock+0x85/0xbe -[ 82.174347] [] rt_mutex_slowlock+0xec/0x159 -[ 82.174351] [] rt_mutex_fastlock.clone.8+0x29/0x2f -[ 82.174353] [] rt_mutex_lock+0x33/0x37 -[ 82.174356] [] __rt_down_read+0x50/0x5a -[ 82.174358] [] ? rt_down_read+0x10/0x12 -[ 82.174360] [] rt_down_read+0x10/0x12 -[ 82.174363] [] __blocking_notifier_call_chain+0x58/0x8d -[ 82.174366] [] blocking_notifier_call_chain+0x14/0x16 -[ 82.174369] [] crypto_probing_notify+0x24/0x50 -[ 82.174372] [] crypto_wait_for_test+0x22/0x6b -[ 82.174374] [] crypto_register_instance+0xb4/0xc0 -[ 82.174377] [] cryptd_create+0x378/0x3b6 -[ 82.174379] [] ? __crypto_lookup_template+0x5b/0x63 -[ 82.174382] [] cryptomgr_probe+0x45/0xac -[ 82.174385] [] ? crypto_alloc_pcomp+0x1b/0x1b -[ 82.174388] [] kthread+0x7e/0x86 -[ 82.174391] [] ? finish_task_switch+0xaf/0xfa -[ 82.174394] [] kernel_thread_helper+0x4/0x10 -[ 82.174398] [] ? retint_restore_args+0x13/0x13 -[ 82.174401] [] ? __init_kthread_worker+0x8c/0x8c -[ 82.174403] [] ? gs_change+0x13/0x13 - -cryptomgr_test spawns the cryptomgr_probe thread from the notifier -call. The probe thread fires the same notifier as the test thread and -deadlocks on the rwsem on RT. - -Now this is a potential deadlock in mainline as well, because we have -fifo fair rwsems. If another thread blocks with a down_write() on the -notifier chain before the probe thread issues the down_read() it will -block the probe thread and the whole party is dead locked. 
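A minimal sketch of the conversion (the SRCU notifier API is mainline; the chain and function names here are illustrative):

static SRCU_NOTIFIER_HEAD(example_chain);

static int example_notify(unsigned long val, void *v)
{
        /* The SRCU read side does not block on a writer the way a
         * fifo-fair rwsem does, so a callback may spawn a thread that
         * re-enters the chain without deadlocking. */
        return srcu_notifier_call_chain(&example_chain, val, v);
}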
- -Signed-off-by: Peter Zijlstra -Signed-off-by: Thomas Gleixner ---- - crypto/algapi.c | 4 ++-- - crypto/api.c | 6 +++--- - crypto/internal.h | 4 ++-- - 3 files changed, 7 insertions(+), 7 deletions(-) - ---- a/crypto/algapi.c -+++ b/crypto/algapi.c -@@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); - - int crypto_register_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_register(&crypto_chain, nb); -+ return srcu_notifier_chain_register(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_register_notifier); - - int crypto_unregister_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_unregister(&crypto_chain, nb); -+ return srcu_notifier_chain_unregister(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_unregister_notifier); - ---- a/crypto/api.c -+++ b/crypto/api.c -@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); - DECLARE_RWSEM(crypto_alg_sem); - EXPORT_SYMBOL_GPL(crypto_alg_sem); - --BLOCKING_NOTIFIER_HEAD(crypto_chain); -+SRCU_NOTIFIER_HEAD(crypto_chain); - EXPORT_SYMBOL_GPL(crypto_chain); - - static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); -@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long - { - int ok; - -- ok = blocking_notifier_call_chain(&crypto_chain, val, v); -+ ok = srcu_notifier_call_chain(&crypto_chain, val, v); - if (ok == NOTIFY_DONE) { - request_module("cryptomgr"); -- ok = blocking_notifier_call_chain(&crypto_chain, val, v); -+ ok = srcu_notifier_call_chain(&crypto_chain, val, v); - } - - return ok; ---- a/crypto/internal.h -+++ b/crypto/internal.h -@@ -47,7 +47,7 @@ struct crypto_larval { - - extern struct list_head crypto_alg_list; - extern struct rw_semaphore crypto_alg_sem; --extern struct blocking_notifier_head crypto_chain; -+extern struct srcu_notifier_head crypto_chain; - - #ifdef CONFIG_PROC_FS - void __init crypto_init_proc(void); -@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(str - - static inline void crypto_notify(unsigned long val, void *v) - { -- blocking_notifier_call_chain(&crypto_chain, val, v); -+ srcu_notifier_call_chain(&crypto_chain, val, v); - } - - #endif /* _CRYPTO_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch b/debian/patches/features/all/rt/pid.h-include-atomic.h.patch deleted file mode 100644 index e29b4c4bc..000000000 --- a/debian/patches/features/all/rt/pid.h-include-atomic.h.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Grygorii Strashko -Date: Tue, 21 Jul 2015 19:43:56 +0300 -Subject: wait.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This patch fixes build error: - CC kernel/pid_namespace.o -In file included from kernel/pid_namespace.c:11:0: -include/linux/pid.h: In function 'get_pid': -include/linux/pid.h:78:3: error: implicit declaration of function 'atomic_inc' [-Werror=implicit-function-declaration] - atomic_inc(&pid->count); - ^ -which happens when - CONFIG_PROVE_LOCKING=n - CONFIG_DEBUG_SPINLOCK=n - CONFIG_DEBUG_MUTEXES=n - CONFIG_DEBUG_LOCK_ALLOC=n - CONFIG_PID_NS=y - -Vanilla gets this via spinlock.h. 
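
The general rule the one-liner below restores is that a header must include
what it uses instead of leaning on a transitive include that only exists
under certain configs. A reduced, hypothetical reproduction of the same
failure mode:

    /* foo.h -- broken: compiles only while some other header happens
     * to drag the atomic ops in (here: the spinlock debug paths) */
    static inline void foo_get(atomic_t *count)
    {
            atomic_inc(count);
    }

    /* fix: have foo.h state its own dependency */
    #include <linux/atomic.h>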
- -Signed-off-by: Grygorii Strashko -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/pid.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/pid.h -+++ b/include/linux/pid.h -@@ -2,6 +2,7 @@ - #define _LINUX_PID_H - - #include -+#include - - enum pid_type - { diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch deleted file mode 100644 index 583769219..000000000 --- a/debian/patches/features/all/rt/ping-sysrq.patch +++ /dev/null @@ -1,122 +0,0 @@ -Subject: net: sysrq via icmp -From: Carsten Emde -Date: Tue, 19 Jul 2011 13:51:17 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -There are (probably rare) situations when a system crashed and the system -console becomes unresponsive but the network icmp layer still is alive. -Wouldn't it be wonderful, if we then could submit a sysreq command via ping? - -This patch provides this facility. Please consult the updated documentation -Documentation/sysrq.txt for details. - -Signed-off-by: Carsten Emde - ---- - Documentation/sysrq.txt | 11 +++++++++-- - include/net/netns/ipv4.h | 1 + - net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++ - net/ipv4/sysctl_net_ipv4.c | 7 +++++++ - 4 files changed, 47 insertions(+), 2 deletions(-) - ---- a/Documentation/sysrq.txt -+++ b/Documentation/sysrq.txt -@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen ( - On other - If you know of the key combos for other architectures, please - let me know so I can add them to this section. - --On all - write a character to /proc/sysrq-trigger. e.g.: -- -+On all - write a character to /proc/sysrq-trigger, e.g.: - echo t > /proc/sysrq-trigger - -+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. -+ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq -+ Send an ICMP echo request with this pattern plus the particular -+ SysRq command key. Example: -+ # ping -c1 -s57 -p0102030468 -+ will trigger the SysRq-H (help) command. -+ -+ - * What are the 'command' keys? - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 'b' - Will immediately reboot the system without syncing or unmounting ---- a/include/net/netns/ipv4.h -+++ b/include/net/netns/ipv4.h -@@ -70,6 +70,7 @@ struct netns_ipv4 { - - int sysctl_icmp_echo_ignore_all; - int sysctl_icmp_echo_ignore_broadcasts; -+ int sysctl_icmp_echo_sysrq; - int sysctl_icmp_ignore_bogus_error_responses; - int sysctl_icmp_ratelimit; - int sysctl_icmp_ratemask; ---- a/net/ipv4/icmp.c -+++ b/net/ipv4/icmp.c -@@ -69,6 +69,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -891,6 +892,30 @@ static bool icmp_redirect(struct sk_buff - } - - /* -+ * 32bit and 64bit have different timestamp length, so we check for -+ * the cookie at offset 20 and verify it is repeated at offset 50 -+ */ -+#define CO_POS0 20 -+#define CO_POS1 50 -+#define CO_SIZE sizeof(int) -+#define ICMP_SYSRQ_SIZE 57 -+ -+/* -+ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie -+ * pattern and if it matches send the next byte as a trigger to sysrq. -+ */ -+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) -+{ -+ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); -+ char *p = skb->data; -+ -+ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && -+ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && -+ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) -+ handle_sysrq(p[CO_POS0 + CO_SIZE]); -+} -+ -+/* - * Handle ICMP_ECHO ("ping") requests. 
- * - * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo -@@ -917,6 +942,11 @@ static bool icmp_echo(struct sk_buff *sk - icmp_param.data_len = skb->len; - icmp_param.head_len = sizeof(struct icmphdr); - icmp_reply(&icmp_param, skb); -+ -+ if (skb->len == ICMP_SYSRQ_SIZE && -+ net->ipv4.sysctl_icmp_echo_sysrq) { -+ icmp_check_sysrq(net, skb); -+ } - } - /* should there be an ICMP stat for ignored echos? */ - return true; ---- a/net/ipv4/sysctl_net_ipv4.c -+++ b/net/ipv4/sysctl_net_ipv4.c -@@ -818,6 +818,13 @@ static struct ctl_table ipv4_net_table[] - .proc_handler = proc_dointvec - }, - { -+ .procname = "icmp_echo_sysrq", -+ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec -+ }, -+ { - .procname = "icmp_ignore_bogus_error_responses", - .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, - .maxlen = sizeof(int), diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch deleted file mode 100644 index 4c18351c6..000000000 --- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch +++ /dev/null @@ -1,34 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:29:20 -0500 -Subject: posix-timers: Prevent broadcast signals -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Posix timers should not send broadcast signals and kernel only -signals. Prevent it. - -Signed-off-by: Thomas Gleixner - ---- - kernel/time/posix-timers.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/kernel/time/posix-timers.c -+++ b/kernel/time/posix-timers.c -@@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_ - static struct pid *good_sigevent(sigevent_t * event) - { - struct task_struct *rtn = current->group_leader; -+ int sig = event->sigev_signo; - - if ((event->sigev_notify & SIGEV_THREAD_ID ) && - (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || -@@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigeven - return NULL; - - if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && -- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) -+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || -+ sig_kernel_coredump(sig))) - return NULL; - - return task_pid(rtn); diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch deleted file mode 100644 index 9f6fa70e8..000000000 --- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch +++ /dev/null @@ -1,302 +0,0 @@ -From: John Stultz -Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: posix-timers: Thread posix-cpu-timers on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -posix-cpu-timer code takes non -rt safe locks in hard irq -context. Move it to a thread. 
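
One implementation detail below is worth spelling out in isolation: tasks are
handed to the per-CPU posixcputmr/N thread on a singly linked list whose last
element points to itself, so a NULL ->posix_timer_list doubles as the "not
queued" test and the drain loop needs no separate length or tail pointer. A
stand-alone model of the idiom (plain userspace C, hypothetical names):

    struct node { struct node *next; };

    static void push(struct node **head, struct node *n)
    {
            n->next = *head ? *head : n;    /* self-pointer marks the tail */
            *head = n;
    }

    static void drain(struct node **head)
    {
            struct node *n = *head;

            *head = NULL;
            while (n) {
                    struct node *next = n->next;

                    n->next = NULL;         /* "not queued" again */
                    /* ... expire this node's timers here ... */
                    if (next == n)          /* reached the self-pointing tail */
                            break;
                    n = next;
            }
    }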
- -[ 3.0 fixes from Peter Zijlstra ] - -Signed-off-by: John Stultz -Signed-off-by: Thomas Gleixner - ---- - include/linux/init_task.h | 7 + - include/linux/sched.h | 3 - kernel/fork.c | 3 - kernel/time/posix-cpu-timers.c | 193 ++++++++++++++++++++++++++++++++++++++++- - 4 files changed, 202 insertions(+), 4 deletions(-) - ---- a/include/linux/init_task.h -+++ b/include/linux/init_task.h -@@ -148,6 +148,12 @@ extern struct task_group root_task_group - # define INIT_PERF_EVENTS(tsk) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define INIT_TIMER_LIST .posix_timer_list = NULL, -+#else -+# define INIT_TIMER_LIST -+#endif -+ - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - # define INIT_VTIME(tsk) \ - .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ -@@ -240,6 +246,7 @@ extern struct task_group root_task_group - .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ - .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ - .timer_slack_ns = 50000, /* 50 usec default slack */ \ -+ INIT_TIMER_LIST \ - .pids = { \ - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1544,6 +1544,9 @@ struct task_struct { - - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *posix_timer_list; -+#endif - - /* process credentials */ - const struct cred __rcu *real_cred; /* objective and real subjective task ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1218,6 +1218,9 @@ static void rt_mutex_init_task(struct ta - */ - static void posix_cpu_timers_init(struct task_struct *tsk) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ tsk->posix_timer_list = NULL; -+#endif - tsk->cputime_expires.prof_exp = 0; - tsk->cputime_expires.virt_exp = 0; - tsk->cputime_expires.sched_exp = 0; ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -3,6 +3,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -650,7 +651,7 @@ static int posix_cpu_timer_set(struct k_ - /* - * Disarm any old timer after extracting its expiry time. - */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - - ret = 0; - old_incr = timer->it.cpu.incr; -@@ -1091,7 +1092,7 @@ void posix_cpu_timer_schedule(struct k_i - /* - * Now re-arm for the new expiry time. - */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - arm_timer(timer); - unlock_task_sighand(p, &flags); - -@@ -1182,13 +1183,13 @@ static inline int fastpath_timer_check(s - * already updated our counts. We need to check if any timers fire now. - * Interrupts are disabled. 
- */ --void run_posix_cpu_timers(struct task_struct *tsk) -+static void __run_posix_cpu_timers(struct task_struct *tsk) - { - LIST_HEAD(firing); - struct k_itimer *timer, *next; - unsigned long flags; - -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - - /* - * The fast path checks that there are no expired thread or thread -@@ -1242,6 +1243,190 @@ void run_posix_cpu_timers(struct task_st - } - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+#include -+#include -+DEFINE_PER_CPU(struct task_struct *, posix_timer_task); -+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); -+ -+static int posix_cpu_timers_thread(void *data) -+{ -+ int cpu = (long)data; -+ -+ BUG_ON(per_cpu(posix_timer_task,cpu) != current); -+ -+ while (!kthread_should_stop()) { -+ struct task_struct *tsk = NULL; -+ struct task_struct *next = NULL; -+ -+ if (cpu_is_offline(cpu)) -+ goto wait_to_die; -+ -+ /* grab task list */ -+ raw_local_irq_disable(); -+ tsk = per_cpu(posix_timer_tasklist, cpu); -+ per_cpu(posix_timer_tasklist, cpu) = NULL; -+ raw_local_irq_enable(); -+ -+ /* its possible the list is empty, just return */ -+ if (!tsk) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule(); -+ __set_current_state(TASK_RUNNING); -+ continue; -+ } -+ -+ /* Process task list */ -+ while (1) { -+ /* save next */ -+ next = tsk->posix_timer_list; -+ -+ /* run the task timers, clear its ptr and -+ * unreference it -+ */ -+ __run_posix_cpu_timers(tsk); -+ tsk->posix_timer_list = NULL; -+ put_task_struct(tsk); -+ -+ /* check if this is the last on the list */ -+ if (next == tsk) -+ break; -+ tsk = next; -+ } -+ } -+ return 0; -+ -+wait_to_die: -+ /* Wait for kthread_stop */ -+ set_current_state(TASK_INTERRUPTIBLE); -+ while (!kthread_should_stop()) { -+ schedule(); -+ set_current_state(TASK_INTERRUPTIBLE); -+ } -+ __set_current_state(TASK_RUNNING); -+ return 0; -+} -+ -+static inline int __fastpath_timer_check(struct task_struct *tsk) -+{ -+ /* tsk == current, ensure it is safe to use ->signal/sighand */ -+ if (unlikely(tsk->exit_state)) -+ return 0; -+ -+ if (!task_cputime_zero(&tsk->cputime_expires)) -+ return 1; -+ -+ if (!task_cputime_zero(&tsk->signal->cputime_expires)) -+ return 1; -+ -+ return 0; -+} -+ -+void run_posix_cpu_timers(struct task_struct *tsk) -+{ -+ unsigned long cpu = smp_processor_id(); -+ struct task_struct *tasklist; -+ -+ BUG_ON(!irqs_disabled()); -+ if(!per_cpu(posix_timer_task, cpu)) -+ return; -+ /* get per-cpu references */ -+ tasklist = per_cpu(posix_timer_tasklist, cpu); -+ -+ /* check to see if we're already queued */ -+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { -+ get_task_struct(tsk); -+ if (tasklist) { -+ tsk->posix_timer_list = tasklist; -+ } else { -+ /* -+ * The list is terminated by a self-pointing -+ * task_struct -+ */ -+ tsk->posix_timer_list = tsk; -+ } -+ per_cpu(posix_timer_tasklist, cpu) = tsk; -+ -+ wake_up_process(per_cpu(posix_timer_task, cpu)); -+ } -+} -+ -+/* -+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added. -+ * Here we can start up the necessary migration thread for the new CPU. 
-+ */ -+static int posix_cpu_thread_call(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ int cpu = (long)hcpu; -+ struct task_struct *p; -+ struct sched_param param; -+ -+ switch (action) { -+ case CPU_UP_PREPARE: -+ p = kthread_create(posix_cpu_timers_thread, hcpu, -+ "posixcputmr/%d",cpu); -+ if (IS_ERR(p)) -+ return NOTIFY_BAD; -+ p->flags |= PF_NOFREEZE; -+ kthread_bind(p, cpu); -+ /* Must be high prio to avoid getting starved */ -+ param.sched_priority = MAX_RT_PRIO-1; -+ sched_setscheduler(p, SCHED_FIFO, ¶m); -+ per_cpu(posix_timer_task,cpu) = p; -+ break; -+ case CPU_ONLINE: -+ /* Strictly unneccessary, as first user will wake it. */ -+ wake_up_process(per_cpu(posix_timer_task,cpu)); -+ break; -+#ifdef CONFIG_HOTPLUG_CPU -+ case CPU_UP_CANCELED: -+ /* Unbind it from offline cpu so it can run. Fall thru. */ -+ kthread_bind(per_cpu(posix_timer_task, cpu), -+ cpumask_any(cpu_online_mask)); -+ kthread_stop(per_cpu(posix_timer_task,cpu)); -+ per_cpu(posix_timer_task,cpu) = NULL; -+ break; -+ case CPU_DEAD: -+ kthread_stop(per_cpu(posix_timer_task,cpu)); -+ per_cpu(posix_timer_task,cpu) = NULL; -+ break; -+#endif -+ } -+ return NOTIFY_OK; -+} -+ -+/* Register at highest priority so that task migration (migrate_all_tasks) -+ * happens before everything else. -+ */ -+static struct notifier_block posix_cpu_thread_notifier = { -+ .notifier_call = posix_cpu_thread_call, -+ .priority = 10 -+}; -+ -+static int __init posix_cpu_thread_init(void) -+{ -+ void *hcpu = (void *)(long)smp_processor_id(); -+ /* Start one for boot CPU. */ -+ unsigned long cpu; -+ -+ /* init the per-cpu posix_timer_tasklets */ -+ for_each_possible_cpu(cpu) -+ per_cpu(posix_timer_tasklist, cpu) = NULL; -+ -+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); -+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); -+ register_cpu_notifier(&posix_cpu_thread_notifier); -+ return 0; -+} -+early_initcall(posix_cpu_thread_init); -+#else /* CONFIG_PREEMPT_RT_BASE */ -+void run_posix_cpu_timers(struct task_struct *tsk) -+{ -+ __run_posix_cpu_timers(tsk); -+} -+#endif /* CONFIG_PREEMPT_RT_BASE */ -+ - /* - * Set one of the process-wide special case CPU timers or RLIMIT_CPU. - * The tsk->sighand->siglock must be held by the caller. diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch deleted file mode 100644 index 94da283cd..000000000 --- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch +++ /dev/null @@ -1,23 +0,0 @@ -Subject: powerpc: Disable highmem on RT -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 17:08:34 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The current highmem handling on -RT is not compatible and needs fixups. 
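
The usual culprit behind this incompatibility -- an assumption on my part,
not spelled out in the patch -- is kmap_atomic(): its per-CPU fixmap slots
are only safe because mainline disables preemption and page faults for the
duration of the mapping, a guarantee fully preemptible RT lock sections
cannot give. Schematically, with a hypothetical stand-in for the fixmap
plumbing:

    static void *kmap_atomic_sketch(struct page *page)
    {
            preempt_disable();              /* pins this CPU's fixmap slot */
            pagefault_disable();            /* no sleeping faults while mapped */
            return fixmap_map_sketch(page); /* hypothetical helper */
    }

Until an RT-specific highmem rework exists for ppc32, gating HIGHMEM off is
the safe answer.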
- -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -320,7 +320,7 @@ menu "Kernel options" - - config HIGHMEM - bool "High memory support" -- depends on PPC32 -+ depends on PPC32 && !PREEMPT_RT_FULL - - source kernel/Kconfig.hz - source kernel/Kconfig.preempt diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch deleted file mode 100644 index 81e739f66..000000000 --- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: powerpc: Use generic rwsem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use generic code which uses rtmutex - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT - - config RWSEM_GENERIC_SPINLOCK - bool -+ default y if PREEMPT_RT_FULL - - config RWSEM_XCHGADD_ALGORITHM - bool -- default y -+ default y if !PREEMPT_RT_FULL - - config GENERIC_LOCKBREAK - bool diff --git a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch deleted file mode 100644 index ec47c9697..000000000 --- a/debian/patches/features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Bogdan Purcareata -Date: Fri, 24 Apr 2015 15:53:13 +0000 -Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -While converting the openpic emulation code to use a raw_spinlock_t enables -guests to run on RT, there's still a performance issue. For interrupts sent in -directed delivery mode with a multiple CPU mask, the emulated openpic will loop -through all of the VCPUs, and for each VCPUs, it call IRQ_check, which will loop -through all the pending interrupts for that VCPU. This is done while holding the -raw_lock, meaning that in all this time the interrupts and preemption are -disabled on the host Linux. A malicious user app can max both these number and -cause a DoS. - -This temporary fix is sent for two reasons. First is so that users who want to -use the in-kernel MPIC emulation are aware of the potential latencies, thus -making sure that the hardware MPIC and their usage scenario does not involve -interrupts sent in directed delivery mode, and the number of possible pending -interrupts is kept small. Secondly, this should incentivize the development of a -proper openpic emulation that would be better suited for RT. 
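
The window described above is multiplicative: with the raw lock held, the
emulated openpic walks every VCPU and, per VCPU, every pending source via
IRQ_check, so the interrupts-off time on the host grows roughly as

    num_vcpus * pending_irqs_per_vcpu * cost(IRQ_check)

and the first two factors are guest-controlled -- hence the DoS potential
and the !PREEMPT_RT_FULL dependency below.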
- -Acked-by: Scott Wood -Signed-off-by: Bogdan Purcareata -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/powerpc/kvm/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/powerpc/kvm/Kconfig -+++ b/arch/powerpc/kvm/Kconfig -@@ -172,6 +172,7 @@ config KVM_E500MC - config KVM_MPIC - bool "KVM in-kernel MPIC emulation" - depends on KVM && E500 -+ depends on !PREEMPT_RT_FULL - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQ_ROUTING diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch deleted file mode 100644 index a6c2dda78..000000000 --- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch +++ /dev/null @@ -1,174 +0,0 @@ -From: Thomas Gleixner -Date: Thu, 1 Nov 2012 10:14:11 +0100 -Subject: powerpc: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Implement the powerpc pieces for lazy preempt. - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 1 + - arch/powerpc/include/asm/thread_info.h | 11 ++++++++--- - arch/powerpc/kernel/asm-offsets.c | 1 + - arch/powerpc/kernel/entry_32.S | 17 ++++++++++++----- - arch/powerpc/kernel/entry_64.S | 14 +++++++++++--- - 5 files changed, 33 insertions(+), 11 deletions(-) - ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -142,6 +142,7 @@ config PPC - select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER -+ select HAVE_PREEMPT_LAZY - select HAVE_MOD_ARCH_SPECIFIC - select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -42,6 +42,8 @@ struct thread_info { - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, - <0 => BUG */ -+ int preempt_lazy_count; /* 0 => preemptable, -+ <0 => BUG */ - unsigned long local_flags; /* private flags for thread */ - - /* low level flags - has atomic operations done on it */ -@@ -82,8 +84,7 @@ static inline struct thread_info *curren - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ - #define TIF_SIGPENDING 1 /* signal pending */ - #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ --#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling -- TIF_NEED_RESCHED */ -+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ - #define TIF_32BIT 4 /* 32 bit binary */ - #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -101,6 +102,8 @@ static inline struct thread_info *curren - #if defined(CONFIG_PPC64) - #define TIF_ELF2ABI 18 /* function descriptors must die! 
*/ - #endif -+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling -+ TIF_NEED_RESCHED */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1< -Date: Sun, 31 May 2015 14:44:42 -0400 -Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -To fix: - - cc1: warnings being treated as errors - arch/powerpc/platforms/ps3/device-init.c: In function 'ps3_notification_read_write': - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'prepare_to_wait_event' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'abort_exclusive_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'finish_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.o] Error 1 - make[3]: *** Waiting for unfinished jobs.... - -Signed-off-by: Paul Gortmaker -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/powerpc/platforms/ps3/device-init.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/platforms/ps3/device-init.c -+++ b/arch/powerpc/platforms/ps3/device-init.c -@@ -752,7 +752,7 @@ static int ps3_notification_read_write(s - } - pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - -- res = wait_event_interruptible(dev->done.wait, -+ res = swait_event_interruptible(dev->done.wait, - dev->done.done || kthread_should_stop()); - if (kthread_should_stop()) - res = -EINTR; diff --git a/debian/patches/features/all/rt/preempt-lazy-check-preempt_schedule.patch b/debian/patches/features/all/rt/preempt-lazy-check-preempt_schedule.patch deleted file mode 100644 index 6e226d261..000000000 --- a/debian/patches/features/all/rt/preempt-lazy-check-preempt_schedule.patch +++ /dev/null @@ -1,74 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 20 Jan 2016 15:13:30 +0100 -Subject: preempt-lazy: Add the lazy-preemption check to preempt_schedule() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Probably in the rebase onto v4.1 this check got moved into less commonly used -preempt_schedule_notrace(). This patch ensures that both functions use it. - -Reported-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 36 ++++++++++++++++++++++++++++-------- - 1 file changed, 28 insertions(+), 8 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3460,6 +3460,30 @@ static void __sched notrace preempt_sche - } while (need_resched()); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+/* -+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is -+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as -+ * preempt_lazy_count counter >0. 
-+ */ -+static int preemptible_lazy(void) -+{ -+ if (test_thread_flag(TIF_NEED_RESCHED)) -+ return 1; -+ if (current_thread_info()->preempt_lazy_count) -+ return 0; -+ return 1; -+} -+ -+#else -+ -+static int preemptible_lazy(void) -+{ -+ return 1; -+} -+ -+#endif -+ - #ifdef CONFIG_PREEMPT - /* - * this is the entry point to schedule() from in-kernel preemption -@@ -3474,6 +3498,8 @@ asmlinkage __visible void __sched notrac - */ - if (likely(!preemptible())) - return; -+ if (!preemptible_lazy()) -+ return; - - preempt_schedule_common(); - } -@@ -3500,15 +3526,9 @@ asmlinkage __visible void __sched notrac - - if (likely(!preemptible())) - return; -- --#ifdef CONFIG_PREEMPT_LAZY -- /* -- * Check for lazy preemption -- */ -- if (current_thread_info()->preempt_lazy_count && -- !test_thread_flag(TIF_NEED_RESCHED)) -+ if (!preemptible_lazy()) - return; --#endif -+ - do { - preempt_disable_notrace(); - /* diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch deleted file mode 100644 index 0a2ca198a..000000000 --- a/debian/patches/features/all/rt/preempt-lazy-support.patch +++ /dev/null @@ -1,590 +0,0 @@ -Subject: sched: Add support for lazy preemption -From: Thomas Gleixner -Date: Fri, 26 Oct 2012 18:50:54 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -It has become an obsession to mitigate the determinism vs. throughput -loss of RT. Looking at the mainline semantics of preemption points -gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER -tasks. One major issue is the wakeup of tasks which are right away -preempting the waking task while the waking task holds a lock on which -the woken task will block right after having preempted the wakee. In -mainline this is prevented due to the implicit preemption disable of -spin/rw_lock held regions. On RT this is not possible due to the fully -preemptible nature of sleeping spinlocks. - -Though for a SCHED_OTHER task preempting another SCHED_OTHER task this -is really not a correctness issue. RT folks are concerned about -SCHED_FIFO/RR tasks preemption and not about the purely fairness -driven SCHED_OTHER preemption latencies. - -So I introduced a lazy preemption mechanism which only applies to -SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the -existing preempt_count each tasks sports now a preempt_lazy_count -which is manipulated on lock acquiry and release. This is slightly -incorrect as for lazyness reasons I coupled this on -migrate_disable/enable so some other mechanisms get the same treatment -(e.g. get_cpu_light). - -Now on the scheduler side instead of setting NEED_RESCHED this sets -NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and -therefor allows to exit the waking task the lock held region before -the woken task preempts. That also works better for cross CPU wakeups -as the other side can stay in the adaptive spinning loop. - -For RT class preemption there is no change. This simply sets -NEED_RESCHED and forgoes the lazy preemption counter. 
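
Boiled down, the policy is a two-flag rule: an RT wakeup sets the real
NEED_RESCHED and is honoured immediately, while a fair-class wakeup sets only
NEED_RESCHED_LAZY, which turns into a real reschedule once the wakee's
preempt_lazy_count (bumped by migrate_disable() and friends) drops back to
zero. A toy userspace model of that rule -- hypothetical names, not the
kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    struct task {
            bool need_resched;       /* TIF_NEED_RESCHED: set by RT wakeups */
            bool need_resched_lazy;  /* TIF_NEED_RESCHED_LAZY: fair wakeups */
            int  preempt_lazy_count; /* >0 inside migrate-disabled sections */
    };

    static bool should_preempt(const struct task *t)
    {
            if (t->need_resched)
                    return true;     /* RT class: preempt right away */
            return t->need_resched_lazy && t->preempt_lazy_count == 0;
    }

    int main(void)
    {
            struct task t = { .need_resched_lazy = true,
                              .preempt_lazy_count = 1 };

            printf("%d\n", should_preempt(&t));  /* 0: lazy section held */
            t.preempt_lazy_count = 0;
            printf("%d\n", should_preempt(&t));  /* 1: preempt now */
            return 0;
    }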
- - Initial test do not expose any observable latency increasement, but -history shows that I've been proven wrong before :) - -The lazy preemption mode is per default on, but with -CONFIG_SCHED_DEBUG enabled it can be disabled via: - - # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features - -and reenabled via - - # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features - -The test results so far are very machine and workload dependent, but -there is a clear trend that it enhances the non RT workload -performance. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/include/asm/preempt.h | 18 +++++++++++++- - include/linux/preempt.h | 29 ++++++++++++++++++++++- - include/linux/sched.h | 37 ++++++++++++++++++++++++++++++ - include/linux/thread_info.h | 12 +++++++++ - include/linux/trace_events.h | 1 - kernel/Kconfig.preempt | 6 ++++ - kernel/sched/core.c | 50 ++++++++++++++++++++++++++++++++++++++++- - kernel/sched/fair.c | 16 ++++++------- - kernel/sched/features.h | 3 ++ - kernel/sched/sched.h | 9 +++++++ - kernel/trace/trace.c | 37 ++++++++++++++++++------------ - kernel/trace/trace.h | 2 + - kernel/trace/trace_output.c | 13 +++++++++- - 13 files changed, 204 insertions(+), 29 deletions(-) - ---- a/arch/x86/include/asm/preempt.h -+++ b/arch/x86/include/asm/preempt.h -@@ -79,17 +79,33 @@ static __always_inline void __preempt_co - * a decrement which hits zero means we have no preempt_count and should - * reschedule. - */ --static __always_inline bool __preempt_count_dec_and_test(void) -+static __always_inline bool ____preempt_count_dec_and_test(void) - { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); - } - -+static __always_inline bool __preempt_count_dec_and_test(void) -+{ -+ if (____preempt_count_dec_and_test()) -+ return true; -+#ifdef CONFIG_PREEMPT_LAZY -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+#else -+ return false; -+#endif -+} -+ - /* - * Returns true when we need to resched and can (barring IRQ state). 
- */ - static __always_inline bool should_resched(int preempt_offset) - { -+#ifdef CONFIG_PREEMPT_LAZY -+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset || -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)); -+#else - return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); -+#endif - } - - #ifdef CONFIG_PREEMPT ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val); - #define preempt_count_inc() preempt_count_add(1) - #define preempt_count_dec() preempt_count_sub(1) - -+#ifdef CONFIG_PREEMPT_LAZY -+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) -+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) -+#define inc_preempt_lazy_count() add_preempt_lazy_count(1) -+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) -+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) -+#else -+#define add_preempt_lazy_count(val) do { } while (0) -+#define sub_preempt_lazy_count(val) do { } while (0) -+#define inc_preempt_lazy_count() do { } while (0) -+#define dec_preempt_lazy_count() do { } while (0) -+#define preempt_lazy_count() (0) -+#endif -+ - #ifdef CONFIG_PREEMPT_COUNT - - #define preempt_disable() \ -@@ -161,6 +175,12 @@ do { \ - barrier(); \ - } while (0) - -+#define preempt_lazy_disable() \ -+do { \ -+ inc_preempt_lazy_count(); \ -+ barrier(); \ -+} while (0) -+ - #define sched_preempt_enable_no_resched() \ - do { \ - barrier(); \ -@@ -198,6 +218,13 @@ do { \ - __preempt_schedule(); \ - } while (0) - -+#define preempt_lazy_enable() \ -+do { \ -+ dec_preempt_lazy_count(); \ -+ barrier(); \ -+ preempt_check_resched(); \ -+} while (0) -+ - #else /* !CONFIG_PREEMPT */ - #define preempt_enable() \ - do { \ -@@ -264,7 +291,7 @@ do { \ - } while (0) - #define preempt_fold_need_resched() \ - do { \ -- if (tif_need_resched()) \ -+ if (tif_need_resched_now()) \ - set_preempt_need_resched(); \ - } while (0) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -2967,6 +2967,43 @@ static inline int test_tsk_need_resched( - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); -+} -+ -+static inline int need_resched_lazy(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#else -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } -+static inline int need_resched_lazy(void) { return 0; } -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#endif -+ - static inline int restart_syscall(void) - { - set_tsk_thread_flag(current, TIF_SIGPENDING); ---- a/include/linux/thread_info.h -+++ b/include/linux/thread_info.h -@@ -102,7 +102,17 @@ static inline int test_ti_thread_flag(st - #define test_thread_flag(flag) \ - test_ti_thread_flag(current_thread_info(), flag) - --#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#ifdef CONFIG_PREEMPT_LAZY -+#define 
tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) -+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+ -+#else -+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_lazy() 0 -+#endif - - #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK - /* ---- a/include/linux/trace_events.h -+++ b/include/linux/trace_events.h -@@ -68,6 +68,7 @@ struct trace_entry { - int pid; - unsigned short migrate_disable; - unsigned short padding; -+ unsigned char preempt_lazy_count; - }; - - #define TRACE_EVENT_TYPE_MAX \ ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE - bool - select PREEMPT - -+config HAVE_PREEMPT_LAZY -+ bool -+ -+config PREEMPT_LAZY -+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL -+ - choice - prompt "Preemption Model" - default PREEMPT_NONE ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -603,6 +603,38 @@ void resched_curr(struct rq *rq) - trace_sched_wake_idle_without_ipi(cpu); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+void resched_curr_lazy(struct rq *rq) -+{ -+ struct task_struct *curr = rq->curr; -+ int cpu; -+ -+ if (!sched_feat(PREEMPT_LAZY)) { -+ resched_curr(rq); -+ return; -+ } -+ -+ lockdep_assert_held(&rq->lock); -+ -+ if (test_tsk_need_resched(curr)) -+ return; -+ -+ if (test_tsk_need_resched_lazy(curr)) -+ return; -+ -+ set_tsk_need_resched_lazy(curr); -+ -+ cpu = cpu_of(rq); -+ if (cpu == smp_processor_id()) -+ return; -+ -+ /* NEED_RESCHED_LAZY must be visible before we test polling */ -+ smp_mb(); -+ if (!tsk_is_polling(curr)) -+ smp_send_reschedule(cpu); -+} -+#endif -+ - void resched_cpu(int cpu) - { - struct rq *rq = cpu_rq(cpu); -@@ -2344,6 +2376,9 @@ int sched_fork(unsigned long clone_flags - p->on_cpu = 0; - #endif - init_task_preempt_count(p); -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(p)->preempt_lazy_count = 0; -+#endif - #ifdef CONFIG_SMP - plist_node_init(&p->pushable_tasks, MAX_PRIO); - RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3142,6 +3177,7 @@ void migrate_disable(void) - } - - preempt_disable(); -+ preempt_lazy_disable(); - pin_current_cpu(); - p->migrate_disable = 1; - preempt_enable(); -@@ -3181,6 +3217,7 @@ void migrate_enable(void) - - unpin_current_cpu(); - preempt_enable(); -+ preempt_lazy_enable(); - } - EXPORT_SYMBOL(migrate_enable); - #endif -@@ -3318,6 +3355,7 @@ static void __sched notrace __schedule(b - - next = pick_next_task(rq, prev); - clear_tsk_need_resched(prev); -+ clear_tsk_need_resched_lazy(prev); - clear_preempt_need_resched(); - rq->clock_skip_update = 0; - -@@ -3463,6 +3501,14 @@ asmlinkage __visible void __sched notrac - if (likely(!preemptible())) - return; - -+#ifdef CONFIG_PREEMPT_LAZY -+ /* -+ * Check for lazy preemption -+ */ -+ if (current_thread_info()->preempt_lazy_count && -+ !test_thread_flag(TIF_NEED_RESCHED)) -+ return; -+#endif - do { - preempt_disable_notrace(); - /* -@@ -5203,7 +5249,9 @@ void init_idle(struct task_struct *idle, - - /* Set the preempt count _outside_ the spinlocks! 
*/ - init_idle_preempt_count(idle, cpu); -- -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(idle)->preempt_lazy_count = 0; -+#endif - /* - * The idle tasks have their own, simple scheduling class: - */ ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -3135,7 +3135,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq - ideal_runtime = sched_slice(cfs_rq, curr); - delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - /* - * The current task ran long enough, ensure it doesn't get - * re-elected due to buddy favours. -@@ -3159,7 +3159,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq - return; - - if (delta > ideal_runtime) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static void -@@ -3299,7 +3299,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc - * validating it and just reschedule. - */ - if (queued) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - return; - } - /* -@@ -3481,7 +3481,7 @@ static void __account_cfs_rq_runtime(str - * hierarchy can be throttled - */ - if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static __always_inline -@@ -4093,7 +4093,7 @@ static void hrtick_start_fair(struct rq - - if (delta < 0) { - if (rq->curr == p) -- resched_curr(rq); -+ resched_curr_lazy(rq); - return; - } - hrtick_start(rq, delta); -@@ -5177,7 +5177,7 @@ static void check_preempt_wakeup(struct - return; - - preempt: -- resched_curr(rq); -+ resched_curr_lazy(rq); - /* - * Only set the backward buddy when the current task is still - * on the rq. This can happen when a wakeup gets interleaved -@@ -7928,7 +7928,7 @@ static void task_fork_fair(struct task_s - * 'current' within the tree based on its new key value. - */ - swap(curr->vruntime, se->vruntime); -- resched_curr(rq); -+ resched_curr_lazy(rq); - } - - se->vruntime -= cfs_rq->min_vruntime; -@@ -7953,7 +7953,7 @@ prio_changed_fair(struct rq *rq, struct - */ - if (rq->curr == p) { - if (p->prio > oldprio) -- resched_curr(rq); -+ resched_curr_lazy(rq); - } else - check_preempt_curr(rq, p, 0); - } ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true) - - #ifdef CONFIG_PREEMPT_RT_FULL - SCHED_FEAT(TTWU_QUEUE, false) -+# ifdef CONFIG_PREEMPT_LAZY -+SCHED_FEAT(PREEMPT_LAZY, true) -+# endif - #else - - /* ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -1300,6 +1300,15 @@ extern void init_sched_fair_class(void); - extern void resched_curr(struct rq *rq); - extern void resched_cpu(int cpu); - -+#ifdef CONFIG_PREEMPT_LAZY -+extern void resched_curr_lazy(struct rq *rq); -+#else -+static inline void resched_curr_lazy(struct rq *rq) -+{ -+ resched_curr(rq); -+} -+#endif -+ - extern struct rt_bandwidth def_rt_bandwidth; - extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); - ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -1652,6 +1652,7 @@ tracing_generic_entry_update(struct trac - struct task_struct *tsk = current; - - entry->preempt_count = pc & 0xff; -+ entry->preempt_lazy_count = preempt_lazy_count(); - entry->pid = (tsk) ? tsk->pid : 0; - entry->flags = - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -1661,7 +1662,8 @@ tracing_generic_entry_update(struct trac - #endif - ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | - ((pc & SOFTIRQ_MASK) ? 
TRACE_FLAG_SOFTIRQ : 0) | -- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | -+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | -+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | - (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); - - entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; -@@ -2557,15 +2559,17 @@ get_total_entries(struct trace_buffer *b - - static void print_lat_help_header(struct seq_file *m) - { -- seq_puts(m, "# _------=> CPU# \n" -- "# / _-----=> irqs-off \n" -- "# | / _----=> need-resched \n" -- "# || / _---=> hardirq/softirq \n" -- "# ||| / _--=> preempt-depth \n" -- "# |||| / _--=> migrate-disable\n" -- "# ||||| / delay \n" -- "# cmd pid |||||| time | caller \n" -- "# \\ / ||||| \\ | / \n"); -+ seq_puts(m, "# _--------=> CPU# \n" -+ "# / _-------=> irqs-off \n" -+ "# | / _------=> need-resched \n" -+ "# || / _-----=> need-resched_lazy \n" -+ "# ||| / _----=> hardirq/softirq \n" -+ "# |||| / _---=> preempt-depth \n" -+ "# ||||| / _--=> preempt-lazy-depth\n" -+ "# |||||| / _-=> migrate-disable \n" -+ "# ||||||| / delay \n" -+ "# cmd pid |||||||| time | caller \n" -+ "# \\ / |||||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -2591,11 +2595,14 @@ static void print_func_help_header_irq(s - print_event_info(buf, m); - seq_puts(m, "# _-----=> irqs-off\n" - "# / _----=> need-resched\n" -- "# | / _---=> hardirq/softirq\n" -- "# || / _--=> preempt-depth\n" -- "# ||| / delay\n" -- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" -- "# | | | |||| | |\n"); -+ "# |/ _-----=> need-resched_lazy\n" -+ "# || / _---=> hardirq/softirq\n" -+ "# ||| / _--=> preempt-depth\n" -+ "# |||| /_--=> preempt-lazy-depth\n" -+ "# ||||| _-=> migrate-disable \n" -+ "# ||||| / delay\n" -+ "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n" -+ "# | | | |||||| | |\n"); - } - - void ---- a/kernel/trace/trace.h -+++ b/kernel/trace/trace.h -@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head { - * NEED_RESCHED - reschedule is requested - * HARDIRQ - inside an interrupt handler - * SOFTIRQ - inside a softirq handler -+ * NEED_RESCHED_LAZY - lazy reschedule is requested - */ - enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, -@@ -125,6 +126,7 @@ enum trace_flag_type { - TRACE_FLAG_HARDIRQ = 0x08, - TRACE_FLAG_SOFTIRQ = 0x10, - TRACE_FLAG_PREEMPT_RESCHED = 0x20, -+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40, - }; - - #define TRACE_BUF_SIZE 1024 ---- a/kernel/trace/trace_output.c -+++ b/kernel/trace/trace_output.c -@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq - { - char hardsoft_irq; - char need_resched; -+ char need_resched_lazy; - char irqs_off; - int hardirq; - int softirq; -@@ -413,6 +414,8 @@ int trace_print_lat_fmt(struct trace_seq - need_resched = '.'; - break; - } -+ need_resched_lazy = -+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; - - hardsoft_irq = - (hardirq && softirq) ? 'H' : -@@ -420,14 +423,20 @@ int trace_print_lat_fmt(struct trace_seq - softirq ? 
's' : - '.'; - -- trace_seq_printf(s, "%c%c%c", -- irqs_off, need_resched, hardsoft_irq); -+ trace_seq_printf(s, "%c%c%c%c", -+ irqs_off, need_resched, need_resched_lazy, -+ hardsoft_irq); - - if (entry->preempt_count) - trace_seq_printf(s, "%x", entry->preempt_count); - else - trace_seq_putc(s, '.'); - -+ if (entry->preempt_lazy_count) -+ trace_seq_printf(s, "%x", entry->preempt_lazy_count); -+ else -+ trace_seq_putc(s, '.'); -+ - if (entry->migrate_disable) - trace_seq_printf(s, "%x", entry->migrate_disable); - else diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch deleted file mode 100644 index 1d0b1e4cd..000000000 --- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch +++ /dev/null @@ -1,48 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 24 Jul 2009 12:38:56 +0200 -Subject: preempt: Provide preempt_*_(no)rt variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -RT needs a few preempt_disable/enable points which are not necessary -otherwise. Implement variants to avoid #ifdeffery. - -Signed-off-by: Thomas Gleixner - ---- - include/linux/preempt.h | 18 +++++++++++++++++- - 1 file changed, 17 insertions(+), 1 deletion(-) - ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -154,7 +154,11 @@ do { \ - preempt_count_dec(); \ - } while (0) - --#define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#else -+# define preempt_enable_no_resched() preempt_enable() -+#endif - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - -@@ -248,6 +252,18 @@ do { \ - set_preempt_need_resched(); \ - } while (0) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define preempt_disable_rt() preempt_disable() -+# define preempt_enable_rt() preempt_enable() -+# define preempt_disable_nort() barrier() -+# define preempt_enable_nort() barrier() -+#else -+# define preempt_disable_rt() barrier() -+# define preempt_enable_rt() barrier() -+# define preempt_disable_nort() preempt_disable() -+# define preempt_enable_nort() preempt_enable() -+#endif -+ - #ifdef CONFIG_PREEMPT_NOTIFIERS - - struct preempt_notifier; diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch deleted file mode 100644 index f873f38cf..000000000 --- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch +++ /dev/null @@ -1,32 +0,0 @@ -Subject: printk: Add "force_early_printk" boot param to help with debugging -From: Peter Zijlstra -Date: Fri, 02 Sep 2011 14:41:29 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Gives me an option to screw printk and actually see what the machine -says. 
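
For completeness: the switch added below is a bare token on the kernel
command line and, assuming an x86 early console has been configured, would
typically be combined with earlyprintk=, e.g.:

    earlyprintk=serial,ttyS0,115200 force_early_printk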
- -Signed-off-by: Peter Zijlstra -Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins -Signed-off-by: Thomas Gleixner -Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org ---- - kernel/printk/printk.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -271,6 +271,13 @@ asmlinkage void early_printk(const char - */ - static bool __read_mostly printk_killswitch; - -+static int __init force_early_printk_setup(char *str) -+{ -+ printk_killswitch = true; -+ return 0; -+} -+early_param("force_early_printk", force_early_printk_setup); -+ - void printk_kill(void) - { - printk_killswitch = true; diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch deleted file mode 100644 index 30d9d56b3..000000000 --- a/debian/patches/features/all/rt/printk-kill.patch +++ /dev/null @@ -1,164 +0,0 @@ -Subject: printk: Add a printk kill switch -From: Ingo Molnar -Date: Fri, 22 Jul 2011 17:58:40 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that -it does not dead-lock with the early printk code. - -Signed-off-by: Thomas Gleixner ---- - include/linux/printk.h | 2 + - kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++++------------- - kernel/watchdog.c | 10 ++++++ - 3 files changed, 71 insertions(+), 20 deletions(-) - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -117,9 +117,11 @@ int no_printk(const char *fmt, ...) - #ifdef CONFIG_EARLY_PRINTK - extern asmlinkage __printf(1, 2) - void early_printk(const char *fmt, ...); -+extern void printk_kill(void); - #else - static inline __printf(1, 2) __cold - void early_printk(const char *s, ...) { } -+static inline void printk_kill(void) { } - #endif - - typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -241,6 +241,58 @@ struct printk_log { - */ - static DEFINE_RAW_SPINLOCK(logbuf_lock); - -+#ifdef CONFIG_EARLY_PRINTK -+struct console *early_console; -+ -+static void early_vprintk(const char *fmt, va_list ap) -+{ -+ if (early_console) { -+ char buf[512]; -+ int n = vscnprintf(buf, sizeof(buf), fmt, ap); -+ -+ early_console->write(early_console, buf, n); -+ } -+} -+ -+asmlinkage void early_printk(const char *fmt, ...) -+{ -+ va_list ap; -+ -+ va_start(ap, fmt); -+ early_vprintk(fmt, ap); -+ va_end(ap); -+} -+ -+/* -+ * This is independent of any log levels - a global -+ * kill switch that turns off all of printk. -+ * -+ * Used by the NMI watchdog if early-printk is enabled. 
-+ */ -+static bool __read_mostly printk_killswitch; -+ -+void printk_kill(void) -+{ -+ printk_killswitch = true; -+} -+ -+#ifdef CONFIG_PRINTK -+static int forced_early_printk(const char *fmt, va_list ap) -+{ -+ if (!printk_killswitch) -+ return 0; -+ early_vprintk(fmt, ap); -+ return 1; -+} -+#endif -+ -+#else -+static inline int forced_early_printk(const char *fmt, va_list ap) -+{ -+ return 0; -+} -+#endif -+ - #ifdef CONFIG_PRINTK - DECLARE_WAIT_QUEUE_HEAD(log_wait); - /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -1672,6 +1724,13 @@ asmlinkage int vprintk_emit(int facility - /* cpu currently holding logbuf_lock in this function */ - static unsigned int logbuf_cpu = UINT_MAX; - -+ /* -+ * Fall back to early_printk if a debugging subsystem has -+ * killed printk output -+ */ -+ if (unlikely(forced_early_printk(fmt, args))) -+ return 1; -+ - if (level == LOGLEVEL_SCHED) { - level = LOGLEVEL_DEFAULT; - in_sched = true; -@@ -1961,26 +2020,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun - - #endif /* CONFIG_PRINTK */ - --#ifdef CONFIG_EARLY_PRINTK --struct console *early_console; -- --asmlinkage __visible void early_printk(const char *fmt, ...) --{ -- va_list ap; -- char buf[512]; -- int n; -- -- if (!early_console) -- return; -- -- va_start(ap, fmt); -- n = vscnprintf(buf, sizeof(buf), fmt, ap); -- va_end(ap); -- -- early_console->write(early_console, buf, n); --} --#endif -- - static int __add_preferred_console(char *name, int idx, char *options, - char *brl_options) - { ---- a/kernel/watchdog.c -+++ b/kernel/watchdog.c -@@ -299,6 +299,8 @@ static int is_softlockup(unsigned long t - - #ifdef CONFIG_HARDLOCKUP_DETECTOR - -+static DEFINE_RAW_SPINLOCK(watchdog_output_lock); -+ - static struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, -@@ -333,6 +335,13 @@ static void watchdog_overflow_callback(s - /* only print hardlockups once */ - if (__this_cpu_read(hard_watchdog_warn) == true) - return; -+ /* -+ * If early-printk is enabled then make sure we do not -+ * lock up in printk() and kill console logging: -+ */ -+ printk_kill(); -+ -+ raw_spin_lock(&watchdog_output_lock); - - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - print_modules(); -@@ -350,6 +359,7 @@ static void watchdog_overflow_callback(s - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) - trigger_allbutself_cpu_backtrace(); - -+ raw_spin_unlock(&watchdog_output_lock); - if (hardlockup_panic) - panic("Hard LOCKUP"); - diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch deleted file mode 100644 index e8d05f30b..000000000 --- a/debian/patches/features/all/rt/printk-rt-aware.patch +++ /dev/null @@ -1,101 +0,0 @@ -Subject: printk: Make rt aware -From: Thomas Gleixner -Date: Wed, 19 Sep 2012 14:50:37 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Drop the lock before calling the console driver and do not disable -interrupts while printing to a serial console. 
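
The shape of the change is the classic "drop the lock around slow I/O"
pattern: copy the record out under logbuf_lock, unlock (restoring
interrupts), and only then touch the console hardware. Schematically, with
hypothetical helpers standing in for the real record handling:

    static void emit_one_record(void)
    {
            unsigned long flags;
            char buf[256];
            int len;

            raw_spin_lock_irqsave(&logbuf_lock, flags);
            len = copy_next_record(buf, sizeof(buf));       /* hypothetical */
            raw_spin_unlock_irqrestore(&logbuf_lock, flags);

            /* the slow UART write now runs with IRQs enabled and,
             * under migrate_disable(), preemptibly on RT */
            write_console_stub(buf, len);                   /* hypothetical */
    }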
- -Signed-off-by: Thomas Gleixner ---- - kernel/printk/printk.c | 26 +++++++++++++++++++++++--- - 1 file changed, 23 insertions(+), 3 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -1502,6 +1502,7 @@ static void call_console_drivers(int lev - if (!console_drivers) - return; - -+ migrate_disable(); - for_each_console(con) { - if (exclusive_console && con != exclusive_console) - continue; -@@ -1517,6 +1518,7 @@ static void call_console_drivers(int lev - else - con->write(con, text, len); - } -+ migrate_enable(); - } - - /* -@@ -1577,6 +1579,15 @@ static inline int can_use_console(unsign - static int console_trylock_for_printk(void) - { - unsigned int cpu = smp_processor_id(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) && -+ !irqs_disabled(); -+#else -+ int lock = 1; -+#endif -+ -+ if (!lock) -+ return 0; - - if (!console_trylock()) - return 0; -@@ -1879,8 +1890,7 @@ asmlinkage int vprintk_emit(int facility - * console_sem which would prevent anyone from printing to - * console - */ -- preempt_disable(); -- -+ migrate_disable(); - /* - * Try to acquire and then immediately release the console - * semaphore. The release will print out buffers and wake up -@@ -1888,7 +1898,7 @@ asmlinkage int vprintk_emit(int facility - */ - if (console_trylock_for_printk()) - console_unlock(); -- preempt_enable(); -+ migrate_enable(); - lockdep_on(); - } - -@@ -2248,11 +2258,16 @@ static void console_cont_flush(char *tex - goto out; - - len = cont_print_text(text, size); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_unlock_irqrestore(&logbuf_lock, flags); -+ call_console_drivers(cont.level, NULL, 0, text, len); -+#else - raw_spin_unlock(&logbuf_lock); - stop_critical_timings(); - call_console_drivers(cont.level, NULL, 0, text, len); - start_critical_timings(); - local_irq_restore(flags); -+#endif - return; - out: - raw_spin_unlock_irqrestore(&logbuf_lock, flags); -@@ -2351,12 +2366,17 @@ void console_unlock(void) - console_idx = log_next(console_idx); - console_seq++; - console_prev = msg->flags; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_unlock_irqrestore(&logbuf_lock, flags); -+ call_console_drivers(level, ext_text, ext_len, text, len); -+#else - raw_spin_unlock(&logbuf_lock); - - stop_critical_timings(); /* don't trace print latency */ - call_console_drivers(level, ext_text, ext_len, text, len); - start_critical_timings(); - local_irq_restore(flags); -+#endif - } - console_locked = 0; - diff --git a/debian/patches/features/all/rt/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch b/debian/patches/features/all/rt/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch deleted file mode 100644 index 491eb9468..000000000 --- a/debian/patches/features/all/rt/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 13 Jan 2016 14:09:05 +0100 -Subject: ptrace: don't open IRQs in ptrace_freeze_traced() too early -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In the non-RT case the spin_lock_irq() here disables interrupts as well -as raw_spin_lock_irq(). So in the unlock case the interrupts are enabled -too early. 
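
The invariant being restored: in the non-RT case, a lock/unlock pair nested
inside an outer irq-off section must use the irqsave/irqrestore variants,
because the plain _irq unlock unconditionally re-enables interrupts. A
reduced illustration with hypothetical locks:

    static DEFINE_SPINLOCK(outer);
    static DEFINE_RAW_SPINLOCK(inner);

    static void nested(void)
    {
            unsigned long flags;

            spin_lock_irq(&outer);                     /* IRQs off from here */
            raw_spin_lock_irqsave(&inner, flags);      /* flags records "off" */
            raw_spin_unlock_irqrestore(&inner, flags); /* leaves IRQs off */
            /* raw_spin_unlock_irq(&inner) would enable them too early */
            spin_unlock_irq(&outer);                   /* IRQs on again */
    }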
- -Reported-by: kernel test robot -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/ptrace.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -129,12 +129,14 @@ static bool ptrace_freeze_traced(struct - - spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !__fatal_signal_pending(task)) { -- raw_spin_lock_irq(&task->pi_lock); -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); - if (task->state & __TASK_TRACED) - task->state = __TASK_TRACED; - else - task->saved_state = __TASK_TRACED; -- raw_spin_unlock_irq(&task->pi_lock); -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - ret = true; - } - spin_unlock_irq(&task->sighand->siglock); diff --git a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch deleted file mode 100644 index cc59021e2..000000000 --- a/debian/patches/features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ /dev/null @@ -1,152 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Aug 2013 18:21:04 +0200 -Subject: ptrace: fix ptrace vs tasklist_lock race -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -As explained by Alexander Fyodorov : - -|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel, -|and it can remove __TASK_TRACED from task->state (by moving it to -|task->saved_state). If parent does wait() on child followed by a sys_ptrace -|call, the following race can happen: -| -|- child sets __TASK_TRACED in ptrace_stop() -|- parent does wait() which eventually calls wait_task_stopped() and returns -| child's pid -|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves -| __TASK_TRACED flag to saved_state -|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive() - -The patch is based on his initial patch where an additional check is -added in case the __TASK_TRACED moved to ->saved_state. The pi_lock is -taken in case the caller is interrupted between looking into ->state and -->saved_state. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/sched.h | 48 +++++++++++++++++++++++++++++++++++++++++++++--- - kernel/ptrace.c | 7 ++++++- - kernel/sched/core.c | 17 +++++++++++++++-- - 3 files changed, 66 insertions(+), 6 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -242,10 +242,7 @@ extern char ___assert_task_state[1 - 2*! 
- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - --#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) - #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) --#define task_is_stopped_or_traced(task) \ -- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) - #define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ -@@ -2984,6 +2981,51 @@ static inline int signal_pending_state(l - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); - } - -+static inline bool __task_is_stopped_or_traced(struct task_struct *task) -+{ -+ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#endif -+ return false; -+} -+ -+static inline bool task_is_stopped_or_traced(struct task_struct *task) -+{ -+ bool traced_stopped; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ traced_stopped = __task_is_stopped_or_traced(task); -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+#else -+ traced_stopped = __task_is_stopped_or_traced(task); -+#endif -+ return traced_stopped; -+} -+ -+static inline bool task_is_traced(struct task_struct *task) -+{ -+ bool traced = false; -+ -+ if (task->state & __TASK_TRACED) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* in case the task is sleeping on tasklist_lock */ -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->state & __TASK_TRACED) -+ traced = true; -+ else if (task->saved_state & __TASK_TRACED) -+ traced = true; -+ raw_spin_unlock_irq(&task->pi_lock); -+#endif -+ return traced; -+} -+ - /* - * cond_resched() and cond_resched_lock(): latency reduction via - * explicit rescheduling in places that are safe. The return ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -129,7 +129,12 @@ static bool ptrace_freeze_traced(struct - - spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !__fatal_signal_pending(task)) { -- task->state = __TASK_TRACED; -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->state & __TASK_TRACED) -+ task->state = __TASK_TRACED; -+ else -+ task->saved_state = __TASK_TRACED; -+ raw_spin_unlock_irq(&task->pi_lock); - ret = true; - } - spin_unlock_irq(&task->sighand->siglock); ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1426,6 +1426,18 @@ int migrate_swap(struct task_struct *cur - return ret; - } - -+static bool check_task_state(struct task_struct *p, long match_state) -+{ -+ bool match = false; -+ -+ raw_spin_lock_irq(&p->pi_lock); -+ if (p->state == match_state || p->saved_state == match_state) -+ match = true; -+ raw_spin_unlock_irq(&p->pi_lock); -+ -+ return match; -+} -+ - /* - * wait_task_inactive - wait for a thread to unschedule. - * -@@ -1470,7 +1482,7 @@ unsigned long wait_task_inactive(struct - * is actually now running somewhere else! 
- */ - while (task_running(rq, p)) { -- if (match_state && unlikely(p->state != match_state)) -+ if (match_state && !check_task_state(p, match_state)) - return 0; - cpu_relax(); - } -@@ -1485,7 +1497,8 @@ unsigned long wait_task_inactive(struct - running = task_running(rq, p); - queued = task_on_rq_queued(p); - ncsw = 0; -- if (!match_state || p->state == match_state) -+ if (!match_state || p->state == match_state || -+ p->saved_state == match_state) - ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ - task_rq_unlock(rq, p, &flags); - diff --git a/debian/patches/features/all/rt/radix-tree-rt-aware.patch b/debian/patches/features/all/rt/radix-tree-rt-aware.patch deleted file mode 100644 index 3d66f6ce8..000000000 --- a/debian/patches/features/all/rt/radix-tree-rt-aware.patch +++ /dev/null @@ -1,74 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:33:18 +0200 -Subject: radix-tree: Make RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Disable radix_tree_preload() on -RT. This functions returns with -preemption disabled which may cause high latencies and breaks if the -user tries to grab any locks after invoking it. - -Signed-off-by: Thomas Gleixner ---- - include/linux/radix-tree.h | 7 ++++++- - lib/radix-tree.c | 5 ++++- - 2 files changed, 10 insertions(+), 2 deletions(-) - ---- a/include/linux/radix-tree.h -+++ b/include/linux/radix-tree.h -@@ -277,8 +277,13 @@ radix_tree_gang_lookup(struct radix_tree - unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items); -+#ifndef CONFIG_PREEMPT_RT_FULL - int radix_tree_preload(gfp_t gfp_mask); - int radix_tree_maybe_preload(gfp_t gfp_mask); -+#else -+static inline int radix_tree_preload(gfp_t gm) { return 0; } -+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } -+#endif - void radix_tree_init(void); - void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, unsigned int tag); -@@ -303,7 +308,7 @@ unsigned long radix_tree_locate_item(str - - static inline void radix_tree_preload_end(void) - { -- preempt_enable(); -+ preempt_enable_nort(); - } - - /** ---- a/lib/radix-tree.c -+++ b/lib/radix-tree.c -@@ -196,13 +196,14 @@ radix_tree_node_alloc(struct radix_tree_ - * succeed in getting a node here (and never reach - * kmem_cache_alloc) - */ -- rtp = this_cpu_ptr(&radix_tree_preloads); -+ rtp = &get_cpu_var(radix_tree_preloads); - if (rtp->nr) { - ret = rtp->nodes; - rtp->nodes = ret->private_data; - ret->private_data = NULL; - rtp->nr--; - } -+ put_cpu_var(radix_tree_preloads); - /* - * Update the allocation stack trace as this is more useful - * for debugging. -@@ -242,6 +243,7 @@ radix_tree_node_free(struct radix_tree_n - call_rcu(&node->rcu_head, radix_tree_node_rcu_free); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Load up this CPU's radix_tree_node buffer with sufficient objects to - * ensure that the addition of a single element in the tree cannot fail. 
On -@@ -310,6 +312,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m - return 0; - } - EXPORT_SYMBOL(radix_tree_maybe_preload); -+#endif - - /* - * Return the maximum key which can be store into a diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch deleted file mode 100644 index 567619d00..000000000 --- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch +++ /dev/null @@ -1,116 +0,0 @@ -Subject: random: Make it work on rt -From: Thomas Gleixner -Date: Tue, 21 Aug 2012 20:38:50 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Delegate the random insertion to the forced threaded interrupt -handler. Store the return IP of the hard interrupt handler in the irq -descriptor and feed it into the random generator as a source of -entropy. - -Signed-off-by: Thomas Gleixner - ---- - drivers/char/random.c | 11 +++++------ - include/linux/irqdesc.h | 1 + - include/linux/random.h | 2 +- - kernel/irq/handle.c | 8 +++++++- - kernel/irq/manage.c | 6 ++++++ - 5 files changed, 20 insertions(+), 8 deletions(-) - ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -888,28 +888,27 @@ static __u32 get_reg(struct fast_pool *f - return *(ptr + f->reg_idx++); - } - --void add_interrupt_randomness(int irq, int irq_flags) -+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) - { - struct entropy_store *r; - struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); -- struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; -- __u64 ip; - unsigned long seed; - int credit = 0; - - if (cycles == 0) -- cycles = get_reg(fast_pool, regs); -+ cycles = get_reg(fast_pool, NULL); - c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; - j_high = (sizeof(now) > 4) ? now >> 32 : 0; - fast_pool->pool[0] ^= cycles ^ j_high ^ irq; - fast_pool->pool[1] ^= now ^ c_high; -- ip = regs ? instruction_pointer(regs) : _RET_IP_; -+ if (!ip) -+ ip = _RET_IP_; - fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : -- get_reg(fast_pool, regs); -+ get_reg(fast_pool, NULL); - - fast_mix(fast_pool); - add_interrupt_bench(cycles); ---- a/include/linux/irqdesc.h -+++ b/include/linux/irqdesc.h -@@ -61,6 +61,7 @@ struct irq_desc { - unsigned int irqs_unhandled; - atomic_t threads_handled; - int threads_handled_last; -+ u64 random_ip; - raw_spinlock_t lock; - struct cpumask *percpu_enabled; - #ifdef CONFIG_SMP ---- a/include/linux/random.h -+++ b/include/linux/random.h -@@ -20,7 +20,7 @@ struct random_ready_callback { - extern void add_device_randomness(const void *, unsigned int); - extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value); --extern void add_interrupt_randomness(int irq, int irq_flags); -+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); - - extern void get_random_bytes(void *buf, int nbytes); - extern int add_random_ready_callback(struct random_ready_callback *rdy); ---- a/kernel/irq/handle.c -+++ b/kernel/irq/handle.c -@@ -134,6 +134,8 @@ void __irq_wake_thread(struct irq_desc * - - irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) - { -+ struct pt_regs *regs = get_irq_regs(); -+ u64 ip = regs ? 
instruction_pointer(regs) : 0; - irqreturn_t retval = IRQ_NONE; - unsigned int flags = 0, irq = desc->irq_data.irq; - struct irqaction *action = desc->action; -@@ -175,7 +177,11 @@ irqreturn_t handle_irq_event_percpu(stru - action = action->next; - } while (action); - -- add_interrupt_randomness(irq, flags); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ desc->random_ip = ip; -+#else -+ add_interrupt_randomness(irq, flags, ip); -+#endif - - if (!noirqdebug) - note_interrupt(desc, retval); ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -1045,6 +1045,12 @@ static int irq_thread(void *data) - if (action_ret == IRQ_WAKE_THREAD) - irq_wake_secondary(desc, action); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ migrate_disable(); -+ add_interrupt_randomness(action->irq, 0, -+ desc->random_ip ^ (unsigned long) action); -+ migrate_enable(); -+#endif - wake_threads_waitq(desc); - } - diff --git a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch deleted file mode 100644 index 85afe3b67..000000000 --- a/debian/patches/features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch +++ /dev/null @@ -1,434 +0,0 @@ -From: "Paul E. McKenney" -Date: Mon, 4 Nov 2013 13:21:10 -0800 -Subject: rcu: Eliminate softirq processing from rcutree -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Running RCU out of softirq is a problem for some workloads that would -like to manage RCU core processing independently of other softirq work, -for example, setting kthread priority. This commit therefore moves the -RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread -named rcuc. The SCHED_OTHER approach avoids the scalability problems -that appeared with the earlier attempt to move RCU core processing to -from softirq to kthreads. That said, kernels built with RCU_BOOST=y -will run the rcuc kthreads at the RCU-boosting priority. - -Reported-by: Thomas Gleixner -Tested-by: Mike Galbraith -Signed-off-by: Paul E. McKenney -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree.c | 110 ++++++++++++++++++++++++++++++--- - kernel/rcu/tree.h | 5 - - kernel/rcu/tree_plugin.h | 153 ++++++----------------------------------------- - 3 files changed, 122 insertions(+), 146 deletions(-) - ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -56,6 +56,11 @@ - #include - #include - #include -+#include -+#include -+#include -+#include -+#include "../time/tick-internal.h" - - #include "tree.h" - #include "rcu.h" -@@ -2956,18 +2961,17 @@ static void - /* - * Do RCU core processing for the current CPU. - */ --static void rcu_process_callbacks(struct softirq_action *unused) -+static void rcu_process_callbacks(void) - { - struct rcu_state *rsp; - - if (cpu_is_offline(smp_processor_id())) - return; -- trace_rcu_utilization(TPS("Start RCU core")); - for_each_rcu_flavor(rsp) - __rcu_process_callbacks(rsp); -- trace_rcu_utilization(TPS("End RCU core")); - } - -+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); - /* - * Schedule RCU callback invocation. 
If the specified type of RCU - * does not support RCU priority boosting, just do a direct call, -@@ -2979,18 +2983,105 @@ static void invoke_rcu_callbacks(struct - { - if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) - return; -- if (likely(!rsp->boost)) { -- rcu_do_batch(rsp, rdp); -+ rcu_do_batch(rsp, rdp); -+} -+ -+static void rcu_wake_cond(struct task_struct *t, int status) -+{ -+ /* -+ * If the thread is yielding, only wake it when this -+ * is invoked from idle -+ */ -+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) -+ wake_up_process(t); -+} -+ -+/* -+ * Wake up this CPU's rcuc kthread to do RCU core processing. -+ */ -+static void invoke_rcu_core(void) -+{ -+ unsigned long flags; -+ struct task_struct *t; -+ -+ if (!cpu_online(smp_processor_id())) - return; -+ local_irq_save(flags); -+ __this_cpu_write(rcu_cpu_has_work, 1); -+ t = __this_cpu_read(rcu_cpu_kthread_task); -+ if (t != NULL && current != t) -+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); -+ local_irq_restore(flags); -+} -+ -+static void rcu_cpu_kthread_park(unsigned int cpu) -+{ -+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -+} -+ -+static int rcu_cpu_kthread_should_run(unsigned int cpu) -+{ -+ return __this_cpu_read(rcu_cpu_has_work); -+} -+ -+/* -+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -+ * RCU softirq used in flavors and configurations of RCU that do not -+ * support RCU priority boosting. -+ */ -+static void rcu_cpu_kthread(unsigned int cpu) -+{ -+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -+ int spincnt; -+ -+ for (spincnt = 0; spincnt < 10; spincnt++) { -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -+ local_bh_disable(); -+ *statusp = RCU_KTHREAD_RUNNING; -+ this_cpu_inc(rcu_cpu_kthread_loops); -+ local_irq_disable(); -+ work = *workp; -+ *workp = 0; -+ local_irq_enable(); -+ if (work) -+ rcu_process_callbacks(); -+ local_bh_enable(); -+ if (*workp == 0) { -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -+ *statusp = RCU_KTHREAD_WAITING; -+ return; -+ } - } -- invoke_rcu_callbacks_kthread(); -+ *statusp = RCU_KTHREAD_YIELDING; -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -+ schedule_timeout_interruptible(2); -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -+ *statusp = RCU_KTHREAD_WAITING; - } - --static void invoke_rcu_core(void) -+static struct smp_hotplug_thread rcu_cpu_thread_spec = { -+ .store = &rcu_cpu_kthread_task, -+ .thread_should_run = rcu_cpu_kthread_should_run, -+ .thread_fn = rcu_cpu_kthread, -+ .thread_comm = "rcuc/%u", -+ .setup = rcu_cpu_kthread_setup, -+ .park = rcu_cpu_kthread_park, -+}; -+ -+/* -+ * Spawn per-CPU RCU core processing kthreads. -+ */ -+static int __init rcu_spawn_core_kthreads(void) - { -- if (cpu_online(smp_processor_id())) -- raise_softirq(RCU_SOFTIRQ); -+ int cpu; -+ -+ for_each_possible_cpu(cpu) -+ per_cpu(rcu_cpu_has_work, cpu) = 0; -+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); -+ return 0; - } -+early_initcall(rcu_spawn_core_kthreads); - - /* - * Handle any core-RCU processing required by a call_rcu() invocation. 
-@@ -4609,7 +4700,6 @@ void __init rcu_init(void) - if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_sched_state); - __rcu_init_preempt(); -- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); - - /* - * We don't need protection against CPU-hotplug here because ---- a/kernel/rcu/tree.h -+++ b/kernel/rcu/tree.h -@@ -563,12 +563,10 @@ extern struct rcu_state rcu_bh_state; - extern struct rcu_state rcu_preempt_state; - #endif /* #ifdef CONFIG_PREEMPT_RCU */ - --#ifdef CONFIG_RCU_BOOST - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); - DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); - DECLARE_PER_CPU(char, rcu_cpu_has_work); --#endif /* #ifdef CONFIG_RCU_BOOST */ - - #ifndef RCU_TREE_NONCORE - -@@ -588,10 +586,9 @@ void call_rcu(struct rcu_head *head, rcu - static void __init __rcu_init_preempt(void); - static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); - static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); --static void invoke_rcu_callbacks_kthread(void); - static bool rcu_is_callbacks_kthread(void); -+static void rcu_cpu_kthread_setup(unsigned int cpu); - #ifdef CONFIG_RCU_BOOST --static void rcu_preempt_do_callbacks(void); - static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp); - #endif /* #ifdef CONFIG_RCU_BOOST */ ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -24,26 +24,10 @@ - * Paul E. McKenney - */ - --#include --#include --#include --#include --#include --#include "../time/tick-internal.h" -- - #ifdef CONFIG_RCU_BOOST - - #include "../locking/rtmutex_common.h" - --/* -- * Control variables for per-CPU and per-rcu_node kthreads. These -- * handle all flavors of RCU. -- */ --static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); --DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); --DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); --DEFINE_PER_CPU(char, rcu_cpu_has_work); -- - #else /* #ifdef CONFIG_RCU_BOOST */ - - /* -@@ -56,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work); - - #endif /* #else #ifdef CONFIG_RCU_BOOST */ - -+/* -+ * Control variables for per-CPU and per-rcu_node kthreads. These -+ * handle all flavors of RCU. -+ */ -+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); -+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); -+DEFINE_PER_CPU(char, rcu_cpu_has_work); -+ - #ifdef CONFIG_RCU_NOCB_CPU - static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ - static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ -@@ -646,15 +638,6 @@ static void rcu_preempt_check_callbacks( - t->rcu_read_unlock_special.b.need_qs = true; - } - --#ifdef CONFIG_RCU_BOOST -- --static void rcu_preempt_do_callbacks(void) --{ -- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); --} -- --#endif /* #ifdef CONFIG_RCU_BOOST */ -- - /* - * Queue a preemptible-RCU callback for invocation after a grace period. - */ -@@ -931,6 +914,19 @@ void exit_rcu(void) - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+/* -+ * If boosting, set rcuc kthreads to realtime priority. 
-+ */ -+static void rcu_cpu_kthread_setup(unsigned int cpu) -+{ -+#ifdef CONFIG_RCU_BOOST -+ struct sched_param sp; -+ -+ sp.sched_priority = kthread_prio; -+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -+#endif /* #ifdef CONFIG_RCU_BOOST */ -+} -+ - #ifdef CONFIG_RCU_BOOST - - #include "../locking/rtmutex_common.h" -@@ -962,16 +958,6 @@ static void rcu_initiate_boost_trace(str - - #endif /* #else #ifdef CONFIG_RCU_TRACE */ - --static void rcu_wake_cond(struct task_struct *t, int status) --{ -- /* -- * If the thread is yielding, only wake it when this -- * is invoked from idle -- */ -- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) -- wake_up_process(t); --} -- - /* - * Carry out RCU priority boosting on the task indicated by ->exp_tasks - * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1116,23 +1102,6 @@ static void rcu_initiate_boost(struct rc - } - - /* -- * Wake up the per-CPU kthread to invoke RCU callbacks. -- */ --static void invoke_rcu_callbacks_kthread(void) --{ -- unsigned long flags; -- -- local_irq_save(flags); -- __this_cpu_write(rcu_cpu_has_work, 1); -- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && -- current != __this_cpu_read(rcu_cpu_kthread_task)) { -- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), -- __this_cpu_read(rcu_cpu_kthread_status)); -- } -- local_irq_restore(flags); --} -- --/* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. - */ -@@ -1187,67 +1156,6 @@ static int rcu_spawn_one_boost_kthread(s - return 0; - } - --static void rcu_kthread_do_work(void) --{ -- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); -- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); -- rcu_preempt_do_callbacks(); --} -- --static void rcu_cpu_kthread_setup(unsigned int cpu) --{ -- struct sched_param sp; -- -- sp.sched_priority = kthread_prio; -- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); --} -- --static void rcu_cpu_kthread_park(unsigned int cpu) --{ -- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; --} -- --static int rcu_cpu_kthread_should_run(unsigned int cpu) --{ -- return __this_cpu_read(rcu_cpu_has_work); --} -- --/* -- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -- * RCU softirq used in flavors and configurations of RCU that do not -- * support RCU priority boosting. -- */ --static void rcu_cpu_kthread(unsigned int cpu) --{ -- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -- int spincnt; -- -- for (spincnt = 0; spincnt < 10; spincnt++) { -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -- local_bh_disable(); -- *statusp = RCU_KTHREAD_RUNNING; -- this_cpu_inc(rcu_cpu_kthread_loops); -- local_irq_disable(); -- work = *workp; -- *workp = 0; -- local_irq_enable(); -- if (work) -- rcu_kthread_do_work(); -- local_bh_enable(); -- if (*workp == 0) { -- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -- *statusp = RCU_KTHREAD_WAITING; -- return; -- } -- } -- *statusp = RCU_KTHREAD_YIELDING; -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -- schedule_timeout_interruptible(2); -- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -- *statusp = RCU_KTHREAD_WAITING; --} -- - /* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. 
The CPU hotplug lock is still -@@ -1277,26 +1185,12 @@ static void rcu_boost_kthread_setaffinit - free_cpumask_var(cm); - } - --static struct smp_hotplug_thread rcu_cpu_thread_spec = { -- .store = &rcu_cpu_kthread_task, -- .thread_should_run = rcu_cpu_kthread_should_run, -- .thread_fn = rcu_cpu_kthread, -- .thread_comm = "rcuc/%u", -- .setup = rcu_cpu_kthread_setup, -- .park = rcu_cpu_kthread_park, --}; -- - /* - * Spawn boost kthreads -- called as soon as the scheduler is running. - */ - static void __init rcu_spawn_boost_kthreads(void) - { - struct rcu_node *rnp; -- int cpu; -- -- for_each_possible_cpu(cpu) -- per_cpu(rcu_cpu_has_work, cpu) = 0; -- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rcu_for_each_leaf_node(rcu_state_p, rnp) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); - } -@@ -1319,11 +1213,6 @@ static void rcu_initiate_boost(struct rc - raw_spin_unlock_irqrestore(&rnp->lock, flags); - } - --static void invoke_rcu_callbacks_kthread(void) --{ -- WARN_ON_ONCE(1); --} -- - static bool rcu_is_callbacks_kthread(void) - { - return false; diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch deleted file mode 100644 index ef2896666..000000000 --- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch +++ /dev/null @@ -1,25 +0,0 @@ -Subject: rcu: Disable RCU_FAST_NO_HZ on RT -From: Thomas Gleixner -Date: Sun, 28 Oct 2012 13:26:09 +0000 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This uses a timer_list timer from the irq disabled guts of the idle -code. Disable it for now to prevent wreckage. - -Signed-off-by: Thomas Gleixner - ---- - init/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -614,7 +614,7 @@ config RCU_FANOUT_LEAF - - config RCU_FAST_NO_HZ - bool "Accelerate last non-dyntick-idle CPU's grace periods" -- depends on NO_HZ_COMMON && SMP && RCU_EXPERT -+ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL - default n - help - This option permits CPUs to enter dynticks-idle state even if diff --git a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch deleted file mode 100644 index d9deea5f7..000000000 --- a/debian/patches/features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 21 Mar 2014 20:19:05 +0100 -Subject: rcu: make RCU_BOOST default on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Since it is no longer invoked from the softirq people run into OOM more -often if the priority of the RCU thread is too low. Making boosting -default on RT should help in those case and it can be switched off if -someone knows better. - -Signed-off-by: Sebastian Andrzej Siewior ---- - init/Kconfig | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -498,7 +498,7 @@ config TINY_RCU - - config RCU_EXPERT - bool "Make expert-level adjustments to RCU configuration" -- default n -+ default y if PREEMPT_RT_FULL - help - This option needs to be enabled if you wish to make - expert-level adjustments to RCU configuration. 
By default, -@@ -641,7 +641,7 @@ config TREE_RCU_TRACE - config RCU_BOOST - bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT -- default n -+ default y if PREEMPT_RT_FULL - help - This option boosts the priority of preempted RCU readers that - block the current preemptible RCU grace period for too long. diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch deleted file mode 100644 index a6bdaab11..000000000 --- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +++ /dev/null @@ -1,270 +0,0 @@ -Subject: rcu: Merge RCU-bh into RCU-preempt -Date: Wed, 5 Oct 2011 11:59:38 -0700 -From: Thomas Gleixner -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The Linux kernel has long RCU-bh read-side critical sections that -intolerably increase scheduling latency under mainline's RCU-bh rules, -which include RCU-bh read-side critical sections being non-preemptible. -This patch therefore arranges for RCU-bh to be implemented in terms of -RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. - -This has the downside of defeating the purpose of RCU-bh, namely, -handling the case where the system is subjected to a network-based -denial-of-service attack that keeps at least one CPU doing full-time -softirq processing. This issue will be fixed by a later commit. - -The current commit will need some work to make it appropriate for -mainline use, for example, it needs to be extended to cover Tiny RCU. - -[ paulmck: Added a useful changelog ] - -Signed-off-by: Thomas Gleixner -Signed-off-by: Paul E. McKenney -Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com -Signed-off-by: Thomas Gleixner - ---- - include/linux/rcupdate.h | 23 +++++++++++++++++++++++ - include/linux/rcutree.h | 18 ++++++++++++++++-- - kernel/rcu/tree.c | 16 ++++++++++++++++ - kernel/rcu/update.c | 2 ++ - 4 files changed, 57 insertions(+), 2 deletions(-) - ---- a/include/linux/rcupdate.h -+++ b/include/linux/rcupdate.h -@@ -169,6 +169,9 @@ void call_rcu(struct rcu_head *head, - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define call_rcu_bh call_rcu -+#else - /** - * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. -@@ -192,6 +195,7 @@ void call_rcu(struct rcu_head *head, - */ - void call_rcu_bh(struct rcu_head *head, - rcu_callback_t func); -+#endif - - /** - * call_rcu_sched() - Queue an RCU for invocation after sched grace period. -@@ -330,7 +334,11 @@ static inline int rcu_preempt_depth(void - void rcu_init(void); - void rcu_end_inkernel_boot(void); - void rcu_sched_qs(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline void rcu_bh_qs(void) { } -+#else - void rcu_bh_qs(void); -+#endif - void rcu_check_callbacks(int user); - struct notifier_block; - int rcu_cpu_notify(struct notifier_block *self, -@@ -496,7 +504,14 @@ extern struct lockdep_map rcu_callback_m - int debug_lockdep_rcu_enabled(void); - - int rcu_read_lock_held(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int rcu_read_lock_bh_held(void) -+{ -+ return rcu_read_lock_held(); -+} -+#else - int rcu_read_lock_bh_held(void); -+#endif - - /** - * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? 
-@@ -944,10 +959,14 @@ static inline void rcu_read_unlock(void) - static inline void rcu_read_lock_bh(void) - { - local_bh_disable(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_lock(); -+#else - __acquire(RCU_BH); - rcu_lock_acquire(&rcu_bh_lock_map); - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_lock_bh() used illegally while idle"); -+#endif - } - - /* -@@ -957,10 +976,14 @@ static inline void rcu_read_lock_bh(void - */ - static inline void rcu_read_unlock_bh(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_unlock(); -+#else - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_unlock_bh() used illegally while idle"); - rcu_lock_release(&rcu_bh_lock_map); - __release(RCU_BH); -+#endif - local_bh_enable(); - } - ---- a/include/linux/rcutree.h -+++ b/include/linux/rcutree.h -@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context - rcu_note_context_switch(); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define synchronize_rcu_bh synchronize_rcu -+#else - void synchronize_rcu_bh(void); -+#endif - void synchronize_sched_expedited(void); - void synchronize_rcu_expedited(void); - -@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_ex - } - - void rcu_barrier(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define rcu_barrier_bh rcu_barrier -+#else - void rcu_barrier_bh(void); -+#endif - void rcu_barrier_sched(void); - unsigned long get_state_synchronize_rcu(void); - void cond_synchronize_rcu(unsigned long oldstate); -@@ -85,12 +93,10 @@ unsigned long rcu_batches_started(void); - unsigned long rcu_batches_started_bh(void); - unsigned long rcu_batches_started_sched(void); - unsigned long rcu_batches_completed(void); --unsigned long rcu_batches_completed_bh(void); - unsigned long rcu_batches_completed_sched(void); - void show_rcu_gp_kthreads(void); - - void rcu_force_quiescent_state(void); --void rcu_bh_force_quiescent_state(void); - void rcu_sched_force_quiescent_state(void); - - void rcu_idle_enter(void); -@@ -105,6 +111,14 @@ extern int rcu_scheduler_active __read_m - - bool rcu_is_watching(void); - -+#ifndef CONFIG_PREEMPT_RT_FULL -+void rcu_bh_force_quiescent_state(void); -+unsigned long rcu_batches_completed_bh(void); -+#else -+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+# define rcu_batches_completed_bh rcu_batches_completed -+#endif -+ - void rcu_all_qs(void); - - #endif /* __LINUX_RCUTREE_H */ ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -266,6 +266,7 @@ void rcu_sched_qs(void) - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void rcu_bh_qs(void) - { - if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { -@@ -275,6 +276,7 @@ void rcu_bh_qs(void) - __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); - } - } -+#endif - - static DEFINE_PER_CPU(int, rcu_sched_qs_mask); - -@@ -459,6 +461,7 @@ unsigned long rcu_batches_completed_sche - } - EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Return the number of RCU BH batches completed thus far for debug & stats. - */ -@@ -486,6 +489,13 @@ void rcu_bh_force_quiescent_state(void) - } - EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); - -+#else -+void rcu_force_quiescent_state(void) -+{ -+} -+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); -+#endif -+ - /* - * Force a quiescent state for RCU-sched. - */ -@@ -3114,6 +3124,7 @@ void call_rcu_sched(struct rcu_head *hea - } - EXPORT_SYMBOL_GPL(call_rcu_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Queue an RCU callback for invocation after a quicker grace period. 
- */ -@@ -3122,6 +3133,7 @@ void call_rcu_bh(struct rcu_head *head, - __call_rcu(head, func, &rcu_bh_state, -1, 0); - } - EXPORT_SYMBOL_GPL(call_rcu_bh); -+#endif - - /* - * Queue an RCU callback for lazy invocation after a grace period. -@@ -3213,6 +3225,7 @@ void synchronize_sched(void) - } - EXPORT_SYMBOL_GPL(synchronize_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. - * -@@ -3239,6 +3252,7 @@ void synchronize_rcu_bh(void) - wait_rcu_gp(call_rcu_bh); - } - EXPORT_SYMBOL_GPL(synchronize_rcu_bh); -+#endif - - /** - * get_state_synchronize_rcu - Snapshot current RCU state -@@ -4101,6 +4115,7 @@ static void _rcu_barrier(struct rcu_stat - mutex_unlock(&rsp->barrier_mutex); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. - */ -@@ -4109,6 +4124,7 @@ void rcu_barrier_bh(void) - _rcu_barrier(&rcu_bh_state); - } - EXPORT_SYMBOL_GPL(rcu_barrier_bh); -+#endif - - /** - * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. ---- a/kernel/rcu/update.c -+++ b/kernel/rcu/update.c -@@ -276,6 +276,7 @@ int rcu_read_lock_held(void) - } - EXPORT_SYMBOL_GPL(rcu_read_lock_held); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? - * -@@ -302,6 +303,7 @@ int rcu_read_lock_bh_held(void) - return in_softirq() || irqs_disabled(); - } - EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); -+#endif - - #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ - diff --git a/debian/patches/features/all/rt/rcu-more-swait-conversions.patch b/debian/patches/features/all/rt/rcu-more-swait-conversions.patch deleted file mode 100644 index 9d8051ce1..000000000 --- a/debian/patches/features/all/rt/rcu-more-swait-conversions.patch +++ /dev/null @@ -1,213 +0,0 @@ -From: Thomas Gleixner -Date: Wed, 31 Jul 2013 19:00:35 +0200 -Subject: rcu: use simple waitqueues -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Convert RCU's wait-queues into simple waitqueues. - -Signed-off-by: Thomas Gleixner - -Merged Steven's - - static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { -- swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); -+ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); - } - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree.c | 16 ++++++++-------- - kernel/rcu/tree.h | 9 +++++---- - kernel/rcu/tree_plugin.h | 18 +++++++++--------- - 3 files changed, 22 insertions(+), 21 deletions(-) - ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -1633,7 +1633,7 @@ static void rcu_gp_kthread_wake(struct r - !READ_ONCE(rsp->gp_flags) || - !rsp->gp_kthread) - return; -- wake_up(&rsp->gp_wq); -+ swait_wake(&rsp->gp_wq); - } - - /* -@@ -2098,7 +2098,7 @@ static int __noreturn rcu_gp_kthread(voi - READ_ONCE(rsp->gpnum), - TPS("reqwait")); - rsp->gp_state = RCU_GP_WAIT_GPS; -- wait_event_interruptible(rsp->gp_wq, -+ swait_event_interruptible(rsp->gp_wq, - READ_ONCE(rsp->gp_flags) & - RCU_GP_FLAG_INIT); - rsp->gp_state = RCU_GP_DONE_GPS; -@@ -2128,7 +2128,7 @@ static int __noreturn rcu_gp_kthread(voi - READ_ONCE(rsp->gpnum), - TPS("fqswait")); - rsp->gp_state = RCU_GP_WAIT_FQS; -- ret = wait_event_interruptible_timeout(rsp->gp_wq, -+ ret = swait_event_interruptible_timeout(rsp->gp_wq, - rcu_gp_fqs_check_wake(rsp, &gf), j); - rsp->gp_state = RCU_GP_DOING_FQS; - /* Locking provides needed memory barriers. 
*/ -@@ -3550,7 +3550,7 @@ static void __rcu_report_exp_rnp(struct - raw_spin_unlock_irqrestore(&rnp->lock, flags); - if (wake) { - smp_mb(); /* EGP done before wake_up(). */ -- wake_up(&rsp->expedited_wq); -+ swait_wake(&rsp->expedited_wq); - } - break; - } -@@ -3807,7 +3807,7 @@ static void synchronize_sched_expedited_ - jiffies_start = jiffies; - - for (;;) { -- ret = wait_event_interruptible_timeout( -+ ret = swait_event_interruptible_timeout( - rsp->expedited_wq, - sync_rcu_preempt_exp_done(rnp_root), - jiffies_stall); -@@ -3815,7 +3815,7 @@ static void synchronize_sched_expedited_ - return; - if (ret < 0) { - /* Hit a signal, disable CPU stall warnings. */ -- wait_event(rsp->expedited_wq, -+ swait_event(rsp->expedited_wq, - sync_rcu_preempt_exp_done(rnp_root)); - return; - } -@@ -4483,8 +4483,8 @@ static void __init rcu_init_one(struct r - } - } - -- init_waitqueue_head(&rsp->gp_wq); -- init_waitqueue_head(&rsp->expedited_wq); -+ init_swait_head(&rsp->gp_wq); -+ init_swait_head(&rsp->expedited_wq); - rnp = rsp->level[rcu_num_lvls - 1]; - for_each_possible_cpu(i) { - while (i > rnp->grphi) ---- a/kernel/rcu/tree.h -+++ b/kernel/rcu/tree.h -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - - /* - * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and -@@ -241,7 +242,7 @@ struct rcu_node { - /* Refused to boost: not sure why, though. */ - /* This can happen due to race conditions. */ - #ifdef CONFIG_RCU_NOCB_CPU -- wait_queue_head_t nocb_gp_wq[2]; -+ struct swait_head nocb_gp_wq[2]; - /* Place for rcu_nocb_kthread() to wait GP. */ - #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ - int need_future_gp[2]; -@@ -393,7 +394,7 @@ struct rcu_data { - atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ - struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ - struct rcu_head **nocb_follower_tail; -- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ -+ struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */ - struct task_struct *nocb_kthread; - int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ - -@@ -472,7 +473,7 @@ struct rcu_state { - unsigned long gpnum; /* Current gp number. */ - unsigned long completed; /* # of last completed gp. */ - struct task_struct *gp_kthread; /* Task for grace periods. */ -- wait_queue_head_t gp_wq; /* Where GP task waits. */ -+ struct swait_head gp_wq; /* Where GP task waits. */ - short gp_flags; /* Commands for GP task. */ - short gp_state; /* GP kthread sleep state. */ - -@@ -504,7 +505,7 @@ struct rcu_state { - atomic_long_t expedited_workdone3; /* # done by others #3. */ - atomic_long_t expedited_normal; /* # fallbacks to normal. */ - atomic_t expedited_need_qs; /* # CPUs left to check in. */ -- wait_queue_head_t expedited_wq; /* Wait for check-ins. */ -+ struct swait_head expedited_wq; /* Wait for check-ins. */ - int ncpus_snap; /* # CPUs seen last time. 
*/ - - unsigned long jiffies_force_qs; /* Time at which to invoke */ ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -1830,7 +1830,7 @@ early_param("rcu_nocb_poll", parse_rcu_n - */ - static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) - { -- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); -+ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); - } - - /* -@@ -1848,8 +1848,8 @@ static void rcu_nocb_gp_set(struct rcu_n - - static void rcu_init_one_nocb(struct rcu_node *rnp) - { -- init_waitqueue_head(&rnp->nocb_gp_wq[0]); -- init_waitqueue_head(&rnp->nocb_gp_wq[1]); -+ init_swait_head(&rnp->nocb_gp_wq[0]); -+ init_swait_head(&rnp->nocb_gp_wq[1]); - } - - #ifndef CONFIG_RCU_NOCB_CPU_ALL -@@ -1874,7 +1874,7 @@ static void wake_nocb_leader(struct rcu_ - if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { - /* Prior smp_mb__after_atomic() orders against prior enqueue. */ - WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); -- wake_up(&rdp_leader->nocb_wq); -+ swait_wake(&rdp_leader->nocb_wq); - } - } - -@@ -2087,7 +2087,7 @@ static void rcu_nocb_wait_gp(struct rcu_ - */ - trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); - for (;;) { -- wait_event_interruptible( -+ swait_event_interruptible( - rnp->nocb_gp_wq[c & 0x1], - (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); - if (likely(d)) -@@ -2115,7 +2115,7 @@ static void nocb_leader_wait(struct rcu_ - /* Wait for callbacks to appear. */ - if (!rcu_nocb_poll) { - trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); -- wait_event_interruptible(my_rdp->nocb_wq, -+ swait_event_interruptible(my_rdp->nocb_wq, - !READ_ONCE(my_rdp->nocb_leader_sleep)); - /* Memory barrier handled by smp_mb() calls below and repoll. */ - } else if (firsttime) { -@@ -2190,7 +2190,7 @@ static void nocb_leader_wait(struct rcu_ - * List was empty, wake up the follower. - * Memory barriers supplied by atomic_long_add(). - */ -- wake_up(&rdp->nocb_wq); -+ swait_wake(&rdp->nocb_wq); - } - } - -@@ -2211,7 +2211,7 @@ static void nocb_follower_wait(struct rc - if (!rcu_nocb_poll) { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, - "FollowerSleep"); -- wait_event_interruptible(rdp->nocb_wq, -+ swait_event_interruptible(rdp->nocb_wq, - READ_ONCE(rdp->nocb_follower_head)); - } else if (firsttime) { - /* Don't drown trace log with "Poll"! */ -@@ -2370,7 +2370,7 @@ void __init rcu_init_nohz(void) - static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) - { - rdp->nocb_tail = &rdp->nocb_head; -- init_waitqueue_head(&rdp->nocb_wq); -+ init_swait_head(&rdp->nocb_wq); - rdp->nocb_follower_tail = &rdp->nocb_follower_head; - } - diff --git a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch deleted file mode 100644 index 2e22c5df6..000000000 --- a/debian/patches/features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch +++ /dev/null @@ -1,49 +0,0 @@ -From: Tiejun Chen -Date: Wed, 18 Dec 2013 17:51:49 +0800 -Subject: rcutree/rcu_bh_qs: Disable irq while calling rcu_preempt_qs() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Any callers to the function rcu_preempt_qs() must disable irqs in -order to protect the assignment to ->rcu_read_unlock_special. 
In -RT case, rcu_bh_qs() as the wrapper of rcu_preempt_qs() is called -in some scenarios where irq is enabled, like this path, - -do_single_softirq() - | - + local_irq_enable(); - + handle_softirq() - | | - | + rcu_bh_qs() - | | - | + rcu_preempt_qs() - | - + local_irq_disable() - -So here we'd better disable irq directly inside of rcu_bh_qs() to -fix this, otherwise the kernel may be freezable sometimes as -observed. And especially this way is also kind and safe for the -potential rcu_bh_qs() usage elsewhere in the future. - - -Signed-off-by: Tiejun Chen -Signed-off-by: Bin Jiang -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -271,7 +271,12 @@ static void rcu_preempt_qs(void); - - void rcu_bh_qs(void) - { -+ unsigned long flags; -+ -+ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ -+ local_irq_save(flags); - rcu_preempt_qs(); -+ local_irq_restore(flags); - } - #else - void rcu_bh_qs(void) diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch deleted file mode 100644 index 104c20f50..000000000 --- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Yong Zhang -Date: Thu, 28 Jul 2011 11:16:00 +0800 -Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When retry happens, it's likely that the task has been migrated to -another cpu (except unplug failed), but it still derefernces the -original hotplug_pcp per cpu data. - -Update the pointer to hotplug_pcp in the retry path, so it points to -the current cpu. - -Signed-off-by: Yong Zhang -Cc: Peter Zijlstra -Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com -Signed-off-by: Thomas Gleixner ---- - kernel/cpu.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -107,9 +107,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp - */ - void pin_current_cpu(void) - { -- struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); -+ struct hotplug_pcp *hp; - - retry: -+ hp = this_cpu_ptr(&hotplug_pcp); -+ - if (!hp->unplug || hp->refcount || preempt_count() > 1 || - hp->unplug == current) { - hp->refcount++; diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch deleted file mode 100644 index 377e87570..000000000 --- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch +++ /dev/null @@ -1,69 +0,0 @@ -Subject: ARM: Initialize split page table locks for vector page -From: Frank Rowand -Date: Sat, 1 Oct 2011 18:58:13 -0700 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if -PREEMPT_RT_FULL=y because vectors_user_mapping() creates a -VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no -ptl->lock has been allocated for the page. An attempt to coredump -that page will result in a kernel NULL pointer dereference when -follow_page() attempts to lock the page. 
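With CONFIG_SPLIT_PTLOCK_CPUS in effect, every page-table page carries
its own spinlock, set up by pgtable_page_ctor(). pte_offset_map_lock()
then expands to roughly the following (a hedged sketch, not the exact
mainline macro):

	ptl = pte_lockptr(mm, pmd);	/* per-page lock of the PTE page */
	pte = pte_offset_map(pmd, addr);
	spin_lock(ptl);			/* oopses if the ctor never ran */

The vector page's page table was never passed through the ctor, so the
hunk below adds a late_initcall() that runs pgtable_page_ctor() on the
page backing its PTEs.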
- -The call tree to the NULL pointer dereference is: - - do_notify_resume() - get_signal_to_deliver() - do_coredump() - elf_core_dump() - get_dump_page() - __get_user_pages() - follow_page() - pte_offset_map_lock() <----- a #define - ... - rt_spin_lock() - -The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. - -Signed-off-by: Frank Rowand -Cc: Frank -Cc: Peter Zijlstra -Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com -Signed-off-by: Thomas Gleixner ---- - arch/arm/kernel/process.c | 24 ++++++++++++++++++++++++ - 1 file changed, 24 insertions(+) - ---- a/arch/arm/kernel/process.c -+++ b/arch/arm/kernel/process.c -@@ -319,6 +319,30 @@ unsigned long arch_randomize_brk(struct - } - - #ifdef CONFIG_MMU -+/* -+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not -+ * initialized by pgtable_page_ctor() then a coredump of the vector page will -+ * fail. -+ */ -+static int __init vectors_user_mapping_init_page(void) -+{ -+ struct page *page; -+ unsigned long addr = 0xffff0000; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ page = pmd_page(*(pmd)); -+ -+ pgtable_page_ctor(page); -+ -+ return 0; -+} -+late_initcall(vectors_user_mapping_init_page); -+ - #ifdef CONFIG_KUSER_HELPERS - /* - * The vectors page is always readable from user space for the diff --git a/debian/patches/features/all/rt/relay-fix-timer-madness.patch b/debian/patches/features/all/rt/relay-fix-timer-madness.patch deleted file mode 100644 index 750dc0998..000000000 --- a/debian/patches/features/all/rt/relay-fix-timer-madness.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:44:07 -0500 -Subject: relay: Fix timer madness -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -remove timer calls (!!!) from deep within the tracing infrastructure. -This was totally bogus code that can cause lockups and worse. Poll -the buffer every 2 jiffies for now. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - kernel/relay.c | 14 +++++--------- - 1 file changed, 5 insertions(+), 9 deletions(-) - ---- a/kernel/relay.c -+++ b/kernel/relay.c -@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long - { - struct rchan_buf *buf = (struct rchan_buf *)data; - wake_up_interruptible(&buf->read_wait); -+ /* -+ * Stupid polling for now: -+ */ -+ mod_timer(&buf->timer, jiffies + 1); - } - - /** -@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_b - init_waitqueue_head(&buf->read_wait); - kref_init(&buf->kref); - setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); -+ mod_timer(&buf->timer, jiffies + 1); - } else - del_timer_sync(&buf->timer); - -@@ -736,15 +741,6 @@ size_t relay_switch_subbuf(struct rchan_ - else - buf->early_bytes += buf->chan->subbuf_size - - buf->padding[old_subbuf]; -- smp_mb(); -- if (waitqueue_active(&buf->read_wait)) -- /* -- * Calling wake_up_interruptible() from here -- * will deadlock if we happen to be logging -- * from the scheduler (trying to re-grab -- * rq->lock), so defer it. 
-- */ -- mod_timer(&buf->timer, jiffies + 1); - } - - old = buf->data; diff --git a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch deleted file mode 100644 index 7cb8205c9..000000000 --- a/debian/patches/features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch +++ /dev/null @@ -1,96 +0,0 @@ -Subject: ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die() -From: Grygorii Strashko -Date: Fri, 11 Sep 2015 21:21:23 +0300 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When running with the RT-kernel (4.1.5-rt5) on TI OMAP dra7-evm and trying -to do Suspend to RAM, the following backtrace occurs: - - Disabling non-boot CPUs ... - PM: noirq suspend of devices complete after 7.295 msecs - Disabling non-boot CPUs ... - BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917 - in_atomic(): 1, irqs_disabled(): 128, pid: 18, name: migration/1 - INFO: lockdep is turned off. - irq event stamp: 122 - hardirqs last enabled at (121): [] _raw_spin_unlock_irqrestore+0x88/0x90 - hardirqs last disabled at (122): [] _raw_spin_lock_irq+0x28/0x5c - softirqs last enabled at (0): [] copy_process.part.52+0x410/0x19d8 - softirqs last disabled at (0): [< (null)>] (null) - Preemption disabled at:[< (null)>] (null) - CPU: 1 PID: 18 Comm: migration/1 Tainted: G W 4.1.4-rt3-01046-g96ac8da #204 - Hardware name: Generic DRA74X (Flattened Device Tree) - [] (unwind_backtrace) from [] (show_stack+0x20/0x24) - [] (show_stack) from [] (dump_stack+0x88/0xdc) - [] (dump_stack) from [] (___might_sleep+0x198/0x2a8) - [] (___might_sleep) from [] (rt_spin_lock+0x30/0x70) - [] (rt_spin_lock) from [] (find_lock_task_mm+0x9c/0x174) - [] (find_lock_task_mm) from [] (clear_tasks_mm_cpumask+0xb4/0x1ac) - [] (clear_tasks_mm_cpumask) from [] (__cpu_disable+0x98/0xbc) - [] (__cpu_disable) from [] (take_cpu_down+0x1c/0x50) - [] (take_cpu_down) from [] (multi_cpu_stop+0x11c/0x158) - [] (multi_cpu_stop) from [] (cpu_stopper_thread+0xc4/0x184) - [] (cpu_stopper_thread) from [] (smpboot_thread_fn+0x18c/0x324) - [] (smpboot_thread_fn) from [] (kthread+0xe8/0x104) - [] (kthread) from [] (ret_from_fork+0x14/0x3c) - CPU1: shutdown - PM: Calling sched_clock_suspend+0x0/0x40 - PM: Calling timekeeping_suspend+0x0/0x2e0 - PM: Calling irq_gc_suspend+0x0/0x68 - PM: Calling fw_suspend+0x0/0x2c - PM: Calling cpu_pm_suspend+0x0/0x28 - -Also, sometimes system stucks right after displaying "Disabling non-boot -CPUs ...". The root cause of above backtrace is task_lock() which takes -a sleeping lock on -RT. - -To fix the issue, move clear_tasks_mm_cpumask() call from __cpu_disable() -to __cpu_die() which is called on the thread which is asking for a target -CPU to be shutdown. In addition, this change restores CPUhotplug functionality -on TI OMAP dra7-evm and CPU1 can be unplugged/plugged many times. - -Signed-off-by: Grygorii Strashko -Cc: Steven Rostedt -Cc: -Cc: Sekhar Nori -Cc: Austin Schuh -Cc: -Cc: Russell King -Cc: -Cc: stable-rt@vger.kernel.org -Link: http://lkml.kernel.org/r/1441995683-30817-1-git-send-email-grygorii.strashko@ti.com -Signed-off-by: Thomas Gleixner ---- - -RFC: I'm not sure how safe this change is and will be appreciated for any comments. 
-Most of arches call clear_tasks_mm_cpumask() from __cpu_disable(), but *powerpc* -calls it from CPU_DEAD notifier. This patch follows powerpc's approach in -general. - -This issue was first reported in: - http://www.spinics.net/lists/linux-rt-users/msg13752.html - - arch/arm/kernel/smp.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - ---- a/arch/arm/kernel/smp.c -+++ b/arch/arm/kernel/smp.c -@@ -230,8 +230,6 @@ int __cpu_disable(void) - flush_cache_louis(); - local_flush_tlb_all(); - -- clear_tasks_mm_cpumask(cpu); -- - return 0; - } - -@@ -247,6 +245,9 @@ void __cpu_die(unsigned int cpu) - pr_err("CPU%u: cpu didn't die\n", cpu); - return; - } -+ -+ clear_tasks_mm_cpumask(cpu); -+ - pr_notice("CPU%u: shutdown\n", cpu); - - /* diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch deleted file mode 100644 index 18a3d3f88..000000000 --- a/debian/patches/features/all/rt/rt-add-rt-locks.patch +++ /dev/null @@ -1,2173 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 26 Jul 2009 19:39:56 +0200 -Subject: rt: Add the preempt-rt lock replacement APIs -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex -based locking functions for preempt-rt. -This also introduces RT's sleeping locks. - -Signed-off-by: Thomas Gleixner - ---- - include/linux/kernel.h | 4 - include/linux/locallock.h | 6 - include/linux/mutex.h | 20 + - include/linux/mutex_rt.h | 84 ++++++ - include/linux/rtmutex.h | 29 +- - include/linux/rwlock_rt.h | 99 +++++++ - include/linux/rwlock_types_rt.h | 33 ++ - include/linux/rwsem.h | 6 - include/linux/rwsem_rt.h | 152 ++++++++++++ - include/linux/sched.h | 19 + - include/linux/spinlock.h | 12 - include/linux/spinlock_api_smp.h | 4 - include/linux/spinlock_rt.h | 173 +++++++++++++ - include/linux/spinlock_types.h | 11 - include/linux/spinlock_types_rt.h | 51 ++++ - kernel/futex.c | 10 - kernel/locking/Makefile | 9 - kernel/locking/rt.c | 476 ++++++++++++++++++++++++++++++++++++++ - kernel/locking/rtmutex.c | 423 +++++++++++++++++++++++++++++++-- - kernel/locking/rtmutex_common.h | 14 + - kernel/locking/spinlock.c | 7 - kernel/locking/spinlock_debug.c | 5 - kernel/sched/core.c | 7 - 23 files changed, 1598 insertions(+), 56 deletions(-) - ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -188,6 +188,9 @@ extern int _cond_resched(void); - */ - # define might_sleep() \ - do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) -+ -+# define might_sleep_no_state_check() \ -+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) - # define sched_annotate_sleep() (current->task_state_change = 0) - #else - static inline void ___might_sleep(const char *file, int line, -@@ -195,6 +198,7 @@ extern int _cond_resched(void); - static inline void __might_sleep(const char *file, int line, - int preempt_offset) { } - # define might_sleep() do { might_resched(); } while (0) -+# define might_sleep_no_state_check() do { might_resched(); } while (0) - # define sched_annotate_sleep() do { } while (0) - #endif - ---- a/include/linux/locallock.h -+++ b/include/linux/locallock.h -@@ -42,9 +42,15 @@ struct local_irq_lock { - * already takes care of the migrate_disable/enable - * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define spin_lock_local(lock) rt_spin_lock(lock) -+# define spin_trylock_local(lock) rt_spin_trylock(lock) -+# define spin_unlock_local(lock) rt_spin_unlock(lock) -+#else - # define spin_lock_local(lock) spin_lock(lock) - # define spin_trylock_local(lock) spin_trylock(lock) - # define spin_unlock_local(lock) spin_unlock(lock) -+#endif - - static inline void __local_lock(struct local_irq_lock *lv) - { ---- a/include/linux/mutex.h -+++ b/include/linux/mutex.h -@@ -19,6 +19,17 @@ - #include - #include - -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -+ , .dep_map = { .name = #lockname } -+#else -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -+#endif -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+ - /* - * Simple, straightforward mutexes with strict semantics: - * -@@ -99,13 +110,6 @@ do { \ - static inline void mutex_destroy(struct mutex *lock) {} - #endif - --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -- , .dep_map = { .name = #lockname } --#else --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) --#endif -- - #define __MUTEX_INITIALIZER(lockname) \ - { .count = ATOMIC_INIT(1) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ -@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killa - extern int mutex_trylock(struct mutex *lock); - extern void mutex_unlock(struct mutex *lock); - -+#endif /* !PREEMPT_RT_FULL */ -+ - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); - - #endif /* __LINUX_MUTEX_H */ ---- /dev/null -+++ b/include/linux/mutex_rt.h -@@ -0,0 +1,84 @@ -+#ifndef __LINUX_MUTEX_RT_H -+#define __LINUX_MUTEX_RT_H -+ -+#ifndef __LINUX_MUTEX_H -+#error "Please include mutex.h" -+#endif -+ -+#include -+ -+/* FIXME: Just for __lockfunc */ -+#include -+ -+struct mutex { -+ struct rt_mutex lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __MUTEX_INITIALIZER(mutexname) \ -+ { \ -+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ -+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ -+ } -+ -+#define DEFINE_MUTEX(mutexname) \ -+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -+ -+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); -+extern void __lockfunc _mutex_lock(struct mutex *lock); -+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); -+extern int __lockfunc _mutex_lock_killable(struct mutex *lock); -+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); -+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); -+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_trylock(struct mutex *lock); -+extern void __lockfunc _mutex_unlock(struct mutex *lock); -+ -+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) -+#define mutex_lock(l) _mutex_lock(l) -+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) -+#define mutex_lock_killable(l) _mutex_lock_killable(l) -+#define mutex_trylock(l) _mutex_trylock(l) -+#define mutex_unlock(l) _mutex_unlock(l) -+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible_nested(l, s) -+# 
define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable_nested(l, s) -+ -+# define mutex_lock_nest_lock(lock, nest_lock) \ -+do { \ -+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ -+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ -+} while (0) -+ -+#else -+# define mutex_lock_nested(l, s) _mutex_lock(l) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible(l) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable(l) -+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -+#endif -+ -+# define mutex_init(mutex) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), #mutex, &__key); \ -+} while (0) -+ -+# define __mutex_init(mutex, name, key) \ -+do { \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), name, key); \ -+} while (0) -+ -+#endif ---- a/include/linux/rtmutex.h -+++ b/include/linux/rtmutex.h -@@ -13,11 +13,15 @@ - #define __LINUX_RT_MUTEX_H - - #include --#include - #include -+#include - - extern int max_lock_depth; /* for sysctl */ - -+#ifdef CONFIG_DEBUG_MUTEXES -+#include -+#endif -+ - /** - * The rt_mutex structure - * -@@ -31,8 +35,8 @@ struct rt_mutex { - struct rb_root waiters; - struct rb_node *waiters_leftmost; - struct task_struct *owner; --#ifdef CONFIG_DEBUG_RT_MUTEXES - int save_state; -+#ifdef CONFIG_DEBUG_RT_MUTEXES - const char *name, *file; - int line; - void *magic; -@@ -55,22 +59,33 @@ struct hrtimer_sleeper; - # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) - #endif - -+# define rt_mutex_init(mutex) \ -+ do { \ -+ raw_spin_lock_init(&(mutex)->wait_lock); \ -+ __rt_mutex_init(mutex, #mutex); \ -+ } while (0) -+ - #ifdef CONFIG_DEBUG_RT_MUTEXES - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname, .file = __FILE__, .line = __LINE__ --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) - extern void rt_mutex_debug_task_free(struct task_struct *tsk); - #else - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) - # define rt_mutex_debug_task_free(t) do { } while (0) - #endif - --#define __RT_MUTEX_INITIALIZER(mutexname) \ -- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ -+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT \ - , .owner = NULL \ -- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} -+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -+ -+#define __RT_MUTEX_INITIALIZER(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } -+ -+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ , .save_state = 1 } - - #define DEFINE_RT_MUTEX(mutexname) \ - struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) ---- /dev/null -+++ b/include/linux/rwlock_rt.h -@@ -0,0 +1,99 @@ -+#ifndef __LINUX_RWLOCK_RT_H -+#define __LINUX_RWLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. 
Use spinlock.h -+#endif -+ -+#define rwlock_init(rwl) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(rwl)->lock); \ -+ __rt_rwlock_init(rwl, #rwl, &__key); \ -+} while (0) -+ -+extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_lock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); -+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); -+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); -+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); -+ -+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) -+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) -+ -+#define write_trylock_irqsave(lock, flags) \ -+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) -+ -+#define read_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_read_lock_irqsave(lock); \ -+ } while (0) -+ -+#define write_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_write_lock_irqsave(lock); \ -+ } while (0) -+ -+#define read_lock(lock) rt_read_lock(lock) -+ -+#define read_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_read_lock(lock); \ -+ } while (0) -+ -+#define read_lock_irq(lock) read_lock(lock) -+ -+#define write_lock(lock) rt_write_lock(lock) -+ -+#define write_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_write_lock(lock); \ -+ } while (0) -+ -+#define write_lock_irq(lock) write_lock(lock) -+ -+#define read_unlock(lock) rt_read_unlock(lock) -+ -+#define read_unlock_bh(lock) \ -+ do { \ -+ rt_read_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define read_unlock_irq(lock) read_unlock(lock) -+ -+#define write_unlock(lock) rt_write_unlock(lock) -+ -+#define write_unlock_bh(lock) \ -+ do { \ -+ rt_write_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define write_unlock_irq(lock) write_unlock(lock) -+ -+#define read_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_read_unlock(lock); \ -+ } while (0) -+ -+#define write_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_write_unlock(lock); \ -+ } while (0) -+ -+#endif ---- /dev/null -+++ b/include/linux/rwlock_types_rt.h -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_RWLOCK_TYPES_RT_H -+#define __LINUX_RWLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" -+#endif -+ -+/* -+ * rwlocks - rtmutex which allows single reader recursion -+ */ -+typedef struct { -+ struct rt_mutex lock; -+ int read_depth; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} rwlock_t; -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define RW_DEP_MAP_INIT(lockname) -+#endif -+ -+#define __RW_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ -+ RW_DEP_MAP_INIT(name) } -+ -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) -+ -+#endif ---- a/include/linux/rwsem.h -+++ b/include/linux/rwsem.h -@@ -18,6 +18,10 @@ - #include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#include -+#else /* PREEMPT_RT_FULL */ -+ - struct rw_semaphore; - - #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -@@ -177,4 +181,6 @@ extern void up_read_non_owner(struct rw_ - # define up_read_non_owner(sem) up_read(sem) - #endif - -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* _LINUX_RWSEM_H */ ---- /dev/null -+++ b/include/linux/rwsem_rt.h -@@ -0,0 +1,152 @@ -+#ifndef _LINUX_RWSEM_RT_H -+#define _LINUX_RWSEM_RT_H -+ -+#ifndef _LINUX_RWSEM_H -+#error "Include rwsem.h" -+#endif -+ -+/* -+ * RW-semaphores are a spinlock plus a reader-depth count. -+ * -+ * Note that the semantics are different from the usual -+ * Linux rw-sems, in PREEMPT_RT mode we do not allow -+ * multiple readers to hold the lock at once, we only allow -+ * a read-lock owner to read-lock recursively. This is -+ * better for latency, makes the implementation inherently -+ * fair and makes it simpler as well. -+ */ -+ -+#include -+ -+struct rw_semaphore { -+ struct rt_mutex lock; -+ int read_depth; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __RWSEM_INITIALIZER(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ -+ RW_DEP_MAP_INIT(name) } -+ -+#define DECLARE_RWSEM(lockname) \ -+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) -+ -+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key); -+ -+#define __rt_init_rwsem(sem, name, key) \ -+ do { \ -+ rt_mutex_init(&(sem)->lock); \ -+ __rt_rwsem_init((sem), (name), (key));\ -+ } while (0) -+ -+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) -+ -+# define rt_init_rwsem(sem) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __rt_init_rwsem((sem), #sem, &__key); \ -+} while (0) -+ -+extern void rt_down_write(struct rw_semaphore *rwsem); -+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, -+ struct lockdep_map *nest); -+extern void rt__down_read(struct rw_semaphore *rwsem); -+extern void rt_down_read(struct rw_semaphore *rwsem); -+extern int rt_down_write_trylock(struct rw_semaphore *rwsem); -+extern int rt__down_read_trylock(struct rw_semaphore *rwsem); -+extern int rt_down_read_trylock(struct rw_semaphore *rwsem); -+extern void __rt_up_read(struct rw_semaphore *rwsem); -+extern void rt_up_read(struct rw_semaphore *rwsem); -+extern void rt_up_write(struct rw_semaphore *rwsem); -+extern void rt_downgrade_write(struct rw_semaphore *rwsem); -+ -+#define init_rwsem(sem) rt_init_rwsem(sem) -+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) -+ -+static 
inline int rwsem_is_contended(struct rw_semaphore *sem) -+{ -+ /* rt_mutex_has_waiters() */ -+ return !RB_EMPTY_ROOT(&sem->lock.waiters); -+} -+ -+static inline void __down_read(struct rw_semaphore *sem) -+{ -+ rt__down_read(sem); -+} -+ -+static inline void down_read(struct rw_semaphore *sem) -+{ -+ rt_down_read(sem); -+} -+ -+static inline int __down_read_trylock(struct rw_semaphore *sem) -+{ -+ return rt__down_read_trylock(sem); -+} -+ -+static inline int down_read_trylock(struct rw_semaphore *sem) -+{ -+ return rt_down_read_trylock(sem); -+} -+ -+static inline void down_write(struct rw_semaphore *sem) -+{ -+ rt_down_write(sem); -+} -+ -+static inline int down_write_trylock(struct rw_semaphore *sem) -+{ -+ return rt_down_write_trylock(sem); -+} -+ -+static inline void __up_read(struct rw_semaphore *sem) -+{ -+ __rt_up_read(sem); -+} -+ -+static inline void up_read(struct rw_semaphore *sem) -+{ -+ rt_up_read(sem); -+} -+ -+static inline void up_write(struct rw_semaphore *sem) -+{ -+ rt_up_write(sem); -+} -+ -+static inline void downgrade_write(struct rw_semaphore *sem) -+{ -+ rt_downgrade_write(sem); -+} -+ -+static inline void down_read_nested(struct rw_semaphore *sem, int subclass) -+{ -+ return rt_down_read_nested(sem, subclass); -+} -+ -+static inline void down_write_nested(struct rw_semaphore *sem, int subclass) -+{ -+ rt_down_write_nested(sem, subclass); -+} -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ struct rw_semaphore *nest_lock) -+{ -+ rt_down_write_nested_lock(sem, &nest_lock->dep_map); -+} -+ -+#else -+ -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ struct rw_semaphore *nest_lock) -+{ -+ rt_down_write_nested_lock(sem, NULL); -+} -+#endif -+#endif ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -311,6 +311,11 @@ extern char ___assert_task_state[1 - 2*! 
- - #endif - -+#define __set_current_state_no_track(state_value) \ -+ do { current->state = (state_value); } while (0) -+#define set_current_state_no_track(state_value) \ -+ set_mb(current->state, (state_value)) -+ - /* Task command name length */ - #define TASK_COMM_LEN 16 - -@@ -969,8 +974,18 @@ struct wake_q_head { - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } - - extern void wake_q_add(struct wake_q_head *head, -- struct task_struct *task); --extern void wake_up_q(struct wake_q_head *head); -+ struct task_struct *task); -+extern void __wake_up_q(struct wake_q_head *head, bool sleeper); -+ -+static inline void wake_up_q(struct wake_q_head *head) -+{ -+ __wake_up_q(head, false); -+} -+ -+static inline void wake_up_q_sleeper(struct wake_q_head *head) -+{ -+ __wake_up_q(head, true); -+} - - /* - * sched-domains (multiprocessor balancing) declarations: ---- a/include/linux/spinlock.h -+++ b/include/linux/spinlock.h -@@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(ra - #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) - - /* Include rwlock functions */ --#include -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+# include -+#endif - - /* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(ra - # include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else /* PREEMPT_RT_FULL */ -+ - /* - * Map the spin_lock functions to the raw variants for PREEMPT_RT=n - */ -@@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t - #define atomic_dec_and_lock(atomic, lock) \ - __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) - -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* __LINUX_SPINLOCK_H */ ---- a/include/linux/spinlock_api_smp.h -+++ b/include/linux/spinlock_api_smp.h -@@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh( - return 0; - } - --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_API_SMP_H */ ---- /dev/null -+++ b/include/linux/spinlock_rt.h -@@ -0,0 +1,173 @@ -+#ifndef __LINUX_SPINLOCK_RT_H -+#define __LINUX_SPINLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. Use spinlock.h -+#endif -+ -+#include -+ -+extern void -+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); -+ -+#define spin_lock_init(slock) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(slock)->lock); \ -+ __rt_spin_lock_init(slock, #slock, &__key); \ -+} while (0) -+ -+extern void __lockfunc rt_spin_lock(spinlock_t *lock); -+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); -+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); -+extern void __lockfunc rt_spin_unlock(spinlock_t *lock); -+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); -+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock(spinlock_t *lock); -+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); -+ -+/* -+ * lockdep-less calls, for derived types like rwlock: -+ * (for trylock they can use rt_mutex_trylock() directly. 
-+ */ -+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); -+ -+#define spin_lock(lock) \ -+ do { \ -+ migrate_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ migrate_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_irq(lock) spin_lock(lock) -+ -+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) -+ -+#define spin_trylock(lock) \ -+({ \ -+ int __locked; \ -+ migrate_disable(); \ -+ __locked = spin_do_trylock(lock); \ -+ if (!__locked) \ -+ migrate_enable(); \ -+ __locked; \ -+}) -+ -+#ifdef CONFIG_LOCKDEP -+# define spin_lock_nested(lock, subclass) \ -+ do { \ -+ migrate_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+#define spin_lock_bh_nested(lock, subclass) \ -+ do { \ -+ local_bh_disable(); \ -+ migrate_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ migrate_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+#else -+# define spin_lock_nested(lock, subclass) spin_lock(lock) -+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+#endif -+ -+#define spin_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+ -+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) -+{ -+ unsigned long flags = 0; -+#ifdef CONFIG_TRACE_IRQFLAGS -+ flags = rt_spin_lock_trace_flags(lock); -+#else -+ spin_lock(lock); /* lock_local */ -+#endif -+ return flags; -+} -+ -+/* FIXME: we need rt_spin_lock_nest_lock */ -+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) -+ -+#define spin_unlock(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ migrate_enable(); \ -+ } while (0) -+ -+#define spin_unlock_bh(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ migrate_enable(); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define spin_unlock_irq(lock) spin_unlock(lock) -+ -+#define spin_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ spin_unlock(lock); \ -+ } while (0) -+ -+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) -+#define spin_trylock_irq(lock) spin_trylock(lock) -+ -+#define spin_trylock_irqsave(lock, flags) \ -+ rt_spin_trylock_irqsave(lock, &(flags)) -+ -+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) -+ -+#ifdef CONFIG_GENERIC_LOCKBREAK -+# define spin_is_contended(lock) ((lock)->break_lock) -+#else -+# define spin_is_contended(lock) (((void)(lock), 0)) -+#endif -+ -+static inline int spin_can_lock(spinlock_t *lock) -+{ -+ return !rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline int spin_is_locked(spinlock_t *lock) -+{ -+ return rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline void assert_spin_locked(spinlock_t *lock) -+{ -+ BUG_ON(!spin_is_locked(lock)); -+} -+ -+#define atomic_dec_and_lock(atomic, lock) \ -+ atomic_dec_and_spin_lock(atomic, lock) -+ -+#endif ---- a/include/linux/spinlock_types.h -+++ b/include/linux/spinlock_types.h -@@ -11,8 +11,13 @@ - - #include - --#include -- --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# 
include -+# include -+#else -+# include -+# include -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_TYPES_H */ ---- /dev/null -+++ b/include/linux/spinlock_types_rt.h -@@ -0,0 +1,51 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RT_H -+#define __LINUX_SPINLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. Include spinlock_types.h instead" -+#endif -+ -+#include -+ -+/* -+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: -+ */ -+typedef struct spinlock { -+ struct rt_mutex lock; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} spinlock_t; -+ -+#ifdef CONFIG_DEBUG_RT_MUTEXES -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ .file = __FILE__, \ -+ .line = __LINE__ , \ -+ } -+#else -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ } -+#endif -+ -+/* -+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) -+*/ -+ -+#define __SPIN_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ -+ SPIN_DEP_MAP_INIT(name) } -+ -+#define __DEFINE_SPINLOCK(name) \ -+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) -+ -+#define DEFINE_SPINLOCK(name) \ -+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) -+ -+#endif ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -1212,6 +1212,7 @@ static int wake_futex_pi(u32 __user *uad - struct futex_pi_state *pi_state = this->pi_state; - u32 uninitialized_var(curval), newval; - WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); - bool deboost; - int ret = 0; - -@@ -1268,7 +1269,8 @@ static int wake_futex_pi(u32 __user *uad - - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); - -- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); -+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, -+ &wake_sleeper_q); - - /* - * First unlock HB so the waiter does not spin on it once he got woken -@@ -1278,6 +1280,7 @@ static int wake_futex_pi(u32 __user *uad - */ - spin_unlock(&hb->lock); - wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); - if (deboost) - rt_mutex_adjust_prio(current); - -@@ -2709,10 +2712,7 @@ static int futex_wait_requeue_pi(u32 __u - * The waiter is allocated on our stack, manipulated by the requeue - * code while we sleep on uaddr. 
- */ -- debug_rt_mutex_init_waiter(&rt_waiter); -- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); -- RB_CLEAR_NODE(&rt_waiter.tree_entry); -- rt_waiter.task = NULL; -+ rt_mutex_init_waiter(&rt_waiter, false); - - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); - if (unlikely(ret != 0)) ---- a/kernel/locking/Makefile -+++ b/kernel/locking/Makefile -@@ -1,5 +1,5 @@ - --obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o -+obj-y += semaphore.o percpu-rwsem.o - - ifdef CONFIG_FUNCTION_TRACER - CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) -@@ -8,7 +8,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS - CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) - endif - -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) -+obj-y += mutex.o - obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o -+obj-y += rwsem.o -+endif - obj-$(CONFIG_LOCKDEP) += lockdep.o - ifeq ($(CONFIG_PROC_FS),y) - obj-$(CONFIG_LOCKDEP) += lockdep_proc.o -@@ -22,7 +26,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o - obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) - obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o - obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o -+endif -+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o - obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o - obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o ---- /dev/null -+++ b/kernel/locking/rt.c -@@ -0,0 +1,476 @@ -+/* -+ * kernel/rt.c -+ * -+ * Real-Time Preemption Support -+ * -+ * started by Ingo Molnar: -+ * -+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * -+ * historic credit for proving that Linux spinlocks can be implemented via -+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow -+ * and others) who prototyped it on 2.4 and did lots of comparative -+ * research and analysis; TimeSys, for proving that you can implement a -+ * fully preemptible kernel via the use of IRQ threading and mutexes; -+ * Bill Huey for persuasively arguing on lkml that the mutex model is the -+ * right one; and to MontaVista, who ported pmutexes to 2.6. -+ * -+ * This code is a from-scratch implementation and is not based on pmutexes, -+ * but the idea of converting spinlocks to mutexes is used here too. -+ * -+ * lock debugging, locking tree, deadlock detection: -+ * -+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey -+ * Released under the General Public License (GPL). -+ * -+ * Includes portions of the generic R/W semaphore implementation from: -+ * -+ * Copyright (c) 2001 David Howells (dhowells@redhat.com). -+ * - Derived partially from idea by Andrea Arcangeli -+ * - Derived also from comments by Linus -+ * -+ * Pending ownership of locks and ownership stealing: -+ * -+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt -+ * -+ * (also by Steven Rostedt) -+ * - Converted single pi_lock to individual task locks. -+ * -+ * By Esben Nielsen: -+ * Doing priority inheritance with help of the scheduler. -+ * -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * - major rework based on Esben Nielsens initial patch -+ * - replaced thread_info references by task_struct refs -+ * - removed task->pending_owner dependency -+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks -+ * in the scheduler return path as discussed with Steven Rostedt -+ * -+ * Copyright (C) 2006, Kihon Technologies Inc. 
-+ * Steven Rostedt -+ * - debugged and patched Thomas Gleixner's rework. -+ * - added back the cmpxchg to the rework. -+ * - turned atomic require back on for SMP. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtmutex_common.h" -+ -+/* -+ * struct mutex functions -+ */ -+void __mutex_do_init(struct mutex *mutex, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); -+ lockdep_init_map(&mutex->dep_map, name, key, 0); -+#endif -+ mutex->lock.save_state = 0; -+} -+EXPORT_SYMBOL(__mutex_do_init); -+ -+void __lockfunc _mutex_lock(struct mutex *lock) -+{ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock); -+ -+int __lockfunc _mutex_lock_interruptible(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = rt_mutex_lock_interruptible(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible); -+ -+int __lockfunc _mutex_lock_killable(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = rt_mutex_lock_killable(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable); -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) -+{ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock_nested); -+ -+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) -+{ -+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock_nest_lock); -+ -+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ ret = rt_mutex_lock_interruptible(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible_nested); -+ -+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+ ret = rt_mutex_lock_killable(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable_nested); -+#endif -+ -+int __lockfunc _mutex_trylock(struct mutex *lock) -+{ -+ int ret = rt_mutex_trylock(&lock->lock); -+ -+ if (ret) -+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_trylock); -+ -+void __lockfunc _mutex_unlock(struct mutex *lock) -+{ -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ rt_mutex_unlock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_unlock); -+ -+/* -+ * rwlock_t functions -+ */ -+int __lockfunc rt_write_trylock(rwlock_t *rwlock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = rt_mutex_trylock(&rwlock->lock); -+ if (ret) -+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_write_trylock); -+ -+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) -+{ -+ int ret; -+ -+ *flags = 0; 
-+ ret = rt_write_trylock(rwlock); -+ return ret; -+} -+EXPORT_SYMBOL(rt_write_trylock_irqsave); -+ -+int __lockfunc rt_read_trylock(rwlock_t *rwlock) -+{ -+ struct rt_mutex *lock = &rwlock->lock; -+ int ret = 1; -+ -+ /* -+ * recursive read locks succeed when current owns the lock, -+ * but not when read_depth == 0 which means that the lock is -+ * write locked. -+ */ -+ if (rt_mutex_owner(lock) != current) { -+ migrate_disable(); -+ ret = rt_mutex_trylock(lock); -+ if (ret) -+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ -+ } else if (!rwlock->read_depth) { -+ ret = 0; -+ } -+ -+ if (ret) -+ rwlock->read_depth++; -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_read_trylock); -+ -+void __lockfunc rt_write_lock(rwlock_t *rwlock) -+{ -+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); -+ migrate_disable(); -+ __rt_spin_lock(&rwlock->lock); -+} -+EXPORT_SYMBOL(rt_write_lock); -+ -+void __lockfunc rt_read_lock(rwlock_t *rwlock) -+{ -+ struct rt_mutex *lock = &rwlock->lock; -+ -+ -+ /* -+ * recursive read locks succeed when current owns the lock -+ */ -+ if (rt_mutex_owner(lock) != current) { -+ migrate_disable(); -+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); -+ __rt_spin_lock(lock); -+ } -+ rwlock->read_depth++; -+} -+ -+EXPORT_SYMBOL(rt_read_lock); -+ -+void __lockfunc rt_write_unlock(rwlock_t *rwlock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ __rt_spin_unlock(&rwlock->lock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_write_unlock); -+ -+void __lockfunc rt_read_unlock(rwlock_t *rwlock) -+{ -+ /* Release the lock only when read_depth is down to 0 */ -+ if (--rwlock->read_depth == 0) { -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ __rt_spin_unlock(&rwlock->lock); -+ migrate_enable(); -+ } -+} -+EXPORT_SYMBOL(rt_read_unlock); -+ -+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) -+{ -+ rt_write_lock(rwlock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(rt_write_lock_irqsave); -+ -+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) -+{ -+ rt_read_lock(rwlock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(rt_read_lock_irqsave); -+ -+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); -+ lockdep_init_map(&rwlock->dep_map, name, key, 0); -+#endif -+ rwlock->lock.save_state = 1; -+ rwlock->read_depth = 0; -+} -+EXPORT_SYMBOL(__rt_rwlock_init); -+ -+/* -+ * rw_semaphores -+ */ -+ -+void rt_up_write(struct rw_semaphore *rwsem) -+{ -+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); -+ rt_mutex_unlock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_up_write); -+ -+void __rt_up_read(struct rw_semaphore *rwsem) -+{ -+ if (--rwsem->read_depth == 0) -+ rt_mutex_unlock(&rwsem->lock); -+} -+ -+void rt_up_read(struct rw_semaphore *rwsem) -+{ -+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); -+ __rt_up_read(rwsem); -+} -+EXPORT_SYMBOL(rt_up_read); -+ -+/* -+ * downgrade a write lock into a read lock -+ * - just wake up any readers at the front of the queue -+ */ -+void rt_downgrade_write(struct rw_semaphore *rwsem) -+{ -+ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); -+ rwsem->read_depth = 1; -+} -+EXPORT_SYMBOL(rt_downgrade_write); -+ -+int rt_down_write_trylock(struct rw_semaphore *rwsem) -+{ -+ int ret = rt_mutex_trylock(&rwsem->lock); -+ -+ if (ret) -+ rwsem_acquire(&rwsem->dep_map, 0, 1, 
_RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(rt_down_write_trylock); -+ -+void rt_down_write(struct rw_semaphore *rwsem) -+{ -+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write); -+ -+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) -+{ -+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write_nested); -+ -+void rt_down_write_nested_lock(struct rw_semaphore *rwsem, -+ struct lockdep_map *nest) -+{ -+ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write_nested_lock); -+ -+int rt__down_read_trylock(struct rw_semaphore *rwsem) -+{ -+ struct rt_mutex *lock = &rwsem->lock; -+ int ret = 1; -+ -+ /* -+ * recursive read locks succeed when current owns the rwsem, -+ * but not when read_depth == 0 which means that the rwsem is -+ * write locked. -+ */ -+ if (rt_mutex_owner(lock) != current) -+ ret = rt_mutex_trylock(&rwsem->lock); -+ else if (!rwsem->read_depth) -+ ret = 0; -+ -+ if (ret) -+ rwsem->read_depth++; -+ return ret; -+ -+} -+ -+int rt_down_read_trylock(struct rw_semaphore *rwsem) -+{ -+ int ret; -+ -+ ret = rt__down_read_trylock(rwsem); -+ if (ret) -+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_down_read_trylock); -+ -+void rt__down_read(struct rw_semaphore *rwsem) -+{ -+ struct rt_mutex *lock = &rwsem->lock; -+ -+ if (rt_mutex_owner(lock) != current) -+ rt_mutex_lock(&rwsem->lock); -+ rwsem->read_depth++; -+} -+EXPORT_SYMBOL(rt__down_read); -+ -+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) -+{ -+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); -+ rt__down_read(rwsem); -+} -+ -+void rt_down_read(struct rw_semaphore *rwsem) -+{ -+ __rt_down_read(rwsem, 0); -+} -+EXPORT_SYMBOL(rt_down_read); -+ -+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) -+{ -+ __rt_down_read(rwsem, subclass); -+} -+EXPORT_SYMBOL(rt_down_read_nested); -+ -+void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); -+ lockdep_init_map(&rwsem->dep_map, name, key, 0); -+#endif -+ rwsem->read_depth = 0; -+ rwsem->lock.save_state = 0; -+} -+EXPORT_SYMBOL(__rt_rwsem_init); -+ -+/** -+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 -+ * @cnt: the atomic which we are to dec -+ * @lock: the mutex to return holding if we dec to 0 -+ * -+ * return true and hold lock if we dec to 0, return false otherwise -+ */ -+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) -+{ -+ /* dec if we can't possibly hit 0 */ -+ if (atomic_add_unless(cnt, -1, 1)) -+ return 0; -+ /* we might hit 0, so take the lock */ -+ mutex_lock(lock); -+ if (!atomic_dec_and_test(cnt)) { -+ /* when we actually did the dec, we didn't hit 0 */ -+ mutex_unlock(lock); -+ return 0; -+ } -+ /* we hit 0, and we hold the lock */ -+ return 1; -+} -+EXPORT_SYMBOL(atomic_dec_and_mutex_lock); ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -7,6 +7,11 @@ - * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner - * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt - * Copyright (C) 2006 Esben Nielsen -+ * Adaptive Spinlocks: -+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, -+ * 
and Peter Morreale, -+ * Adaptive Spinlocks simplification: -+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt - * - * See Documentation/locking/rt-mutex-design.txt for details. - */ -@@ -354,6 +359,14 @@ static bool rt_mutex_cond_detect_deadloc - return debug_rt_mutex_detect_deadlock(waiter, chwalk); - } - -+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) -+{ -+ if (waiter->savestate) -+ wake_up_lock_sleeper(waiter->task); -+ else -+ wake_up_process(waiter->task); -+} -+ - /* - * Max number of times we'll walk the boosting chain: - */ -@@ -661,13 +674,16 @@ static int rt_mutex_adjust_prio_chain(st - * follow here. This is the end of the chain we are walking. - */ - if (!rt_mutex_owner(lock)) { -+ struct rt_mutex_waiter *lock_top_waiter; -+ - /* - * If the requeue [7] above changed the top waiter, - * then we need to wake the new top waiter up to try - * to get the lock. - */ -- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) -- wake_up_process(rt_mutex_top_waiter(lock)->task); -+ lock_top_waiter = rt_mutex_top_waiter(lock); -+ if (prerequeue_top_waiter != lock_top_waiter) -+ rt_mutex_wake_waiter(lock_top_waiter); - raw_spin_unlock(&lock->wait_lock); - return 0; - } -@@ -760,6 +776,25 @@ static int rt_mutex_adjust_prio_chain(st - return ret; - } - -+ -+#define STEAL_NORMAL 0 -+#define STEAL_LATERAL 1 -+ -+/* -+ * Note that RT tasks are excluded from lateral-steals to prevent the -+ * introduction of an unbounded latency -+ */ -+static inline int lock_is_stealable(struct task_struct *task, -+ struct task_struct *pendowner, int mode) -+{ -+ if (mode == STEAL_NORMAL || rt_task(task)) { -+ if (task->prio >= pendowner->prio) -+ return 0; -+ } else if (task->prio > pendowner->prio) -+ return 0; -+ return 1; -+} -+ - /* - * Try to take an rt-mutex - * -@@ -770,8 +805,9 @@ static int rt_mutex_adjust_prio_chain(st - * @waiter: The waiter that is queued to the lock's wait tree if the - * callsite called task_blocked_on_lock(), otherwise NULL - */ --static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -- struct rt_mutex_waiter *waiter) -+static int __try_to_take_rt_mutex(struct rt_mutex *lock, -+ struct task_struct *task, -+ struct rt_mutex_waiter *waiter, int mode) - { - unsigned long flags; - -@@ -810,8 +846,10 @@ static int try_to_take_rt_mutex(struct r - * If waiter is not the highest priority waiter of - * @lock, give up. - */ -- if (waiter != rt_mutex_top_waiter(lock)) -+ if (waiter != rt_mutex_top_waiter(lock)) { -+ /* XXX lock_is_stealable() ? */ - return 0; -+ } - - /* - * We can acquire the lock. Remove the waiter from the -@@ -829,14 +867,10 @@ static int try_to_take_rt_mutex(struct r - * not need to be dequeued. - */ - if (rt_mutex_has_waiters(lock)) { -- /* -- * If @task->prio is greater than or equal to -- * the top waiter priority (kernel view), -- * @task lost. -- */ -- if (task->prio >= rt_mutex_top_waiter(lock)->prio) -- return 0; -+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; - -+ if (task != pown && !lock_is_stealable(task, pown, mode)) -+ return 0; - /* - * The current top waiter stays enqueued. 
We - * don't have to change anything in the lock -@@ -885,6 +919,315 @@ static int try_to_take_rt_mutex(struct r - return 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * preemptible spin_lock functions: -+ */ -+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock)) -+{ -+ might_sleep_no_state_check(); -+ -+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) -+ rt_mutex_deadlock_account_lock(lock, current); -+ else -+ slowfn(lock); -+} -+ -+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock)) -+{ -+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) -+ rt_mutex_deadlock_account_unlock(current); -+ else -+ slowfn(lock); -+} -+#ifdef CONFIG_SMP -+/* -+ * Note that owner is a speculative pointer and dereferencing relies -+ * on rcu_read_lock() and the check against the lock owner. -+ */ -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *owner) -+{ -+ int res = 0; -+ -+ rcu_read_lock(); -+ for (;;) { -+ if (owner != rt_mutex_owner(lock)) -+ break; -+ /* -+ * Ensure that owner->on_cpu is dereferenced _after_ -+ * checking the above to be valid. -+ */ -+ barrier(); -+ if (!owner->on_cpu) { -+ res = 1; -+ break; -+ } -+ cpu_relax(); -+ } -+ rcu_read_unlock(); -+ return res; -+} -+#else -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *orig_owner) -+{ -+ return 1; -+} -+#endif -+ -+# define pi_lock(lock) raw_spin_lock_irq(lock) -+# define pi_unlock(lock) raw_spin_unlock_irq(lock) -+ -+static int task_blocks_on_rt_mutex(struct rt_mutex *lock, -+ struct rt_mutex_waiter *waiter, -+ struct task_struct *task, -+ enum rtmutex_chainwalk chwalk); -+/* -+ * Slow path lock function spin_lock style: this variant is very -+ * careful not to miss any non-lock wakeups. -+ * -+ * We store the current state under p->pi_lock in p->saved_state and -+ * the try_to_wake_up() code handles this accordingly. -+ */ -+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) -+{ -+ struct task_struct *lock_owner, *self = current; -+ struct rt_mutex_waiter waiter, *top_waiter; -+ int ret; -+ -+ rt_mutex_init_waiter(&waiter, true); -+ -+ raw_spin_lock(&lock->wait_lock); -+ -+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { -+ raw_spin_unlock(&lock->wait_lock); -+ return; -+ } -+ -+ BUG_ON(rt_mutex_owner(lock) == self); -+ -+ /* -+ * We save whatever state the task is in and we'll restore it -+ * after acquiring the lock taking real wakeups into account -+ * as well. We are serialized via pi_lock against wakeups. See -+ * try_to_wake_up(). -+ */ -+ pi_lock(&self->pi_lock); -+ self->saved_state = self->state; -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ pi_unlock(&self->pi_lock); -+ -+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); -+ BUG_ON(ret); -+ -+ for (;;) { -+ /* Try to acquire the lock again. */ -+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) -+ break; -+ -+ top_waiter = rt_mutex_top_waiter(lock); -+ lock_owner = rt_mutex_owner(lock); -+ -+ raw_spin_unlock(&lock->wait_lock); -+ -+ debug_rt_mutex_print_deadlock(&waiter); -+ -+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) -+ schedule(); -+ -+ raw_spin_lock(&lock->wait_lock); -+ -+ pi_lock(&self->pi_lock); -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ pi_unlock(&self->pi_lock); -+ } -+ -+ /* -+ * Restore the task state to current->saved_state. 
We set it -+ * to the original state above and the try_to_wake_up() code -+ * has possibly updated it when a real (non-rtmutex) wakeup -+ * happened while we were blocked. Clear saved_state so -+ * try_to_wakeup() does not get confused. -+ */ -+ pi_lock(&self->pi_lock); -+ __set_current_state_no_track(self->saved_state); -+ self->saved_state = TASK_RUNNING; -+ pi_unlock(&self->pi_lock); -+ -+ /* -+ * try_to_take_rt_mutex() sets the waiter bit -+ * unconditionally. We might have to fix that up: -+ */ -+ fixup_rt_mutex_waiters(lock); -+ -+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); -+ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); -+ -+ raw_spin_unlock(&lock->wait_lock); -+ -+ debug_rt_mutex_free_waiter(&waiter); -+} -+ -+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q, -+ struct rt_mutex *lock); -+/* -+ * Slow path to release a rt_mutex spin_lock style -+ */ -+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) -+{ -+ WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); -+ -+ raw_spin_lock(&lock->wait_lock); -+ -+ debug_rt_mutex_unlock(lock); -+ -+ rt_mutex_deadlock_account_unlock(current); -+ -+ if (!rt_mutex_has_waiters(lock)) { -+ lock->owner = NULL; -+ raw_spin_unlock(&lock->wait_lock); -+ return; -+ } -+ -+ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock); -+ -+ raw_spin_unlock(&lock->wait_lock); -+ wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); -+ -+ /* Undo pi boosting.when necessary */ -+ rt_mutex_adjust_prio(current); -+} -+ -+void __lockfunc rt_spin_lock(spinlock_t *lock) -+{ -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+} -+EXPORT_SYMBOL(rt_spin_lock); -+ -+void __lockfunc __rt_spin_lock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); -+} -+EXPORT_SYMBOL(__rt_spin_lock); -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) -+{ -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+} -+EXPORT_SYMBOL(rt_spin_lock_nested); -+#endif -+ -+void __lockfunc rt_spin_unlock(spinlock_t *lock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ spin_release(&lock->dep_map, 1, _RET_IP_); -+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -+} -+EXPORT_SYMBOL(rt_spin_unlock); -+ -+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); -+} -+EXPORT_SYMBOL(__rt_spin_unlock); -+ -+/* -+ * Wait for the lock to get unlocked: instead of polling for an unlock -+ * (like raw spinlocks do), we lock and unlock, to force the kernel to -+ * schedule if there's contention: -+ */ -+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) -+{ -+ spin_lock(lock); -+ spin_unlock(lock); -+} -+EXPORT_SYMBOL(rt_spin_unlock_wait); -+ -+int __lockfunc rt_spin_trylock(spinlock_t *lock) -+{ -+ int ret = rt_mutex_trylock(&lock->lock); -+ -+ if (ret) -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock); -+ -+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) -+{ -+ int ret; -+ -+ local_bh_disable(); -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } else -+ local_bh_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock_bh); -+ -+int __lockfunc 
rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) -+{ -+ int ret; -+ -+ *flags = 0; -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock_irqsave); -+ -+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) -+{ -+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ -+ if (atomic_add_unless(atomic, -1, 1)) -+ return 0; -+ migrate_disable(); -+ rt_spin_lock(lock); -+ if (atomic_dec_and_test(atomic)) -+ return 1; -+ rt_spin_unlock(lock); -+ migrate_enable(); -+ return 0; -+} -+EXPORT_SYMBOL(atomic_dec_and_spin_lock); -+ -+ void -+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); -+ lockdep_init_map(&lock->dep_map, name, key, 0); -+#endif -+} -+EXPORT_SYMBOL(__rt_spin_lock_init); -+ -+#endif /* PREEMPT_RT_FULL */ -+ -+static inline int -+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -+ struct rt_mutex_waiter *waiter) -+{ -+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); -+} -+ - /* - * Task blocks on lock. - * -@@ -998,6 +1341,7 @@ static int task_blocks_on_rt_mutex(struc - * Called with lock->wait_lock held. - */ - static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q, - struct rt_mutex *lock) - { - struct rt_mutex_waiter *waiter; -@@ -1027,7 +1371,10 @@ static void mark_wakeup_next_waiter(stru - - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); - -- wake_q_add(wake_q, waiter->task); -+ if (waiter->savestate) -+ wake_q_add(wake_sleeper_q, waiter->task); -+ else -+ wake_q_add(wake_q, waiter->task); - } - - /* -@@ -1109,11 +1456,11 @@ void rt_mutex_adjust_pi(struct task_stru - return; - } - next_lock = waiter->lock; -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); - - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(task); - -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, - next_lock, NULL, task); - } -@@ -1199,9 +1546,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, - struct rt_mutex_waiter waiter; - int ret = 0; - -- debug_rt_mutex_init_waiter(&waiter); -- RB_CLEAR_NODE(&waiter.pi_tree_entry); -- RB_CLEAR_NODE(&waiter.tree_entry); -+ rt_mutex_init_waiter(&waiter, false); - - raw_spin_lock(&lock->wait_lock); - -@@ -1286,7 +1631,8 @@ static inline int rt_mutex_slowtrylock(s - * Return whether the current task needs to undo a potential priority boosting. - */ - static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, -- struct wake_q_head *wake_q) -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q) - { - raw_spin_lock(&lock->wait_lock); - -@@ -1339,7 +1685,7 @@ static bool __sched rt_mutex_slowunlock( - * - * Queue the next waiter for wakeup once we release the wait_lock. 
- */ -- mark_wakeup_next_waiter(wake_q, lock); -+ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); - - raw_spin_unlock(&lock->wait_lock); - -@@ -1396,17 +1742,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo - static inline void - rt_mutex_fastunlock(struct rt_mutex *lock, - bool (*slowfn)(struct rt_mutex *lock, -- struct wake_q_head *wqh)) -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper)) - { - WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); - - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); - - } else { -- bool deboost = slowfn(lock, &wake_q); -+ bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q); - - wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); - - /* Undo pi boosting if necessary: */ - if (deboost) -@@ -1543,13 +1892,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock); - * required or not. - */ - bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wqh) -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper) - { - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); - return false; - } -- return rt_mutex_slowunlock(lock, wqh); -+ return rt_mutex_slowunlock(lock, wqh, wq_sleeper); - } - - /** -@@ -1582,13 +1932,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); - void __rt_mutex_init(struct rt_mutex *lock, const char *name) - { - lock->owner = NULL; -- raw_spin_lock_init(&lock->wait_lock); - lock->waiters = RB_ROOT; - lock->waiters_leftmost = NULL; - - debug_rt_mutex_init(lock, name); - } --EXPORT_SYMBOL_GPL(__rt_mutex_init); -+EXPORT_SYMBOL(__rt_mutex_init); - - /** - * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a -@@ -1603,7 +1952,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); - void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) - { -- __rt_mutex_init(lock, NULL); -+ rt_mutex_init(lock); - debug_rt_mutex_proxy_lock(lock, proxy_owner); - rt_mutex_set_owner(lock, proxy_owner); - rt_mutex_deadlock_account_lock(lock, proxy_owner); -@@ -1765,3 +2114,25 @@ int rt_mutex_finish_proxy_lock(struct rt - - return ret; - } -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+struct ww_mutex { -+}; -+struct ww_acquire_ctx { -+}; -+int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+{ -+ BUG(); -+} -+EXPORT_SYMBOL_GPL(__ww_mutex_lock); -+int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+{ -+ BUG(); -+} -+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); -+void __sched ww_mutex_unlock(struct ww_mutex *lock) -+{ -+ BUG(); -+} -+EXPORT_SYMBOL_GPL(ww_mutex_unlock); -+#endif ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -27,6 +27,7 @@ struct rt_mutex_waiter { - struct rb_node pi_tree_entry; - struct task_struct *task; - struct rt_mutex *lock; -+ bool savestate; - #ifdef CONFIG_DEBUG_RT_MUTEXES - unsigned long ip; - struct pid *deadlock_task_pid; -@@ -113,7 +114,8 @@ extern int rt_mutex_finish_proxy_lock(st - struct rt_mutex_waiter *waiter); - extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); - extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wqh); -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper); - extern void rt_mutex_adjust_prio(struct task_struct *task); - - #ifdef CONFIG_DEBUG_RT_MUTEXES -@@ -122,4 +124,14 @@ extern void rt_mutex_adjust_prio(struct - # include "rtmutex.h" - #endif - -+static inline void 
-+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) -+{ -+ debug_rt_mutex_init_waiter(waiter); -+ waiter->task = NULL; -+ waiter->savestate = savestate; -+ RB_CLEAR_NODE(&waiter->pi_tree_entry); -+ RB_CLEAR_NODE(&waiter->tree_entry); -+} -+ - #endif ---- a/kernel/locking/spinlock.c -+++ b/kernel/locking/spinlock.c -@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc - * __[spin|read|write]_lock_bh() - */ - BUILD_LOCK_OPS(spin, raw_spinlock); -+ -+#ifndef CONFIG_PREEMPT_RT_FULL - BUILD_LOCK_OPS(read, rwlock); - BUILD_LOCK_OPS(write, rwlock); -+#endif - - #endif - -@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_ - EXPORT_SYMBOL(_raw_spin_unlock_bh); - #endif - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #ifndef CONFIG_INLINE_READ_TRYLOCK - int __lockfunc _raw_read_trylock(rwlock_t *lock) - { -@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwl - EXPORT_SYMBOL(_raw_write_unlock_bh); - #endif - -+#endif /* !PREEMPT_RT_FULL */ -+ - #ifdef CONFIG_DEBUG_LOCK_ALLOC - - void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) ---- a/kernel/locking/spinlock_debug.c -+++ b/kernel/locking/spinlock_debug.c -@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t - - EXPORT_SYMBOL(__raw_spin_lock_init); - -+#ifndef CONFIG_PREEMPT_RT_FULL - void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key) - { -@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const - } - - EXPORT_SYMBOL(__rwlock_init); -+#endif - - static void spin_dump(raw_spinlock_t *lock, const char *msg) - { -@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t * - arch_spin_unlock(&lock->raw_lock); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - static void rwlock_bug(rwlock_t *lock, const char *msg) - { - if (!debug_locks_off()) -@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock) - debug_write_unlock(lock); - arch_write_unlock(&lock->raw_lock); - } -+ -+#endif ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -547,7 +547,7 @@ void wake_q_add(struct wake_q_head *head - head->lastp = &node->next; - } - --void wake_up_q(struct wake_q_head *head) -+void __wake_up_q(struct wake_q_head *head, bool sleeper) - { - struct wake_q_node *node = head->first; - -@@ -564,7 +564,10 @@ void wake_up_q(struct wake_q_head *head) - * wake_up_process() implies a wmb() to pair with the queueing - * in wake_q_add() so as not to miss wakeups. - */ -- wake_up_process(task); -+ if (sleeper) -+ wake_up_lock_sleeper(task); -+ else -+ wake_up_process(task); - put_task_struct(task); - } - } diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch deleted file mode 100644 index 2e87f33f8..000000000 --- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch +++ /dev/null @@ -1,129 +0,0 @@ -Subject: rt: Introduce cpu_chill() -From: Thomas Gleixner -Date: Wed, 07 Mar 2012 20:51:03 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Retry loops on RT might loop forever when the modifying side was -preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() -defaults to cpu_relax() for non RT. On RT it puts the looping task to -sleep for a tick so the preempted task can make progress. - -Steven Rostedt changed it to use a hrtimer instead of msleep(): -| -|Ulrich Obergfell pointed out that cpu_chill() calls msleep() which is woken -|up by the ksoftirqd running the TIMER softirq. 
But as the cpu_chill() is -|called from softirq context, it may block the ksoftirqd() from running, in -|which case, it may never wake up the msleep() causing the deadlock. -| -|I checked the vmcore, and irq/74-qla2xxx is stuck in the msleep() call, -|running on CPU 8. The one ksoftirqd that is stuck, happens to be the one that -|runs on CPU 8, and it is blocked on a lock held by irq/74-qla2xxx. As that -|ksoftirqd is the one that will wake up irq/74-qla2xxx, and it happens to be -|blocked on a lock that irq/74-qla2xxx holds, we have our deadlock. -| -|The solution is not to convert the cpu_chill() back to a cpu_relax() as that -|will re-create a possible live lock that the cpu_chill() fixed earlier, and may -|also leave this bug open on other softirqs. The fix is to remove the -|dependency on ksoftirqd from cpu_chill(). That is, instead of calling -|msleep() that requires ksoftirqd to wake it up, use the -|hrtimer_nanosleep() code that does the wakeup from hard irq context. -| -||Looks to be the lock of the block softirq. I don't have the core dump -||anymore, but from what I could tell the ksoftirqd was blocked on the -||block softirq lock, where the block softirq handler did a msleep -||(called by the qla2xxx interrupt handler). -|| -||Looking at trigger_softirq() in block/blk-softirq.c, it can do a -||smp_callfunction() to another cpu to run the block softirq. If that -||happens to be the cpu where the qla2xx irq handler is doing the block -||softirq and is in a middle of a msleep(), I believe the ksoftirqd will -||try to run the softirq. If it does that, then BOOM, it's deadlocked -||because the ksoftirqd will never run the timer softirq either. -| -||I should have also stated that it was only one lock that was involved. -||But the lock owner was doing a msleep() that requires a wakeup by -||ksoftirqd to continue. If ksoftirqd happens to be blocked on a lock -||held by the msleep() caller, then you have your deadlock. -|| -||It's best not to have any softirqs going to sleep requiring another -||softirq to wake it up. Note, if we ever require a timer softirq to do a -||cpu_chill() it will most definitely hit this deadlock. - -+ bigeasy: add PF_NOFREEZE: -| [....] Waiting for /dev to be fully populated... -| ===================================== -| [ BUG: udevd/229 still has locks held! 
]
-| 3.12.11-rt17 #23 Not tainted
-| -------------------------------------
-| 1 lock held by udevd/229:
-| #0: (&type->i_mutex_dir_key#2){+.+.+.}, at: lookup_slow+0x28/0x98
-|
-| stack backtrace:
-| CPU: 0 PID: 229 Comm: udevd Not tainted 3.12.11-rt17 #23
-| (unwind_backtrace+0x0/0xf8) from (show_stack+0x10/0x14)
-| (show_stack+0x10/0x14) from (dump_stack+0x74/0xbc)
-| (dump_stack+0x74/0xbc) from (do_nanosleep+0x120/0x160)
-| (do_nanosleep+0x120/0x160) from (hrtimer_nanosleep+0x90/0x110)
-| (hrtimer_nanosleep+0x90/0x110) from (cpu_chill+0x30/0x38)
-| (cpu_chill+0x30/0x38) from (dentry_kill+0x158/0x1ec)
-| (dentry_kill+0x158/0x1ec) from (dput+0x74/0x15c)
-| (dput+0x74/0x15c) from (lookup_real+0x4c/0x50)
-| (lookup_real+0x4c/0x50) from (__lookup_hash+0x34/0x44)
-| (__lookup_hash+0x34/0x44) from (lookup_slow+0x38/0x98)
-| (lookup_slow+0x38/0x98) from (path_lookupat+0x208/0x7fc)
-| (path_lookupat+0x208/0x7fc) from (filename_lookup+0x20/0x60)
-| (filename_lookup+0x20/0x60) from (user_path_at_empty+0x50/0x7c)
-| (user_path_at_empty+0x50/0x7c) from (user_path_at+0x14/0x1c)
-| (user_path_at+0x14/0x1c) from (vfs_fstatat+0x48/0x94)
-| (vfs_fstatat+0x48/0x94) from (SyS_stat64+0x14/0x30)
-| (SyS_stat64+0x14/0x30) from (ret_fast_syscall+0x0/0x48)
-
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Steven Rostedt
-Signed-off-by: Sebastian Andrzej Siewior
---
- include/linux/delay.h | 6 ++++++
- kernel/time/hrtimer.c | 19 +++++++++++++++++++
- 2 files changed, 25 insertions(+)
-
---- a/include/linux/delay.h
-+++ b/include/linux/delay.h
-@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s
- msleep(seconds * 1000);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+extern void cpu_chill(void);
-+#else
-+# define cpu_chill() cpu_relax()
-+#endif
-+
- #endif /* defined(_LINUX_DELAY_H) */
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1775,6 +1775,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
- return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Sleep for 1 ms in hope whoever holds what we want will let it go.
-+ */
-+void cpu_chill(void)
-+{
-+ struct timespec tu = {
-+ .tv_nsec = NSEC_PER_MSEC,
-+ };
-+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
-+
-+ current->flags |= PF_NOFREEZE;
-+ hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
-+ if (!freeze_flag)
-+ current->flags &= ~PF_NOFREEZE;
-+}
-+EXPORT_SYMBOL(cpu_chill);
-+#endif
-+
- /*
- * Functions related to boot-time initialization:
- */
diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch
deleted file mode 100644
index 5dff93f72..000000000
--- a/debian/patches/features/all/rt/rt-local-irq-lock.patch
+++ /dev/null
@@ -1,324 +0,0 @@
-Subject: rt: Add local irq locks
-From: Thomas Gleixner
-Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-Introduce locallock. For !RT this maps to preempt_disable()/
-local_irq_disable() so there is not much that changes. For RT this will
-map to a spinlock. This makes preemption possible and the locked "resource"
-gets the lockdep annotation it wouldn't have otherwise. The locks are
-recursive for owner == current. Also, all locks use migrate_disable(),
-which ensures that the task is not migrated to another CPU while the lock
-is held and the owner is preempted.
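A minimal sketch of how this interface is meant to be used (the per-CPU counter and the lock names are invented for illustration, not taken from the patch):

	#include <linux/locallock.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, frob_count);	/* hypothetical */
	static DEFINE_LOCAL_IRQ_LOCK(frob_lock);		/* hypothetical */

	static void frob_count_inc(void)
	{
		/* !RT: maps to preempt_disable(); RT: a recursive per-CPU
		 * spinlock taken with migrate_disable(), so the CPU cannot
		 * change under us even if we get preempted. */
		local_lock(frob_lock);
		this_cpu_inc(frob_count);
		local_unlock(frob_lock);
	}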
- -Signed-off-by: Thomas Gleixner ---- - include/linux/locallock.h | 264 ++++++++++++++++++++++++++++++++++++++++++++++ - include/linux/percpu.h | 29 +++++ - 2 files changed, 293 insertions(+) - ---- /dev/null -+++ b/include/linux/locallock.h -@@ -0,0 +1,264 @@ -+#ifndef _LINUX_LOCALLOCK_H -+#define _LINUX_LOCALLOCK_H -+ -+#include -+#include -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define LL_WARN(cond) WARN_ON(cond) -+#else -+# define LL_WARN(cond) do { } while (0) -+#endif -+ -+/* -+ * per cpu lock based substitute for local_irq_*() -+ */ -+struct local_irq_lock { -+ spinlock_t lock; -+ struct task_struct *owner; -+ int nestcnt; -+ unsigned long flags; -+}; -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ -+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ -+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } -+ -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ -+ DECLARE_PER_CPU(struct local_irq_lock, lvar) -+ -+#define local_irq_lock_init(lvar) \ -+ do { \ -+ int __cpu; \ -+ for_each_possible_cpu(__cpu) \ -+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ -+ } while (0) -+ -+/* -+ * spin_lock|trylock|unlock_local flavour that does not migrate disable -+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var -+ * already takes care of the migrate_disable/enable -+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. -+ */ -+# define spin_lock_local(lock) spin_lock(lock) -+# define spin_trylock_local(lock) spin_trylock(lock) -+# define spin_unlock_local(lock) spin_unlock(lock) -+ -+static inline void __local_lock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ spin_lock_local(&lv->lock); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ } -+ lv->nestcnt++; -+} -+ -+#define local_lock(lvar) \ -+ do { __local_lock(&get_local_var(lvar)); } while (0) -+ -+static inline int __local_trylock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current && spin_trylock_local(&lv->lock)) { -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+ return 1; -+ } -+ return 0; -+} -+ -+#define local_trylock(lvar) \ -+ ({ \ -+ int __locked; \ -+ __locked = __local_trylock(&get_local_var(lvar)); \ -+ if (!__locked) \ -+ put_local_var(lvar); \ -+ __locked; \ -+ }) -+ -+static inline void __local_unlock(struct local_irq_lock *lv) -+{ -+ LL_WARN(lv->nestcnt == 0); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return; -+ -+ lv->owner = NULL; -+ spin_unlock_local(&lv->lock); -+} -+ -+#define local_unlock(lvar) \ -+ do { \ -+ __local_unlock(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+static inline void __local_lock_irq(struct local_irq_lock *lv) -+{ -+ spin_lock_irqsave(&lv->lock, lv->flags); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+} -+ -+#define local_lock_irq(lvar) \ -+ do { __local_lock_irq(&get_local_var(lvar)); } while (0) -+ -+#define local_lock_irq_on(lvar, cpu) \ -+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline void __local_unlock_irq(struct local_irq_lock *lv) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ lv->owner = NULL; -+ lv->nestcnt = 0; -+ spin_unlock_irq(&lv->lock); -+} -+ -+#define local_unlock_irq(lvar) \ -+ do { \ -+ __local_unlock_irq(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irq_on(lvar, cpu) \ -+ do { \ -+ __local_unlock_irq(&per_cpu(lvar, cpu)); \ -+ } while (0) -+ -+static inline int 
__local_lock_irqsave(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ __local_lock_irq(lv); -+ return 0; -+ } else { -+ lv->nestcnt++; -+ return 1; -+ } -+} -+ -+#define local_lock_irqsave(lvar, _flags) \ -+ do { \ -+ if (__local_lock_irqsave(&get_local_var(lvar))) \ -+ put_local_var(lvar); \ -+ _flags = __this_cpu_read(lvar.flags); \ -+ } while (0) -+ -+#define local_lock_irqsave_on(lvar, _flags, cpu) \ -+ do { \ -+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ -+ _flags = per_cpu(lvar, cpu).flags; \ -+ } while (0) -+ -+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, -+ unsigned long flags) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return 0; -+ -+ lv->owner = NULL; -+ spin_unlock_irqrestore(&lv->lock, lv->flags); -+ return 1; -+} -+ -+#define local_unlock_irqrestore(lvar, flags) \ -+ do { \ -+ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irqrestore_on(lvar, flags, cpu) \ -+ do { \ -+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ -+ } while (0) -+ -+#define local_spin_trylock_irq(lvar, lock) \ -+ ({ \ -+ int __locked; \ -+ local_lock_irq(lvar); \ -+ __locked = spin_trylock(lock); \ -+ if (!__locked) \ -+ local_unlock_irq(lvar); \ -+ __locked; \ -+ }) -+ -+#define local_spin_lock_irq(lvar, lock) \ -+ do { \ -+ local_lock_irq(lvar); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irq(lvar, lock) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irq(lvar); \ -+ } while (0) -+ -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ do { \ -+ local_lock_irqsave(lvar, flags); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irqrestore(lvar, flags); \ -+ } while (0) -+ -+#define get_locked_var(lvar, var) \ -+ (*({ \ -+ local_lock(lvar); \ -+ this_cpu_ptr(&var); \ -+ })) -+ -+#define put_locked_var(lvar, var) local_unlock(lvar); -+ -+#define local_lock_cpu(lvar) \ -+ ({ \ -+ local_lock(lvar); \ -+ smp_processor_id(); \ -+ }) -+ -+#define local_unlock_cpu(lvar) local_unlock(lvar) -+ -+#else /* PREEMPT_RT_BASE */ -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar -+ -+static inline void local_irq_lock_init(int lvar) { } -+ -+#define local_lock(lvar) preempt_disable() -+#define local_unlock(lvar) preempt_enable() -+#define local_lock_irq(lvar) local_irq_disable() -+#define local_unlock_irq(lvar) local_irq_enable() -+#define local_lock_irqsave(lvar, flags) local_irq_save(flags) -+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) -+ -+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) -+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) -+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ spin_lock_irqsave(lock, flags) -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ spin_unlock_irqrestore(lock, flags) -+ -+#define get_locked_var(lvar, var) get_cpu_var(var) -+#define put_locked_var(lvar, var) put_cpu_var(var) -+ -+#define local_lock_cpu(lvar) get_cpu() -+#define local_unlock_cpu(lvar) put_cpu() -+ -+#endif -+ -+#endif ---- a/include/linux/percpu.h -+++ b/include/linux/percpu.h -@@ -24,6 +24,35 @@ - PERCPU_MODULE_RESERVE) - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+#define 
get_local_var(var) (*({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(&var); })) -+ -+#define put_local_var(var) do { \ -+ (void)&(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+# define get_local_ptr(var) ({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(var); }) -+ -+# define put_local_ptr(var) do { \ -+ (void)(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+#else -+ -+#define get_local_var(var) get_cpu_var(var) -+#define put_local_var(var) put_cpu_var(var) -+#define get_local_ptr(var) get_cpu_ptr(var) -+#define put_local_ptr(var) put_cpu_ptr(var) -+ -+#endif -+ - /* minimum unit size, also is the maximum supported allocation size */ - #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) - diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch deleted file mode 100644 index 7460614dc..000000000 --- a/debian/patches/features/all/rt/rt-preempt-base-config.patch +++ /dev/null @@ -1,54 +0,0 @@ -Subject: rt: Provide PREEMPT_RT_BASE config switch -From: Thomas Gleixner -Date: Fri, 17 Jun 2011 12:39:57 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Introduce PREEMPT_RT_BASE which enables parts of -PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT -substitutions for testing. - -Signed-off-by: Thomas Gleixner ---- - kernel/Kconfig.preempt | 19 +++++++++++++++++-- - 1 file changed, 17 insertions(+), 2 deletions(-) - ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -1,3 +1,10 @@ -+config PREEMPT -+ bool -+ select PREEMPT_COUNT -+ -+config PREEMPT_RT_BASE -+ bool -+ select PREEMPT - - choice - prompt "Preemption Model" -@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY - - Select this if you are building a kernel for a desktop system. - --config PREEMPT -+config PREEMPT__LL - bool "Preemptible Kernel (Low-Latency Desktop)" -- select PREEMPT_COUNT -+ select PREEMPT - select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK - help - This option reduces the latency of the kernel by making -@@ -52,6 +59,14 @@ config PREEMPT - embedded system with latency requirements in the milliseconds - range. - -+config PREEMPT_RTB -+ bool "Preemptible Kernel (Basic RT)" -+ select PREEMPT_RT_BASE -+ help -+ This option is basically the same as (Low-Latency Desktop) but -+ enables changes which are preliminary for the full preemptible -+ RT kernel. -+ - endchoice - - config PREEMPT_COUNT diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch deleted file mode 100644 index 4af3008be..000000000 --- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch +++ /dev/null @@ -1,38 +0,0 @@ -Subject: rt: Improve the serial console PASS_LIMIT -From: Ingo Molnar -Date: Wed Dec 14 13:05:54 CET 2011 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Beyond the warning: - - drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable] - -the solution of just looping infinitely was ugly - up it to 1 million to -give it a chance to continue in some really ugly situation. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/8250/8250_core.c | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - ---- a/drivers/tty/serial/8250/8250_core.c -+++ b/drivers/tty/serial/8250/8250_core.c -@@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg - - static unsigned int skip_txen_test; /* force skip of txen test at init time */ - --#define PASS_LIMIT 512 -+/* -+ * On -rt we can have a more delays, and legitimately -+ * so - so don't drop work spuriously and spam the -+ * syslog: -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define PASS_LIMIT 1000000 -+#else -+# define PASS_LIMIT 512 -+#endif - - #include - /* diff --git a/debian/patches/features/all/rt/rtmutex-Use-chainwalking-control-enum.patch b/debian/patches/features/all/rt/rtmutex-Use-chainwalking-control-enum.patch deleted file mode 100644 index 5a97e49a9..000000000 --- a/debian/patches/features/all/rt/rtmutex-Use-chainwalking-control-enum.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: "bmouring@ni.com" -Date: Tue, 15 Dec 2015 17:07:30 -0600 -Subject: rtmutex: Use chainwalking control enum -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In 8930ed80 (rtmutex: Cleanup deadlock detector debug logic), -chainwalking control enums were introduced to limit the deadlock -detection logic. One of the calls to task_blocks_on_rt_mutex was -missed when converting to use the enums. - -Cc: Thomas Gleixner -Signed-off-by: Brad Mouring -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1020,7 +1020,7 @@ static void noinline __sched rt_spin_lo - __set_current_state_no_track(TASK_UNINTERRUPTIBLE); - pi_unlock(&self->pi_lock); - -- ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); -+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK); - BUG_ON(ret); - - for (;;) { diff --git a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch b/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch deleted file mode 100644 index 3e5b27348..000000000 --- a/debian/patches/features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch +++ /dev/null @@ -1,424 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 28 Oct 2013 09:36:37 +0100 -Subject: rtmutex: Add RT aware ww locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -lockdep says: -| -------------------------------------------------------------------------- -| | Wound/wait tests | -| --------------------- -| ww api failures: ok | ok | ok | -| ww contexts mixing: ok | ok | -| finishing ww context: ok | ok | ok | ok | -| locking mismatches: ok | ok | ok | -| EDEADLK handling: ok | ok | ok | ok | ok | ok | ok | ok | ok | ok | -| spinlock nest unlocked: ok | -| ----------------------------------------------------- -| |block | try |context| -| ----------------------------------------------------- -| context: ok | ok | ok | -| try: ok | ok | ok | -| block: ok | ok | ok | -| spinlock: ok | ok | ok | - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 251 ++++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 226 insertions(+), 25 deletions(-) - ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #include "rtmutex_common.h" - -@@ -1221,6 
+1222,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init); - - #endif /* PREEMPT_RT_FULL */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); -+ -+ if (!hold_ctx) -+ return 0; -+ -+ if (unlikely(ctx == hold_ctx)) -+ return -EALREADY; -+ -+ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && -+ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); -+ ctx->contending_lock = ww; -+#endif -+ return -EDEADLK; -+ } -+ -+ return 0; -+} -+#else -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ BUG(); -+ return 0; -+} -+ -+#endif -+ - static inline int - try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) -@@ -1478,7 +1513,8 @@ void rt_mutex_adjust_pi(struct task_stru - static int __sched - __rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- struct rt_mutex_waiter *waiter) -+ struct rt_mutex_waiter *waiter, -+ struct ww_acquire_ctx *ww_ctx) - { - int ret = 0; - -@@ -1501,6 +1537,12 @@ static int __sched - break; - } - -+ if (ww_ctx && ww_ctx->acquired > 0) { -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ if (ret) -+ break; -+ } -+ - raw_spin_unlock(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(waiter); -@@ -1535,13 +1577,90 @@ static void rt_mutex_handle_deadlock(int - } - } - -+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+#ifdef CONFIG_DEBUG_MUTEXES -+ /* -+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, -+ * but released with a normal mutex_unlock in this call. -+ * -+ * This should never happen, always use ww_mutex_unlock. -+ */ -+ DEBUG_LOCKS_WARN_ON(ww->ctx); -+ -+ /* -+ * Not quite done after calling ww_acquire_done() ? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); -+ -+ if (ww_ctx->contending_lock) { -+ /* -+ * After -EDEADLK you tried to -+ * acquire a different ww_mutex? Bad! -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); -+ -+ /* -+ * You called ww_mutex_lock after receiving -EDEADLK, -+ * but 'forgot' to unlock everything else first? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); -+ ww_ctx->contending_lock = NULL; -+ } -+ -+ /* -+ * Naughty, using a different class will lead to undefined behavior! -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); -+#endif -+ ww_ctx->acquired++; -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct rt_mutex_waiter *waiter, *n; -+ -+ /* -+ * This branch gets optimized out for the common case, -+ * and is only important for ww_mutex_lock. -+ */ -+ ww_mutex_lock_acquired(ww, ww_ctx); -+ ww->ctx = ww_ctx; -+ -+ /* -+ * Give any possible sleeping processes the chance to wake up, -+ * so they can recheck if they have to back off. 
-+ */ -+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, -+ tree_entry) { -+ /* XXX debug rt mutex waiter wakeup */ -+ -+ BUG_ON(waiter->lock != lock); -+ rt_mutex_wake_waiter(waiter); -+ } -+} -+ -+#else -+ -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ BUG(); -+} -+#endif -+ - /* - * Slow path lock function: - */ - static int __sched - rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx) - { - struct rt_mutex_waiter waiter; - int ret = 0; -@@ -1552,6 +1671,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, - - /* Try to acquire the lock again: */ - if (try_to_take_rt_mutex(lock, current, NULL)) { -+ if (ww_ctx) -+ ww_mutex_account_lock(lock, ww_ctx); - raw_spin_unlock(&lock->wait_lock); - return 0; - } -@@ -1566,13 +1687,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, - - if (likely(!ret)) - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); -+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, -+ ww_ctx); -+ else if (ww_ctx) { -+ /* ww_mutex received EDEADLK, let it become EALREADY */ -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ BUG_ON(!ret); -+ } - - if (unlikely(ret)) { - __set_current_state(TASK_RUNNING); - if (rt_mutex_has_waiters(lock)) - remove_waiter(lock, &waiter); -- rt_mutex_handle_deadlock(ret, chwalk, &waiter); -+ /* ww_mutex want to report EDEADLK/EALREADY, let them */ -+ if (!ww_ctx) -+ rt_mutex_handle_deadlock(ret, chwalk, &waiter); -+ } else if (ww_ctx) { -+ ww_mutex_account_lock(lock, ww_ctx); - } - - /* -@@ -1701,31 +1832,36 @@ static bool __sched rt_mutex_slowunlock( - */ - static inline int - rt_mutex_fastlock(struct rt_mutex *lock, int state, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); - return 0; - } else -- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); -+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, -+ ww_ctx); - } - - static inline int - rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); - return 0; - } else -- return slowfn(lock, state, timeout, chwalk); -+ return slowfn(lock, state, timeout, chwalk, ww_ctx); - } - - static inline int -@@ -1772,7 +1908,7 @@ void __sched rt_mutex_lock(struct rt_mut - { - might_sleep(); - -- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); -+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock); - -@@ -1789,7 +1925,7 @@ int __sched rt_mutex_lock_interruptible( - { - might_sleep(); - -- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); -+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock); - } - 
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); - -@@ -1802,7 +1938,7 @@ int rt_mutex_timed_futex_lock(struct rt_ - might_sleep(); - - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, -- RT_MUTEX_FULL_CHAINWALK, -+ RT_MUTEX_FULL_CHAINWALK, NULL, - rt_mutex_slowlock); - } - -@@ -1821,7 +1957,7 @@ int __sched rt_mutex_lock_killable(struc - { - might_sleep(); - -- return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock); -+ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); - -@@ -1845,6 +1981,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc - - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_MIN_CHAINWALK, -+ NULL, - rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); -@@ -2099,7 +2236,7 @@ int rt_mutex_finish_proxy_lock(struct rt - set_current_state(TASK_INTERRUPTIBLE); - - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); -+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); - - if (unlikely(ret)) - remove_waiter(lock, waiter); -@@ -2115,24 +2252,88 @@ int rt_mutex_finish_proxy_lock(struct rt - return ret; - } - --#ifdef CONFIG_PREEMPT_RT_FULL --struct ww_mutex { --}; --struct ww_acquire_ctx { --}; --int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+static inline int -+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) - { -- BUG(); -+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH -+ unsigned tmp; -+ -+ if (ctx->deadlock_inject_countdown-- == 0) { -+ tmp = ctx->deadlock_inject_interval; -+ if (tmp > UINT_MAX/4) -+ tmp = UINT_MAX; -+ else -+ tmp = tmp*2 + tmp + tmp/2; -+ -+ ctx->deadlock_inject_interval = tmp; -+ ctx->deadlock_inject_countdown = tmp; -+ ctx->contending_lock = lock; -+ -+ ww_mutex_unlock(lock); -+ -+ return -EDEADLK; -+ } -+#endif -+ -+ return 0; - } --EXPORT_SYMBOL_GPL(__ww_mutex_lock); --int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+int __sched -+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) - { -- BUG(); -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ww_ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ww_ctx); -+ -+ return ret; - } - EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); -+ -+int __sched -+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+{ -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ww_ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ww_ctx); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(__ww_mutex_lock); -+ - void __sched ww_mutex_unlock(struct ww_mutex *lock) - { -- BUG(); -+ int nest = !!lock->ctx; -+ -+ /* -+ * The unlocking fastpath is the 0->1 transition from 'locked' -+ * into 'unlocked' state: -+ */ -+ if (nest) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); -+#endif -+ if (lock->ctx->acquired > 0) -+ lock->ctx->acquired--; -+ lock->ctx = NULL; -+ } -+ -+ 
mutex_release(&lock->base.dep_map, nest, _RET_IP_);
-+ rt_mutex_unlock(&lock->base.lock);
- }
--EXPORT_SYMBOL_GPL(ww_mutex_unlock);
-+EXPORT_SYMBOL(ww_mutex_unlock);
- #endif
diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
deleted file mode 100644
index a55e6c0cb..000000000
--- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Subject: rtmutex: Avoid include hell
-From: Thomas Gleixner
-Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-Include only the required raw types. This avoids pulling in the
-complete spinlock header which in turn requires rtmutex.h at some point.
-
-Signed-off-by: Thomas Gleixner
---
- include/linux/rtmutex.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/linux/rtmutex.h
-+++ b/include/linux/rtmutex.h
-@@ -14,7 +14,7 @@
-
- #include
- #include
--#include
-+#include
-
- extern int max_lock_depth; /* for sysctl */
-
diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
deleted file mode 100644
index c7e99ebf5..000000000
--- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch
+++ /dev/null
@@ -1,239 +0,0 @@
-Subject: rtmutex: Handle the various new futex race conditions
-From: Thomas Gleixner
-Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-RT opens a few new interesting race conditions in the rtmutex/futex
-combo due to the futex hash bucket lock being a 'sleeping' spinlock and
-therefore not disabling preemption.
-
-Signed-off-by: Thomas Gleixner
---
- kernel/futex.c | 77 ++++++++++++++++++++++++++++++++--------
- kernel/locking/rtmutex.c | 36 +++++++++++++++---
- kernel/locking/rtmutex_common.h | 2 +
- 3 files changed, 94 insertions(+), 21 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -1812,6 +1812,16 @@ static int futex_requeue(u32 __user *uad
- requeue_pi_wake_futex(this, &key2, hb2);
- drop_count++;
- continue;
-+ } else if (ret == -EAGAIN) {
-+ /*
-+ * Waiter was woken by timeout or
-+ * signal and has set pi_blocked_on to
-+ * PI_WAKEUP_INPROGRESS before we
-+ * tried to enqueue it on the rtmutex.
-+ */
-+ this->pi_state = NULL;
-+ free_pi_state(pi_state);
-+ continue;
- } else if (ret) {
- /* -EDEADLK */
- this->pi_state = NULL;
-@@ -2672,7 +2682,7 @@ static int futex_wait_requeue_pi(u32 __u
- struct hrtimer_sleeper timeout, *to = NULL;
- struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
-- struct futex_hash_bucket *hb;
-+ struct futex_hash_bucket *hb, *hb2;
- union futex_key key2 = FUTEX_KEY_INIT;
- struct futex_q q = futex_q_init;
- int res, ret;
-@@ -2731,20 +2741,55 @@ static int futex_wait_requeue_pi(u32 __u
- /* Queue the futex_q, drop the hb lock, wait for wakeup. */
- futex_wait_queue_me(hb, &q, to);
-
-- spin_lock(&hb->lock);
-- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-- spin_unlock(&hb->lock);
-- if (ret)
-- goto out_put_keys;
-+ /*
-+ * On RT we must avoid races with requeue and trying to block
-+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
-+ * serializing access to pi_blocked_on with pi_lock.
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ if (current->pi_blocked_on) {
-+ /*
-+ * We have been requeued or are in the process of
-+ * being requeued.
-+ */ -+ raw_spin_unlock_irq(¤t->pi_lock); -+ } else { -+ /* -+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS -+ * prevents a concurrent requeue from moving us to the -+ * uaddr2 rtmutex. After that we can safely acquire -+ * (and possibly block on) hb->lock. -+ */ -+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ spin_lock(&hb->lock); -+ -+ /* -+ * Clean up pi_blocked_on. We might leak it otherwise -+ * when we succeeded with the hb->lock in the fast -+ * path. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ current->pi_blocked_on = NULL; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -+ spin_unlock(&hb->lock); -+ if (ret) -+ goto out_put_keys; -+ } - - /* -- * In order for us to be here, we know our q.key == key2, and since -- * we took the hb->lock above, we also know that futex_requeue() has -- * completed and we no longer have to concern ourselves with a wakeup -- * race with the atomic proxy lock acquisition by the requeue code. The -- * futex_requeue dropped our key1 reference and incremented our key2 -- * reference count. -+ * In order to be here, we have either been requeued, are in -+ * the process of being requeued, or requeue successfully -+ * acquired uaddr2 on our behalf. If pi_blocked_on was -+ * non-null above, we may be racing with a requeue. Do not -+ * rely on q->lock_ptr to be hb2->lock until after blocking on -+ * hb->lock or hb2->lock. The futex_requeue dropped our key1 -+ * reference and incremented our key2 reference count. - */ -+ hb2 = hash_futex(&key2); - - /* Check if the requeue code acquired the second futex for us. */ - if (!q.rt_waiter) { -@@ -2753,9 +2798,10 @@ static int futex_wait_requeue_pi(u32 __u - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); -- spin_unlock(q.lock_ptr); -+ spin_unlock(&hb2->lock); - } - } else { - /* -@@ -2768,7 +2814,8 @@ static int futex_wait_requeue_pi(u32 __u - ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter); - debug_rt_mutex_free_waiter(&rt_waiter); - -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - /* - * Fixup the pi_state owner and possibly acquire the lock if we - * haven't already. ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -69,6 +69,11 @@ static void fixup_rt_mutex_waiters(struc - clear_rt_mutex_waiters(lock); - } - -+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -+{ -+ return waiter && waiter != PI_WAKEUP_INPROGRESS; -+} -+ - /* - * We can speed up the acquire/release, if there's no debugging state to be - * set up. -@@ -355,7 +360,8 @@ int max_lock_depth = 1024; - - static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) - { -- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; -+ return rt_mutex_real_waiter(p->pi_blocked_on) ? -+ p->pi_blocked_on->lock : NULL; - } - - /* -@@ -492,7 +498,7 @@ static int rt_mutex_adjust_prio_chain(st - * reached or the state of the chain has changed while we - * dropped the locks. - */ -- if (!waiter) -+ if (!rt_mutex_real_waiter(waiter)) - goto out_unlock_pi; - - /* -@@ -909,6 +915,23 @@ static int task_blocks_on_rt_mutex(struc - return -EDEADLK; - - raw_spin_lock_irqsave(&task->pi_lock, flags); -+ -+ /* -+ * In the case of futex requeue PI, this will be a proxy -+ * lock. 
The task will wake unaware that it is enqueueed on -+ * this lock. Avoid blocking on two locks and corrupting -+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS -+ * flag. futex_wait_requeue_pi() sets this when it wakes up -+ * before requeue (due to a signal or timeout). Do not enqueue -+ * the task if PI_WAKEUP_INPROGRESS is set. -+ */ -+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ return -EAGAIN; -+ } -+ -+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); -+ - __rt_mutex_adjust_prio(task); - waiter->task = task; - waiter->lock = lock; -@@ -932,7 +955,7 @@ static int task_blocks_on_rt_mutex(struc - rt_mutex_enqueue_pi(owner, waiter); - - __rt_mutex_adjust_prio(owner); -- if (owner->pi_blocked_on) -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) - chain_walk = 1; - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { - chain_walk = 1; -@@ -1017,7 +1040,7 @@ static void remove_waiter(struct rt_mute - { - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); - struct task_struct *owner = rt_mutex_owner(lock); -- struct rt_mutex *next_lock; -+ struct rt_mutex *next_lock = NULL; - unsigned long flags; - - raw_spin_lock_irqsave(¤t->pi_lock, flags); -@@ -1042,7 +1065,8 @@ static void remove_waiter(struct rt_mute - __rt_mutex_adjust_prio(owner); - - /* Store the lock on which owner is blocked or NULL */ -- next_lock = task_blocked_on_lock(owner); -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) -+ next_lock = task_blocked_on_lock(owner); - - raw_spin_unlock_irqrestore(&owner->pi_lock, flags); - -@@ -1078,7 +1102,7 @@ void rt_mutex_adjust_pi(struct task_stru - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; -- if (!waiter || (waiter->prio == task->prio && -+ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio && - !dl_prio(task->prio))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -97,6 +97,8 @@ enum rtmutex_chainwalk { - /* - * PI-futex support (proxy locking functions, etc.): - */ -+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+ - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch deleted file mode 100644 index 734f66a11..000000000 --- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch +++ /dev/null @@ -1,52 +0,0 @@ -Subject: rtmutex: Add rtmutex_lock_killable() -From: Thomas Gleixner -Date: Thu, 09 Jun 2011 11:43:52 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Add "killable" type to rtmutex. We need this since rtmutex are used as -"normal" mutexes which do use this type. 
-
-Signed-off-by: Thomas Gleixner
---
- include/linux/rtmutex.h | 1 +
- kernel/locking/rtmutex.c | 19 +++++++++++++++++++
- 2 files changed, 20 insertions(+)
-
---- a/include/linux/rtmutex.h
-+++ b/include/linux/rtmutex.h
-@@ -91,6 +91,7 @@ extern void rt_mutex_destroy(struct rt_m
-
- extern void rt_mutex_lock(struct rt_mutex *lock);
- extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1458,6 +1458,25 @@ int rt_mutex_timed_futex_lock(struct rt_
- }
-
- /**
-+ * rt_mutex_lock_killable - lock a rt_mutex killable
-+ *
-+ * @lock: the rt_mutex to be locked
-+ * @detect_deadlock: deadlock detection on/off
-+ *
-+ * Returns:
-+ * 0 on success
-+ * -EINTR when interrupted by a signal
-+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
-+ */
-+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-+{
-+ might_sleep();
-+
-+ return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
-+}
-+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-+
-+/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- * the timeout structure is provided
- * by the caller
diff --git a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
deleted file mode 100644
index dc7a4036a..000000000
--- a/debian/patches/features/all/rt/rtmutex-trylock-is-okay-on-RT.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From: Sebastian Andrzej Siewior
-Date: Wed 02 Dec 2015 11:34:07 +0100
-Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-A non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
--RT we don't run softirqs in IRQ context but in thread context, so it is
-not an issue here.
-
-Signed-off-by: Sebastian Andrzej Siewior
---
- kernel/locking/rtmutex.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1469,7 +1469,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
- */
- int __sched rt_mutex_trylock(struct rt_mutex *lock)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (WARN_ON(in_irq() || in_nmi()))
-+#else
- if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
-+#endif
- return 0;
-
- return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
diff --git a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch b/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
deleted file mode 100644
index f7713d12a..000000000
--- a/debian/patches/features/all/rt/rtmutex_dont_include_rcu.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Sebastian Andrzej Siewior
-Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-The RCU header pulls in spinlock.h and fails due to not-yet-defined types:
-
-|In file included from include/linux/spinlock.h:275:0,
-| from include/linux/rcupdate.h:38,
-| from include/linux/rbtree.h:34,
-| from include/linux/rtmutex.h:17,
-| from include/linux/spinlock_types.h:18,
-| from kernel/bounds.c:13:
-|include/linux/rwlock_rt.h:16:38: error: unknown type name ‘rwlock_t’
-| extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
-| ^
-
-This patch moves the only RCU user from the header file into the C file so
-the inclusion can be avoided.
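For context, a rough sketch of the writer-side pattern that rb_link_node_rcu() serves (the node type, key, and locking are assumed for illustration, not from the patch); readers walk the tree under rcu_read_lock() while writers publish new nodes under their own lock:

	struct item {				/* hypothetical node type */
		struct rb_node node;
		unsigned long key;
	};

	/* Caller holds the writer-side lock of the tree. */
	static void item_insert(struct rb_root *root, struct item *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			parent = *link;
			if (new->key < rb_entry(parent, struct item, node)->key)
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}
		/* The rcu_assign_pointer() inside rb_link_node_rcu()
		 * publishes the node safely for lockless readers. */
		rb_link_node_rcu(&new->node, parent, link);
		rb_insert_color(&new->node, root);
	}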
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/rbtree.h | 11 ++--------- - lib/rbtree.c | 11 +++++++++++ - 2 files changed, 13 insertions(+), 9 deletions(-) - ---- a/include/linux/rbtree.h -+++ b/include/linux/rbtree.h -@@ -31,7 +31,6 @@ - - #include - #include --#include - - struct rb_node { - unsigned long __rb_parent_color; -@@ -86,14 +85,8 @@ static inline void rb_link_node(struct r - *rb_link = node; - } - --static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -- struct rb_node **rb_link) --{ -- node->__rb_parent_color = (unsigned long)parent; -- node->rb_left = node->rb_right = NULL; -- -- rcu_assign_pointer(*rb_link, node); --} -+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -+ struct rb_node **rb_link); - - #define rb_entry_safe(ptr, type, member) \ - ({ typeof(ptr) ____ptr = (ptr); \ ---- a/lib/rbtree.c -+++ b/lib/rbtree.c -@@ -23,6 +23,7 @@ - - #include - #include -+#include - - /* - * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree -@@ -590,3 +591,13 @@ struct rb_node *rb_first_postorder(const - return rb_left_deepest_node(root->rb_node); - } - EXPORT_SYMBOL(rb_first_postorder); -+ -+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -+ struct rb_node **rb_link) -+{ -+ node->__rb_parent_color = (unsigned long)parent; -+ node->rb_left = node->rb_right = NULL; -+ -+ rcu_assign_pointer(*rb_link, node); -+} -+EXPORT_SYMBOL(rb_link_node_rcu); diff --git a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch b/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch deleted file mode 100644 index d266152ae..000000000 --- a/debian/patches/features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: Paul Gortmaker -Date: Sat, 14 Feb 2015 11:01:16 -0500 -Subject: sas-ata/isci: dont't disable interrupts in qc_issue handler -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On 3.14-rt we see the following trace on Canoe Pass for -SCSI_ISCI "Intel(R) C600 Series Chipset SAS Controller" -when the sas qc_issue handler is run: - - BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:905 - in_atomic(): 0, irqs_disabled(): 1, pid: 432, name: udevd - CPU: 11 PID: 432 Comm: udevd Not tainted 3.14.28-rt22 #2 - Hardware name: Intel Corporation S2600CP/S2600CP, BIOS SE5C600.86B.02.01.0002.082220131453 08/22/2013 - ffff880fab500000 ffff880fa9f239c0 ffffffff81a2d273 0000000000000000 - ffff880fa9f239d8 ffffffff8107f023 ffff880faac23dc0 ffff880fa9f239f0 - ffffffff81a33cc0 ffff880faaeb1400 ffff880fa9f23a40 ffffffff815de891 - Call Trace: - [] dump_stack+0x4e/0x7a - [] __might_sleep+0xe3/0x160 - [] rt_spin_lock+0x20/0x50 - [] isci_task_execute_task+0x171/0x2f0 <----- - [] sas_ata_qc_issue+0x25b/0x2a0 - [] ata_qc_issue+0x1f3/0x370 - [] ? ata_scsi_invalid_field+0x40/0x40 - [] ata_scsi_translate+0xa5/0x1b0 - [] ata_sas_queuecmd+0x86/0x280 - [] sas_queuecommand+0x196/0x230 - [] ? get_parent_ip+0xd/0x50 - [] scsi_dispatch_cmd+0xb4/0x210 - [] scsi_request_fn+0x314/0x530 - -and gdb shows: - -(gdb) list * isci_task_execute_task+0x171 -0xffffffff815ddfb1 is in isci_task_execute_task (drivers/scsi/isci/task.c:138). 
-133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); -134 -135 for_each_sas_task(num, task) { -136 enum sci_status status = SCI_FAILURE; -137 -138 spin_lock_irqsave(&ihost->scic_lock, flags); <----- -139 idev = isci_lookup_device(task->dev); -140 io_ready = isci_device_io_ready(idev, task); -141 tag = isci_alloc_tag(ihost); -142 spin_unlock_irqrestore(&ihost->scic_lock, flags); -(gdb) - -In addition to the scic_lock, the function also contains locking of -the task_state_lock -- which is clearly not a candidate for raw lock -conversion. As can be seen by the comment nearby, we really should -be running the qc_issue code with interrupts enabled anyway. - - -Signed-off-by: Paul Gortmaker -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/scsi/libsas/sas_ata.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/libsas/sas_ata.c -+++ b/drivers/scsi/libsas/sas_ata.c -@@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(str - /* TODO: audit callers to ensure they are ready for qc_issue to - * unconditionally re-enable interrupts - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - spin_unlock(ap->lock); - - /* If the device fell off, no sense in issuing commands */ -@@ -255,7 +255,7 @@ static unsigned int sas_ata_qc_issue(str - - out: - spin_lock(ap->lock); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return ret; - } - diff --git a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch deleted file mode 100644 index 8678f1cb9..000000000 --- a/debian/patches/features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Juri Lelli -Date: Tue, 13 May 2014 15:30:20 +0200 -Subject: sched/deadline: dl_task_timer has to be irqsafe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -As for rt_period_timer, dl_task_timer has to be irqsafe. - -Signed-off-by: Juri Lelli -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/deadline.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_ - - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer->function = dl_task_timer; -+ timer->irqsafe = 1; - } - - static diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch deleted file mode 100644 index 520267ba6..000000000 --- a/debian/patches/features/all/rt/sched-delay-put-task.patch +++ /dev/null @@ -1,82 +0,0 @@ -Subject: sched: Move task_struct cleanup to RCU -From: Thomas Gleixner -Date: Tue, 31 May 2011 16:59:16 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -__put_task_struct() does quite some expensive work. We don't want to -burden random tasks with that. 
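The calling convention stays the same; only where the teardown work runs changes. A sketch of the usual reference pattern (the function name is invented):

	static void inspect_task(struct task_struct *p)	/* hypothetical */
	{
		get_task_struct(p);
		/* ... p stays valid here ... */
		put_task_struct(p);	/* with this patch, on RT a final put
					 * defers __put_task_struct() to an
					 * RCU callback instead of running it
					 * in the caller's context */
	}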
-
-Signed-off-by: Thomas Gleixner
---
- include/linux/sched.h | 13 +++++++++++++
- kernel/fork.c | 15 ++++++++++++++-
- 2 files changed, 27 insertions(+), 1 deletion(-)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1831,6 +1831,9 @@ struct task_struct {
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head put_rcu;
-+#endif
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
-@@ -2040,6 +2043,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __put_task_struct_cb(struct rcu_head *rhp);
-+
-+static inline void put_task_struct(struct task_struct *t)
-+{
-+ if (atomic_dec_and_test(&t->usage))
-+ call_rcu(&t->put_rcu, __put_task_struct_cb);
-+}
-+#else
- extern void __put_task_struct(struct task_struct *t);
-
- static inline void put_task_struct(struct task_struct *t)
-@@ -2047,6 +2059,7 @@ static inline void put_task_struct(struc
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
-+#endif
-
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- extern void task_cputime(struct task_struct *t,
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -244,7 +244,9 @@ static inline void put_signal_struct(str
- if (atomic_dec_and_test(&sig->sigcnt))
- free_signal_struct(sig);
- }
--
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static
-+#endif
- void __put_task_struct(struct task_struct *tsk)
- {
- WARN_ON(!tsk->exit_state);
-@@ -261,7 +263,18 @@ void __put_task_struc
- if (!profile_handoff_task(tsk))
- free_task(tsk);
- }
-+#ifndef CONFIG_PREEMPT_RT_BASE
- EXPORT_SYMBOL_GPL(__put_task_struct);
-+#else
-+void __put_task_struct_cb(struct rcu_head *rhp)
-+{
-+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
-+
-+ __put_task_struct(tsk);
-+
-+}
-+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-+#endif
-
- void __init __weak arch_task_cache_init(void) { }
-
diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
deleted file mode 100644
index d6e5990d4..000000000
--- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
-From: Thomas Gleixner
-Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-Carsten reported problems when running:
-
- taskset 01 chrt -f 1 sleep 1
-
-from within rc.local on an F15 machine. The task stays running and
-never gets on the run queue because some of the run queues have
-rt_throttled=1 which does not go away. Works fine from an ssh login
-shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well.
- -Signed-off-by: Thomas Gleixner ---- - init/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1106,6 +1106,7 @@ config CFS_BANDWIDTH - config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on CGROUP_SCHED -+ depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch deleted file mode 100644 index 8727490a7..000000000 --- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch +++ /dev/null @@ -1,32 +0,0 @@ -Subject: sched: Disable TTWU_QUEUE on RT -From: Thomas Gleixner -Date: Tue, 13 Sep 2011 16:42:35 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The queued remote wakeup mechanism can introduce rather large -latencies if the number of migrated tasks is high. Disable it for RT. - -Signed-off-by: Thomas Gleixner ---- - kernel/sched/features.h | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -45,11 +45,16 @@ SCHED_FEAT(LB_BIAS, true) - */ - SCHED_FEAT(NONTASK_CAPACITY, true) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+SCHED_FEAT(TTWU_QUEUE, false) -+#else -+ - /* - * Queue remote wakeups on the target CPU and process them - * using the scheduler IPI. Reduces rq->lock contention/bounces. - */ - SCHED_FEAT(TTWU_QUEUE, true) -+#endif - - #ifdef HAVE_RT_PUSH_IPI - /* diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch deleted file mode 100644 index 9ec18bbba..000000000 --- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch +++ /dev/null @@ -1,27 +0,0 @@ -Subject: sched: Limit the number of task migrations per batch -From: Thomas Gleixner -Date: Mon, 06 Jun 2011 12:12:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Put an upper limit on the number of tasks which are migrated per batch -to avoid large latencies. - -Signed-off-by: Thomas Gleixner ---- - kernel/sched/core.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -260,7 +260,11 @@ late_initcall(sched_init_debug); - * Number of tasks to iterate in a single balance run. - * Limited because this is done with IRQs disabled. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - const_debug unsigned int sysctl_sched_nr_migrate = 32; -+#else -+const_debug unsigned int sysctl_sched_nr_migrate = 8; -+#endif - - /* - * period over which we average the RT time consumption, measured diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch deleted file mode 100644 index 8057436c1..000000000 --- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch +++ /dev/null @@ -1,48 +0,0 @@ -Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep() -From: Thomas Gleixner -Date: Tue, 07 Jun 2011 09:19:06 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -RT changes the rcu_preempt_depth semantics, so we cannot check for it -in might_sleep(). 
-
-Signed-off-by: Thomas Gleixner
---
- include/linux/rcupdate.h | 7 +++++++
- kernel/sched/core.c | 2 +-
- 2 files changed, 8 insertions(+), 1 deletion(-)
-
---- a/include/linux/rcupdate.h
-+++ b/include/linux/rcupdate.h
-@@ -292,6 +292,11 @@ void synchronize_rcu(void);
- * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
- */
- #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+#define sched_rcu_preempt_depth() rcu_preempt_depth()
-+#else
-+static inline int sched_rcu_preempt_depth(void) { return 0; }
-+#endif
-
- #else /* #ifdef CONFIG_PREEMPT_RCU */
-
-@@ -317,6 +322,8 @@ static inline int rcu_preempt_depth(void
- return 0;
- }
-
-+#define sched_rcu_preempt_depth() rcu_preempt_depth()
-+
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
- /* Internal to kernel */
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -7667,7 +7667,7 @@ void __init sched_init(void)
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- static inline int preempt_count_equals(int preempt_offset)
- {
-- int nested = preempt_count() + rcu_preempt_depth();
-+ int nested = preempt_count() + sched_rcu_preempt_depth();
-
- return (nested == preempt_offset);
- }
diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
deleted file mode 100644
index 92e2c8782..000000000
--- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-Subject: sched: Move mmdrop to RCU on RT
-From: Thomas Gleixner
-Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
-
-Takes sleeping locks and calls into the memory allocator, so nothing
-we want to do in task switch and other atomic contexts.
-
-Signed-off-by: Thomas Gleixner
---
- include/linux/mm_types.h | 4 ++++
- include/linux/sched.h | 12 ++++++++++++
- kernel/fork.c | 13 +++++++++++++
- kernel/sched/core.c | 18 ++++++++++++++++--
- 4 files changed, 45 insertions(+), 2 deletions(-)
-
---- a/include/linux/mm_types.h
-+++ b/include/linux/mm_types.h
-@@ -11,6 +11,7 @@
- #include
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -504,6 +505,9 @@ struct mm_struct {
- bool tlb_flush_pending;
- #endif
- struct uprobes_state uprobes_state;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head delayed_drop;
-+#endif
- #ifdef CONFIG_X86_INTEL_MPX
- /* address of the bounds directory */
- void __user *bd_addr;
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -2606,12 +2606,24 @@ extern struct mm_struct * mm_alloc(void)
-
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
- {
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
-+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-+}
-+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
-+#endif
-+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -702,6 +702,19 @@ void __mmdrop(struct mm_struct *mm)
- }
- EXPORT_SYMBOL_GPL(__mmdrop);
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+/*
-+ * RCU callback for delayed mm drop.
Not strictly rcu, but we don't -+ * want another facility to make this work. -+ */ -+void __mmdrop_delayed(struct rcu_head *rhp) -+{ -+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); -+ -+ __mmdrop(mm); -+} -+#endif -+ - /* - * Decrement the use count and release all resources for an mm. - */ ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2593,8 +2593,12 @@ static struct rq *finish_task_switch(str - finish_arch_post_lock_switch(); - - fire_sched_in_preempt_notifiers(current); -+ /* -+ * We use mmdrop_delayed() here so we don't have to do the -+ * full __mmdrop() when we are the last user. -+ */ - if (mm) -- mmdrop(mm); -+ mmdrop_delayed(mm); - if (unlikely(prev_state == TASK_DEAD)) { - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); -@@ -5255,6 +5259,8 @@ void sched_setnuma(struct task_struct *p - #endif /* CONFIG_NUMA_BALANCING */ - - #ifdef CONFIG_HOTPLUG_CPU -+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); -+ - /* - * Ensures that the idle task is using init_mm right before its cpu goes - * offline. -@@ -5269,7 +5275,11 @@ void idle_task_exit(void) - switch_mm(mm, &init_mm, current); - finish_arch_post_lock_switch(); - } -- mmdrop(mm); -+ /* -+ * Defer the cleanup to an alive cpu. On RT we can neither -+ * call mmdrop() nor mmdrop_delayed() from here. -+ */ -+ per_cpu(idle_last_mm, smp_processor_id()) = mm; - } - - /* -@@ -5641,6 +5651,10 @@ migration_call(struct notifier_block *nf - - case CPU_DEAD: - calc_load_migrate(rq); -+ if (per_cpu(idle_last_mm, cpu)) { -+ mmdrop(per_cpu(idle_last_mm, cpu)); -+ per_cpu(idle_last_mm, cpu) = NULL; -+ } - break; - #endif - } diff --git a/debian/patches/features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch b/debian/patches/features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch deleted file mode 100644 index 8ee93b487..000000000 --- a/debian/patches/features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch +++ /dev/null @@ -1,262 +0,0 @@ -From: Thomas Gleixner -Date: Mon, 18 Jan 2016 17:21:59 +0100 -Subject: sched: provide a tsk_nr_cpus_allowed() helper -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows -us to change the representation of ->nr_cpus_allowed if required. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/sched.h | 5 +++++ - kernel/sched/core.c | 2 +- - kernel/sched/deadline.c | 28 ++++++++++++++-------------- - kernel/sched/rt.c | 24 ++++++++++++------------ - 4 files changed, 32 insertions(+), 27 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1833,6 +1833,11 @@ extern int arch_task_struct_size __read_ - /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ - #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -+static inline int tsk_nr_cpus_allowed(struct task_struct *p) -+{ -+ return p->nr_cpus_allowed; -+} -+ - #define TNF_MIGRATED 0x01 - #define TNF_NO_GROUP 0x02 - #define TNF_SHARED 0x04 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1624,7 +1624,7 @@ int select_task_rq(struct task_struct *p - { - lockdep_assert_held(&p->pi_lock); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); - - /* ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sche - { - struct task_struct *p = dl_task_of(dl_se); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - dl_rq->dl_nr_migratory++; - - update_dl_migration(dl_rq); -@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sche - { - struct task_struct *p = dl_task_of(dl_se); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - dl_rq->dl_nr_migratory--; - - update_dl_migration(dl_rq); -@@ -989,7 +989,7 @@ static void enqueue_task_dl(struct rq *r - - enqueue_dl_entity(&p->dl, pi_se, flags); - -- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) -+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_dl_task(rq, p); - } - -@@ -1067,9 +1067,9 @@ select_task_rq_dl(struct task_struct *p, - * try to make it stay here, it might be important. - */ - if (unlikely(dl_task(curr)) && -- (curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(curr) < 2 || - !dl_entity_preempt(&p->dl, &curr->dl)) && -- (p->nr_cpus_allowed > 1)) { -+ (tsk_nr_cpus_allowed(p) > 1)) { - int target = find_later_rq(p); - - if (target != -1 && -@@ -1090,7 +1090,7 @@ static void check_preempt_equal_dl(struc - * Current can't be migrated, useless to reschedule, - * let's hope p can move out. - */ -- if (rq->curr->nr_cpus_allowed == 1 || -+ if (tsk_nr_cpus_allowed(rq->curr) == 1 || - cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) - return; - -@@ -1098,7 +1098,7 @@ static void check_preempt_equal_dl(struc - * p is migratable, so let's not schedule it and - * see if it is pushed or pulled somewhere else. 
- */ -- if (p->nr_cpus_allowed != 1 && -+ if (tsk_nr_cpus_allowed(p) != 1 && - cpudl_find(&rq->rd->cpudl, p, NULL) != -1) - return; - -@@ -1212,7 +1212,7 @@ static void put_prev_task_dl(struct rq * - { - update_curr_dl(rq); - -- if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) -+ if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_dl_task(rq, p); - } - -@@ -1335,7 +1335,7 @@ static int find_later_rq(struct task_str - if (unlikely(!later_mask)) - return -1; - -- if (task->nr_cpus_allowed == 1) -+ if (tsk_nr_cpus_allowed(task) == 1) - return -1; - - /* -@@ -1480,7 +1480,7 @@ static struct task_struct *pick_next_pus - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); -- BUG_ON(p->nr_cpus_allowed <= 1); -+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!dl_task(p)); -@@ -1519,7 +1519,7 @@ static int push_dl_task(struct rq *rq) - */ - if (dl_task(rq->curr) && - dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && -- rq->curr->nr_cpus_allowed > 1) { -+ tsk_nr_cpus_allowed(rq->curr) > 1) { - resched_curr(rq); - return 0; - } -@@ -1666,9 +1666,9 @@ static void task_woken_dl(struct rq *rq, - { - if (!task_running(rq, p) && - !test_tsk_need_resched(rq->curr) && -- p->nr_cpus_allowed > 1 && -+ tsk_nr_cpus_allowed(p) > 1 && - dl_task(rq->curr) && -- (rq->curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(rq->curr) < 2 || - !dl_entity_preempt(&p->dl, &rq->curr->dl))) { - push_dl_tasks(rq); - } -@@ -1769,7 +1769,7 @@ static void switched_to_dl(struct rq *rq - { - if (task_on_rq_queued(p) && rq->curr != p) { - #ifdef CONFIG_SMP -- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) -+ if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) - queue_push_tasks(rq); - #else - if (dl_task(rq->curr)) ---- a/kernel/sched/rt.c -+++ b/kernel/sched/rt.c -@@ -326,7 +326,7 @@ static void inc_rt_migration(struct sche - rt_rq = &rq_of_rt_rq(rt_rq)->rt; - - rt_rq->rt_nr_total++; -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - rt_rq->rt_nr_migratory++; - - update_rt_migration(rt_rq); -@@ -343,7 +343,7 @@ static void dec_rt_migration(struct sche - rt_rq = &rq_of_rt_rq(rt_rq)->rt; - - rt_rq->rt_nr_total--; -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - rt_rq->rt_nr_migratory--; - - update_rt_migration(rt_rq); -@@ -1262,7 +1262,7 @@ enqueue_task_rt(struct rq *rq, struct ta - - enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); - -- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) -+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_task(rq, p); - } - -@@ -1351,7 +1351,7 @@ select_task_rq_rt(struct task_struct *p, - * will have to sort it out. - */ - if (curr && unlikely(rt_task(curr)) && -- (curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(curr) < 2 || - curr->prio <= p->prio)) { - int target = find_lowest_rq(p); - -@@ -1375,7 +1375,7 @@ static void check_preempt_equal_prio(str - * Current can't be migrated, useless to reschedule, - * let's hope p can move out. - */ -- if (rq->curr->nr_cpus_allowed == 1 || -+ if (tsk_nr_cpus_allowed(rq->curr) == 1 || - !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) - return; - -@@ -1383,7 +1383,7 @@ static void check_preempt_equal_prio(str - * p is migratable, so let's not schedule it and - * see if it is pushed or pulled somewhere else. 
- */ -- if (p->nr_cpus_allowed != 1 -+ if (tsk_nr_cpus_allowed(p) != 1 - && cpupri_find(&rq->rd->cpupri, p, NULL)) - return; - -@@ -1517,7 +1517,7 @@ static void put_prev_task_rt(struct rq * - * The previous task needs to be made eligible for pushing - * if it is still active - */ -- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) -+ if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_task(rq, p); - } - -@@ -1567,7 +1567,7 @@ static int find_lowest_rq(struct task_st - if (unlikely(!lowest_mask)) - return -1; - -- if (task->nr_cpus_allowed == 1) -+ if (tsk_nr_cpus_allowed(task) == 1) - return -1; /* No other targets possible */ - - if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) -@@ -1699,7 +1699,7 @@ static struct task_struct *pick_next_pus - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); -- BUG_ON(p->nr_cpus_allowed <= 1); -+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!rt_task(p)); -@@ -2059,9 +2059,9 @@ static void task_woken_rt(struct rq *rq, - { - if (!task_running(rq, p) && - !test_tsk_need_resched(rq->curr) && -- p->nr_cpus_allowed > 1 && -+ tsk_nr_cpus_allowed(p) > 1 && - (dl_task(rq->curr) || rt_task(rq->curr)) && -- (rq->curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(rq->curr) < 2 || - rq->curr->prio <= p->prio)) - push_rt_tasks(rq); - } -@@ -2134,7 +2134,7 @@ static void switched_to_rt(struct rq *rq - */ - if (task_on_rq_queued(p) && rq->curr != p) { - #ifdef CONFIG_SMP -- if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) -+ if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) - queue_push_tasks(rq); - #else - if (p->prio < rq->curr->prio) diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch deleted file mode 100644 index bdd8df120..000000000 --- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch +++ /dev/null @@ -1,94 +0,0 @@ -Subject: sched: Add saved_state for tasks blocked on sleeping locks -From: Thomas Gleixner -Date: Sat, 25 Jun 2011 09:21:04 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Spinlocks are state preserving in !RT. RT changes the state when a -task gets blocked on a lock. So we need to remember the state before -the lock contention. If a regular wakeup (not a RTmutex related -wakeup) happens, the saved_state is updated to running. When the lock -sleep is done, the saved state is restored. 
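The handshake described above, reduced to a compilable userspace sketch (WF_LOCK_SLEEPER and the field names follow the patch; the task struct and main() are stand-ins, not kernel code):

#include <stdio.h>

#define TASK_RUNNING       0L
#define TASK_INTERRUPTIBLE 1L
#define WF_LOCK_SLEEPER    0x08

struct task {
	long state;		/* what the scheduler sees */
	long saved_state;	/* state before blocking on a sleeping lock */
};

static int try_to_wake_up(struct task *p, long state, int wake_flags)
{
	if (!(p->state & state)) {
		/* Task runs only because an RT lock put it to sleep: a
		 * regular wakeup may still satisfy the saved state. */
		if (!(wake_flags & WF_LOCK_SLEEPER) && (p->saved_state & state))
			p->saved_state = TASK_RUNNING;
		return 0;
	}
	/* A regular wakeup unconditionally clears the lock-sleeper state. */
	if (!(wake_flags & WF_LOCK_SLEEPER))
		p->saved_state = TASK_RUNNING;
	p->state = TASK_RUNNING;
	return 1;
}

int main(void)
{
	struct task t = { .state = TASK_RUNNING, .saved_state = TASK_INTERRUPTIBLE };

	printf("woken=%d saved_state=%ld\n",
	       try_to_wake_up(&t, TASK_INTERRUPTIBLE, 0), t.saved_state);
	return 0;
}

Note that the first branch reports 0 even when it did update saved_state; the ttwu follow-up patch further below changes exactly that case to report success.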
- -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 2 ++ - kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++- - kernel/sched/sched.h | 1 + - 3 files changed, 33 insertions(+), 1 deletion(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1378,6 +1378,7 @@ struct tlbflush_unmap_batch { - - struct task_struct { - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ -+ volatile long saved_state; /* saved state for "spinlock sleepers" */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ -@@ -2483,6 +2484,7 @@ extern void xtime_update(unsigned long t - - extern int wake_up_state(struct task_struct *tsk, unsigned int state); - extern int wake_up_process(struct task_struct *tsk); -+extern int wake_up_lock_sleeper(struct task_struct * tsk); - extern void wake_up_new_task(struct task_struct *tsk); - #ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1949,8 +1949,25 @@ try_to_wake_up(struct task_struct *p, un - */ - smp_mb__before_spinlock(); - raw_spin_lock_irqsave(&p->pi_lock, flags); -- if (!(p->state & state)) -+ if (!(p->state & state)) { -+ /* -+ * The task might be running due to a spinlock sleeper -+ * wakeup. Check the saved state and set it to running -+ * if the wakeup condition is true. -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) { -+ if (p->saved_state & state) -+ p->saved_state = TASK_RUNNING; -+ } - goto out; -+ } -+ -+ /* -+ * If this is a regular wakeup, then we can unconditionally -+ * clear the saved state of a "lock sleeper". -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) -+ p->saved_state = TASK_RUNNING; - - trace_sched_waking(p); - -@@ -2083,6 +2100,18 @@ int wake_up_process(struct task_struct * - } - EXPORT_SYMBOL(wake_up_process); - -+/** -+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" -+ * @p: The process to be woken up. -+ * -+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate -+ * the nature of the wakeup. -+ */ -+int wake_up_lock_sleeper(struct task_struct *p) -+{ -+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); -+} -+ - int wake_up_state(struct task_struct *p, unsigned int state) - { - return try_to_wake_up(p, state, 0); ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -1100,6 +1100,7 @@ static inline void finish_lock_switch(st - #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ - #define WF_FORK 0x02 /* child wakeup after fork */ - #define WF_MIGRATED 0x4 /* internal use, task got migrated */ -+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ - - /* - * To aid in avoiding the subversion of "niceness" due to uneven distribution diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch deleted file mode 100644 index 45522345e..000000000 --- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch +++ /dev/null @@ -1,35 +0,0 @@ -Subject: sched: ttwu: Return success when only changing the saved_state value -From: Thomas Gleixner -Date: Tue, 13 Dec 2011 21:42:19 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When a task blocks on a rt lock, it saves the current state in -p->saved_state, so a lock related wake up will not destroy the -original state. 
- -When a real wakeup happens, while the task is running due to a lock -wakeup already, we update p->saved_state to TASK_RUNNING, but we do -not return success, which might cause another wakeup in the waitqueue -code and the task remains in the waitqueue list. Return success in -that case as well. - -Signed-off-by: Thomas Gleixner - ---- - kernel/sched/core.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1956,8 +1956,10 @@ try_to_wake_up(struct task_struct *p, un - * if the wakeup condition is true. - */ - if (!(wake_flags & WF_LOCK_SLEEPER)) { -- if (p->saved_state & state) -+ if (p->saved_state & state) { - p->saved_state = TASK_RUNNING; -+ success = 1; -+ } - } - goto out; - } diff --git a/debian/patches/features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch b/debian/patches/features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch deleted file mode 100644 index 76ab8733d..000000000 --- a/debian/patches/features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Thomas Gleixner -Date: Mon, 18 Jan 2016 17:10:39 +0100 -Subject: sched: use tsk_cpus_allowed() instead of accessing - ->cpus_allowed -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the future-safe accessor for struct task_struct's. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/cpudeadline.c | 4 ++-- - kernel/sched/cpupri.c | 4 ++-- - kernel/sched/deadline.c | 2 +- - 3 files changed, 5 insertions(+), 5 deletions(-) - ---- a/kernel/sched/cpudeadline.c -+++ b/kernel/sched/cpudeadline.c -@@ -103,10 +103,10 @@ int cpudl_find(struct cpudl *cp, struct - const struct sched_dl_entity *dl_se = &p->dl; - - if (later_mask && -- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { -+ cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) { - best_cpu = cpumask_any(later_mask); - goto out; -- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && -+ } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) && - dl_time_before(dl_se->deadline, cp->elements[0].dl)) { - best_cpu = cpudl_maximum(cp); - if (later_mask) ---- a/kernel/sched/cpupri.c -+++ b/kernel/sched/cpupri.c -@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struc - if (skip) - continue; - -- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) -+ if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids) - continue; - - if (lowest_mask) { -- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); -+ cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask); - - /* - * We have to ensure that we have at least one bit ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -1441,7 +1441,7 @@ static struct rq *find_lock_later_rq(str - if (double_lock_balance(rq, later_rq)) { - if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(later_rq->cpu, -- &task->cpus_allowed) || -+ tsk_cpus_allowed(task)) || - task_running(rq, task) || - !task_on_rq_queued(task))) { - double_unlock_balance(rq, later_rq); diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch deleted file mode 100644 index 4d2905817..000000000 --- a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ /dev/null @@ -1,38 
+0,0 @@ -From: Steven Rostedt -Date: Mon, 18 Mar 2013 15:12:49 -0400 -Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -In -rt, most spin_locks() turn into mutexes. One of these spin_lock -conversions is performed on the workqueue gcwq->lock. When the idle -worker is woken, the first thing it will do is grab that same lock and -it too will block, possibly jumping into the same code, but because -nr_running would already be decremented it prevents an infinite loop. - -But this is still a waste of CPU cycles, and it doesn't follow the method -of mainline, as new workers should only be woken when a worker thread is -truly going to sleep, and not just blocked on a spin_lock(). - -Check the saved_state too before waking up new workers. - - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3264,8 +3264,10 @@ static void __sched notrace __schedule(b - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. -+ * Only call wake up if prev isn't blocked on a sleeping -+ * spin lock. - */ -- if (prev->flags & PF_WQ_WORKER) { -+ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev, cpu); diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch deleted file mode 100644 index 4d9f12b56..000000000 --- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch +++ /dev/null @@ -1,115 +0,0 @@ -Subject: scsi/fcoe: Make RT aware. -From: Thomas Gleixner -Date: Sat, 12 Nov 2011 14:00:48 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Do not disable preemption while taking sleeping locks. All users look safe -for migrate_disable() only. 
- -Signed-off-by: Thomas Gleixner ---- - drivers/scsi/fcoe/fcoe.c | 18 +++++++++--------- - drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++-- - drivers/scsi/libfc/fc_exch.c | 4 ++-- - 3 files changed, 13 insertions(+), 13 deletions(-) - ---- a/drivers/scsi/fcoe/fcoe.c -+++ b/drivers/scsi/fcoe/fcoe.c -@@ -1286,7 +1286,7 @@ static void fcoe_percpu_thread_destroy(u - struct sk_buff *skb; - #ifdef CONFIG_SMP - struct fcoe_percpu_s *p0; -- unsigned targ_cpu = get_cpu(); -+ unsigned targ_cpu = get_cpu_light(); - #endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); -@@ -1342,7 +1342,7 @@ static void fcoe_percpu_thread_destroy(u - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -- put_cpu(); -+ put_cpu_light(); - #else - /* - * This a non-SMP scenario where the singular Rx thread is -@@ -1566,11 +1566,11 @@ static int fcoe_rcv(struct sk_buff *skb, - static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) - { - struct fcoe_percpu_s *fps; -- int rc; -+ int rc, cpu = get_cpu_light(); - -- fps = &get_cpu_var(fcoe_percpu); -+ fps = &per_cpu(fcoe_percpu, cpu); - rc = fcoe_get_paged_crc_eof(skb, tlen, fps); -- put_cpu_var(fcoe_percpu); -+ put_cpu_light(); - - return rc; - } -@@ -1766,11 +1766,11 @@ static inline int fcoe_filter_frames(str - return 0; - } - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -+ stats = per_cpu_ptr(lport->stats, get_cpu_light()); - stats->InvalidCRCCount++; - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); -- put_cpu(); -+ put_cpu_light(); - return -EINVAL; - } - -@@ -1846,13 +1846,13 @@ static void fcoe_recv_frame(struct sk_bu - goto drop; - - if (!fcoe_filter_frames(lport, fp)) { -- put_cpu(); -+ put_cpu_light(); - fc_exch_recv(lport, fp); - return; - } - drop: - stats->ErrorFrames++; -- put_cpu(); -+ put_cpu_light(); - kfree_skb(skb); - } - ---- a/drivers/scsi/fcoe/fcoe_ctlr.c -+++ b/drivers/scsi/fcoe/fcoe_ctlr.c -@@ -831,7 +831,7 @@ static unsigned long fcoe_ctlr_age_fcfs( - - INIT_LIST_HEAD(&del_list); - -- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); -+ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); - - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; -@@ -867,7 +867,7 @@ static unsigned long fcoe_ctlr_age_fcfs( - sel_time = fcf->time; - } - } -- put_cpu(); -+ put_cpu_light(); - - list_for_each_entry_safe(fcf, next, &del_list, list) { - /* Removes fcf from current list */ ---- a/drivers/scsi/libfc/fc_exch.c -+++ b/drivers/scsi/libfc/fc_exch.c -@@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc( - } - memset(ep, 0, sizeof(*ep)); - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = per_cpu_ptr(mp->pool, cpu); - spin_lock_bh(&pool->lock); -- put_cpu(); -+ put_cpu_light(); - - /* peek cache of free slot */ - if (pool->left != FC_XID_UNKNOWN) { diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch deleted file mode 100644 index 38a14ae6b..000000000 --- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch +++ /dev/null @@ -1,48 +0,0 @@ -Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll -From: John Kacur -Date: Fri, 27 Apr 2012 12:48:46 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -RT triggers the 
following: - -[ 11.307652] [] __might_sleep+0xe7/0x110 -[ 11.307663] [] rt_spin_lock+0x24/0x60 -[ 11.307670] [] ? rt_spin_lock_slowunlock+0x78/0x90 -[ 11.307703] [] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx] -[ 11.307736] [] qla2x00_poll+0x67/0x90 [qla2xxx] - -Function qla2x00_poll does local_irq_save() before calling qla24xx_intr_handler -which has a spinlock. Since spinlocks are sleepable on rt, it is not allowed -to call them with interrupts disabled. Therefore we use local_irq_save_nort() -instead which saves flags without disabling interrupts. - -This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt - -Suggested-by: Thomas Gleixner -Signed-off-by: John Kacur -Cc: Steven Rostedt -Cc: David Sommerseth -Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com - -Signed-off-by: Thomas Gleixner ---- - drivers/scsi/qla2xxx/qla_inline.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/qla2xxx/qla_inline.h -+++ b/drivers/scsi/qla2xxx/qla_inline.h -@@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp) - { - unsigned long flags; - struct qla_hw_data *ha = rsp->hw; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (IS_P3P_TYPE(ha)) - qla82xx_poll(0, rsp); - else - ha->isp_ops->intr_handler(0, rsp); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - static inline uint8_t * diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch deleted file mode 100644 index 64e8d2c2e..000000000 --- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch +++ /dev/null @@ -1,191 +0,0 @@ -Subject: seqlock: Prevent rt starvation -From: Thomas Gleixner -Date: Wed, 22 Feb 2012 12:03:30 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If a low prio writer gets preempted while holding the seqlock write -locked, a high prio reader spins forever on RT. - -To prevent this let the reader grab the spinlock, so it blocks and -eventually boosts the writer. This way the writer can proceed and -endless spinning is prevented. - -For seqcount writers we disable preemption over the update code -path. Thanks to Al Viro for distangling some VFS code to make that -possible. 
- -Nicholas Mc Guire: -- spin_lock+unlock => spin_unlock_wait -- __write_seqcount_begin => __raw_write_seqcount_begin - -Signed-off-by: Thomas Gleixner - - ---- - include/linux/seqlock.h | 56 +++++++++++++++++++++++++++++++++++++----------- - include/net/dst.h | 2 - - include/net/neighbour.h | 4 +-- - 3 files changed, 47 insertions(+), 15 deletions(-) - ---- a/include/linux/seqlock.h -+++ b/include/linux/seqlock.h -@@ -220,20 +220,30 @@ static inline int read_seqcount_retry(co - return __read_seqcount_retry(s, start); - } - -- -- --static inline void raw_write_seqcount_begin(seqcount_t *s) -+static inline void __raw_write_seqcount_begin(seqcount_t *s) - { - s->sequence++; - smp_wmb(); - } - --static inline void raw_write_seqcount_end(seqcount_t *s) -+static inline void raw_write_seqcount_begin(seqcount_t *s) -+{ -+ preempt_disable_rt(); -+ __raw_write_seqcount_begin(s); -+} -+ -+static inline void __raw_write_seqcount_end(seqcount_t *s) - { - smp_wmb(); - s->sequence++; - } - -+static inline void raw_write_seqcount_end(seqcount_t *s) -+{ -+ __raw_write_seqcount_end(s); -+ preempt_enable_rt(); -+} -+ - /** - * raw_write_seqcount_barrier - do a seq write barrier - * @s: pointer to seqcount_t -@@ -425,10 +435,32 @@ typedef struct { - /* - * Read side functions for starting and finalizing a read side section. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - static inline unsigned read_seqbegin(const seqlock_t *sl) - { - return read_seqcount_begin(&sl->seqcount); - } -+#else -+/* -+ * Starvation safe read side for RT -+ */ -+static inline unsigned read_seqbegin(seqlock_t *sl) -+{ -+ unsigned ret; -+ -+repeat: -+ ret = ACCESS_ONCE(sl->seqcount.sequence); -+ if (unlikely(ret & 1)) { -+ /* -+ * Take the lock and let the writer proceed (i.e. evtl -+ * boost it), otherwise we could loop here forever. 
-+ */ -+ spin_unlock_wait(&sl->lock); -+ goto repeat; -+ } -+ return ret; -+} -+#endif - - static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) - { -@@ -443,36 +475,36 @@ static inline unsigned read_seqretry(con - static inline void write_seqlock(seqlock_t *sl) - { - spin_lock(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock(&sl->lock); - } - - static inline void write_seqlock_bh(seqlock_t *sl) - { - spin_lock_bh(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_bh(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_bh(&sl->lock); - } - - static inline void write_seqlock_irq(seqlock_t *sl) - { - spin_lock_irq(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_irq(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irq(&sl->lock); - } - -@@ -481,7 +513,7 @@ static inline unsigned long __write_seql - unsigned long flags; - - spin_lock_irqsave(&sl->lock, flags); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - return flags; - } - -@@ -491,7 +523,7 @@ static inline unsigned long __write_seql - static inline void - write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irqrestore(&sl->lock, flags); - } - ---- a/include/net/dst.h -+++ b/include/net/dst.h -@@ -437,7 +437,7 @@ static inline void dst_confirm(struct ds - static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, - struct sk_buff *skb) - { -- const struct hh_cache *hh; -+ struct hh_cache *hh; - - if (dst->pending_confirm) { - unsigned long now = jiffies; ---- a/include/net/neighbour.h -+++ b/include/net/neighbour.h -@@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct - } - #endif - --static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) -+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) - { - unsigned int seq; - int hh_len; -@@ -501,7 +501,7 @@ struct neighbour_cb { - - #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) - --static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, -+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, - const struct net_device *dev) - { - unsigned int seq; diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch deleted file mode 100644 index 272de11ed..000000000 --- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch +++ /dev/null @@ -1,39 +0,0 @@ -Subject: signal: Make __lock_task_sighand() RT aware -From: Thomas Gleixner -Date: Fri, 22 Jul 2011 08:07:08 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -local_irq_save() + spin_lock(&sighand->siglock) does not work on --RT. Use the nort variants. 
- -Signed-off-by: Thomas Gleixner ---- - kernel/signal.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1276,12 +1276,12 @@ struct sighand_struct *__lock_task_sigha - * Disable interrupts early to avoid deadlocks. - * See rcu_read_unlock() comment header for details. - */ -- local_irq_save(*flags); -+ local_irq_save_nort(*flags); - rcu_read_lock(); - sighand = rcu_dereference(tsk->sighand); - if (unlikely(sighand == NULL)) { - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_irq_restore_nort(*flags); - break; - } - /* -@@ -1302,7 +1302,7 @@ struct sighand_struct *__lock_task_sigha - } - spin_unlock(&sighand->siglock); - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_irq_restore_nort(*flags); - } - - return sighand; diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch deleted file mode 100644 index 1bd5154b0..000000000 --- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch +++ /dev/null @@ -1,32 +0,0 @@ -Subject: signal: Revert ptrace preempt magic -From: Thomas Gleixner -Date: Wed, 21 Sep 2011 19:57:12 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more -than a bandaid around the ptrace design trainwreck. It's not a -correctness issue, it's merely a cosmetic bandaid. - -Signed-off-by: Thomas Gleixner ---- - kernel/signal.c | 8 -------- - 1 file changed, 8 deletions(-) - ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1846,15 +1846,7 @@ static void ptrace_stop(int exit_code, i - if (gstop_done && ptrace_reparented(current)) - do_notify_parent_cldstop(current, false, why); - -- /* -- * Don't want to allow preemption here, because -- * sys_ptrace() needs this task to be inactive. -- * -- * XXX: implement read_unlock_no_resched(). -- */ -- preempt_disable(); - read_unlock(&tasklist_lock); -- preempt_enable_no_resched(); - freezable_schedule(); - } else { - /* diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch deleted file mode 100644 index d52e3fc95..000000000 --- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ /dev/null @@ -1,199 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:56 -0500 -Subject: signals: Allow rt tasks to cache one sigqueue struct -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -To avoid allocation allow rt tasks to cache one sigqueue struct in -task struct. 
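The cache itself is a single per-task pointer managed lock-free with cmpxchg. A self-contained model of the two helpers the patch adds (C11 atomics standing in for the kernel's cmpxchg; the task struct is a stand-in):

#include <stdatomic.h>
#include <stddef.h>

struct sigqueue;	/* opaque in this sketch */

struct task {
	_Atomic(struct sigqueue *) sigqueue_cache;
};

/* Claim the cached entry; NULL if the slot is empty or we lost a race. */
static struct sigqueue *get_task_cache(struct task *t)
{
	struct sigqueue *q = atomic_load(&t->sigqueue_cache);

	if (!q || !atomic_compare_exchange_strong(&t->sigqueue_cache, &q, NULL))
		return NULL;
	return q;
}

/* Park q in an empty slot; 0 on success, 1 if the slot was occupied
 * (the caller then frees q the normal way, as in the patch). */
static int put_task_cache(struct task *t, struct sigqueue *q)
{
	struct sigqueue *expect = NULL;

	return atomic_compare_exchange_strong(&t->sigqueue_cache, &expect, q) ? 0 : 1;
}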
- -Signed-off-by: Thomas Gleixner - ---- - include/linux/sched.h | 1 - include/linux/signal.h | 1 - kernel/exit.c | 2 - - kernel/fork.c | 1 - kernel/signal.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++--- - 5 files changed, 69 insertions(+), 5 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1568,6 +1568,7 @@ struct task_struct { - /* signal handlers */ - struct signal_struct *signal; - struct sighand_struct *sighand; -+ struct sigqueue *sigqueue_cache; - - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ ---- a/include/linux/signal.h -+++ b/include/linux/signal.h -@@ -218,6 +218,7 @@ static inline void init_sigpending(struc - } - - extern void flush_sigqueue(struct sigpending *queue); -+extern void flush_task_sigqueue(struct task_struct *tsk); - - /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ - static inline int valid_signal(unsigned long sig) ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -144,7 +144,7 @@ static void __exit_signal(struct task_st - * Do this under ->siglock, we can race with another thread - * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. - */ -- flush_sigqueue(&tsk->pending); -+ flush_task_sigqueue(tsk); - tsk->sighand = NULL; - spin_unlock(&sighand->siglock); - ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1343,6 +1343,7 @@ static struct task_struct *copy_process( - spin_lock_init(&p->alloc_lock); - - init_sigpending(&p->pending); -+ p->sigqueue_cache = NULL; - - p->utime = p->stime = p->gtime = 0; - p->utimescaled = p->stimescaled = 0; ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -352,13 +353,30 @@ static bool task_participate_group_stop( - return false; - } - -+static inline struct sigqueue *get_task_cache(struct task_struct *t) -+{ -+ struct sigqueue *q = t->sigqueue_cache; -+ -+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) -+ return NULL; -+ return q; -+} -+ -+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) -+{ -+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) -+ return 0; -+ return 1; -+} -+ - /* - * allocate a new signal queue record - * - this may be called without locks if and only if t == current, otherwise an - * appropriate lock must be held to stop the target task from exiting - */ - static struct sigqueue * --__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) -+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit, int fromslab) - { - struct sigqueue *q = NULL; - struct user_struct *user; -@@ -375,7 +393,10 @@ static struct sigqueue * - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { -- q = kmem_cache_alloc(sigqueue_cachep, flags); -+ if (!fromslab) -+ q = get_task_cache(t); -+ if (!q) -+ q = kmem_cache_alloc(sigqueue_cachep, flags); - } else { - print_dropped_signal(sig); - } -@@ -392,6 +413,13 @@ static struct sigqueue * - return q; - } - -+static struct sigqueue * -+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit) -+{ -+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); -+} -+ - static void __sigqueue_free(struct sigqueue *q) - { - if (q->flags & SIGQUEUE_PREALLOC) -@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqu - kmem_cache_free(sigqueue_cachep, q); - } - -+static void sigqueue_free_current(struct 
sigqueue *q) -+{ -+ struct user_struct *up; -+ -+ if (q->flags & SIGQUEUE_PREALLOC) -+ return; -+ -+ up = q->user; -+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { -+ atomic_dec(&up->sigpending); -+ free_uid(up); -+ } else -+ __sigqueue_free(q); -+} -+ - void flush_sigqueue(struct sigpending *queue) - { - struct sigqueue *q; -@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *q - } - - /* -+ * Called from __exit_signal. Flush tsk->pending and -+ * tsk->sigqueue_cache -+ */ -+void flush_task_sigqueue(struct task_struct *tsk) -+{ -+ struct sigqueue *q; -+ -+ flush_sigqueue(&tsk->pending); -+ -+ q = get_task_cache(tsk); -+ if (q) -+ kmem_cache_free(sigqueue_cachep, q); -+} -+ -+/* - * Flush all pending signals for this kthread. - */ - void flush_signals(struct task_struct *t) -@@ -525,7 +583,7 @@ static void collect_signal(int sig, stru - still_pending: - list_del_init(&first->list); - copy_siginfo(info, &first->info); -- __sigqueue_free(first); -+ sigqueue_free_current(first); - } else { - /* - * Ok, it wasn't in the queue. This must be -@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *t - { - int signr; - -+ WARN_ON_ONCE(tsk != current); -+ - /* We only dequeue private signals from ourselves, we don't let - * signalfd steal them - */ -@@ -1485,7 +1545,8 @@ EXPORT_SYMBOL(kill_pid); - */ - struct sigqueue *sigqueue_alloc(void) - { -- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); -+ /* Preallocated sigqueue objects always from the slabcache ! */ -+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); - - if (q) - q->flags |= SIGQUEUE_PREALLOC; diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch deleted file mode 100644 index a225c852e..000000000 --- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch +++ /dev/null @@ -1,114 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 12 Jul 2011 15:38:34 +0200 -Subject: net: Use skbufhead with raw lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the rps lock as rawlock so we can keep irq-off regions. It looks low -latency. However we can't kfree() from this context therefore we defer this -to the softirq and use the tofree_queue list for it (similar to process_queue). 
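The deferral pattern in miniature (a compilable sketch with invented types; a pthread mutex stands in for the raw spinlock): under the raw lock buffers are only unlinked onto tofree_queue, and the actual freeing runs later from softirq context.

#include <pthread.h>
#include <stdlib.h>

struct buf { struct buf *next; };

struct backlog {
	pthread_mutex_t raw_lock;	/* models the raw spinlock */
	struct buf *input;		/* models input_pkt_queue */
	struct buf *tofree;		/* models tofree_queue */
};

static void flush_backlog(struct backlog *b)
{
	pthread_mutex_lock(&b->raw_lock);
	while (b->input) {		/* unlink only; no free under the lock */
		struct buf *skb = b->input;

		b->input = skb->next;
		skb->next = b->tofree;
		b->tofree = skb;
	}
	pthread_mutex_unlock(&b->raw_lock);
	/* the real code raises NET_RX_SOFTIRQ here */
}

static void softirq_free_pass(struct backlog *b)
{
	/* runs later in softirq context, where freeing is allowed */
	while (b->tofree) {
		struct buf *skb = b->tofree;

		b->tofree = skb->next;
		free(skb);
	}
}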
- -Signed-off-by: Thomas Gleixner ---- - include/linux/netdevice.h | 1 + - include/linux/skbuff.h | 7 +++++++ - net/core/dev.c | 19 +++++++++++++------ - 3 files changed, 21 insertions(+), 6 deletions(-) - ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -2546,6 +2546,7 @@ struct softnet_data { - unsigned int dropped; - struct sk_buff_head input_pkt_queue; - struct napi_struct backlog; -+ struct sk_buff_head tofree_queue; - - }; - ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -203,6 +203,7 @@ struct sk_buff_head { - - __u32 qlen; - spinlock_t lock; -+ raw_spinlock_t raw_lock; - }; - - struct sk_buff; -@@ -1463,6 +1464,12 @@ static inline void skb_queue_head_init(s - __skb_queue_head_init(list); - } - -+static inline void skb_queue_head_init_raw(struct sk_buff_head *list) -+{ -+ raw_spin_lock_init(&list->raw_lock); -+ __skb_queue_head_init(list); -+} -+ - static inline void skb_queue_head_init_class(struct sk_buff_head *list, - struct lock_class_key *class) - { ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -207,14 +207,14 @@ static inline struct hlist_head *dev_ind - static inline void rps_lock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_lock(&sd->input_pkt_queue.lock); -+ raw_spin_lock(&sd->input_pkt_queue.raw_lock); - #endif - } - - static inline void rps_unlock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_unlock(&sd->input_pkt_queue.lock); -+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); - #endif - } - -@@ -4050,7 +4050,7 @@ static void flush_backlog(void *arg) - skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->input_pkt_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -@@ -4059,10 +4059,13 @@ static void flush_backlog(void *arg) - skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->process_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -+ -+ if (!skb_queue_empty(&sd->tofree_queue)) -+ raise_softirq_irqoff(NET_RX_SOFTIRQ); - } - - static int napi_gro_complete(struct sk_buff *skb) -@@ -7469,6 +7472,9 @@ static int dev_cpu_callback(struct notif - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { -+ kfree_skb(skb); -+ } - - return NOTIFY_OK; - } -@@ -7770,8 +7776,9 @@ static int __init net_dev_init(void) - for_each_possible_cpu(i) { - struct softnet_data *sd = &per_cpu(softnet_data, i); - -- skb_queue_head_init(&sd->input_pkt_queue); -- skb_queue_head_init(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->input_pkt_queue); -+ skb_queue_head_init_raw(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->tofree_queue); - INIT_LIST_HEAD(&sd->poll_list); - sd->output_queue_tailp = &sd->output_queue; - #ifdef CONFIG_RPS diff --git a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch deleted file mode 100644 index 1acfe0e24..000000000 --- a/debian/patches/features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch +++ /dev/null @@ -1,48 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 15 Apr 2015 19:00:47 +0200 -Subject: slub: Disable SLUB_CPU_PARTIAL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 -|in_atomic(): 1, irqs_disabled(): 0, 
pid: 87, name: rcuop/7 -|1 lock held by rcuop/7/87: -| #0: (rcu_callback){......}, at: [] rcu_nocb_kthread+0x1ca/0x5d0 -|Preemption disabled at:[] put_cpu_partial+0x29/0x220 -| -|CPU: 0 PID: 87 Comm: rcuop/7 Tainted: G W 4.0.0-rt0+ #477 -|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014 -| 000000000007a9fc ffff88013987baf8 ffffffff817441c7 0000000000000007 -| 0000000000000000 ffff88013987bb18 ffffffff810eee51 0000000000000000 -| ffff88013fc10200 ffff88013987bb48 ffffffff8174a1c4 000000000007a9fc -|Call Trace: -| [] dump_stack+0x4f/0x90 -| [] ___might_sleep+0x121/0x1b0 -| [] rt_spin_lock+0x24/0x60 -| [] __free_pages_ok+0xaa/0x540 -| [] __free_pages+0x1d/0x30 -| [] __free_slab+0xc5/0x1e0 -| [] free_delayed+0x56/0x70 -| [] put_cpu_partial+0x14d/0x220 -| [] __slab_free+0x158/0x2c0 -| [] kmem_cache_free+0x221/0x2d0 -| [] file_free_rcu+0x2c/0x40 -| [] rcu_nocb_kthread+0x243/0x5d0 -| [] kthread+0xfc/0x120 -| [] ret_from_fork+0x58/0x90 - -Signed-off-by: Sebastian Andrzej Siewior ---- - init/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1748,7 +1748,7 @@ endchoice - - config SLUB_CPU_PARTIAL - default y -- depends on SLUB && SMP -+ depends on SLUB && SMP && !PREEMPT_RT_FULL - bool "SLUB per cpu partial cache" - help - Per cpu partial caches accellerate objects allocation and freeing diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch deleted file mode 100644 index 6c288a0dd..000000000 --- a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch +++ /dev/null @@ -1,48 +0,0 @@ -Subject: slub: Enable irqs for __GFP_WAIT -From: Thomas Gleixner -Date: Wed, 09 Jan 2013 12:08:15 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -SYSTEM_RUNNING might be too late for enabling interrupts. Allocations -with GFP_WAIT can happen before that. So use this as an indicator. 
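The condition the patch builds up in allocate_slab(), pulled out as a predicate (a sketch with stand-in names; the kernel open-codes this as the enableirqs flag): IRQs may be enabled around the page allocation if the caller is allowed to block, or, on RT, once the system is fully up.

#include <stdbool.h>

#define GFP_ALLOW_BLOCKING 0x01	/* models gfpflags_allow_blocking() */
#define SYSTEM_RUNNING     2	/* models the kernel's system_state */

static bool may_enable_irqs(unsigned int gfp_flags, int system_state, bool rt)
{
	if (gfp_flags & GFP_ALLOW_BLOCKING)
		return true;
	if (rt && system_state == SYSTEM_RUNNING)
		return true;
	return false;
}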
- -Signed-off-by: Thomas Gleixner ---- - mm/slub.c | 13 ++++++------- - 1 file changed, 6 insertions(+), 7 deletions(-) - ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -1405,14 +1405,17 @@ static struct page *allocate_slab(struct - gfp_t alloc_gfp; - void *start, *p; - int idx, order; -+ bool enableirqs = false; - - flags &= gfp_allowed_mask; - -+ if (gfpflags_allow_blocking(flags)) -+ enableirqs = true; - #ifdef CONFIG_PREEMPT_RT_FULL - if (system_state == SYSTEM_RUNNING) --#else -- if (gfpflags_allow_blocking(flags)) -+ enableirqs = true; - #endif -+ if (enableirqs) - local_irq_enable(); - - flags |= s->allocflags; -@@ -1483,11 +1486,7 @@ static struct page *allocate_slab(struct - page->frozen = 1; - - out: --#ifdef CONFIG_PREEMPT_RT_FULL -- if (system_state == SYSTEM_RUNNING) --#else -- if (gfpflags_allow_blocking(flags)) --#endif -+ if (enableirqs) - local_irq_disable(); - if (!page) - return NULL; diff --git a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch deleted file mode 100644 index 2a11c0621..000000000 --- a/debian/patches/features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch +++ /dev/null @@ -1,70 +0,0 @@ -From: Mike Galbraith -Date: Wed, 18 Feb 2015 15:09:23 +0100 -Subject: snd/pcm: fix snd_pcm_stream_lock*() irqs_disabled() splats -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Locking functions previously using read_lock_irq()/read_lock_irqsave() were -changed to local_irq_disable/save(), leading to gripes. Use nort variants. - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 -|in_atomic(): 0, irqs_disabled(): 1, pid: 5947, name: alsa-sink-ALC88 -|CPU: 5 PID: 5947 Comm: alsa-sink-ALC88 Not tainted 3.18.7-rt1 #9 -|Hardware name: MEDION MS-7848/MS-7848, BIOS M7848W08.404 11/06/2014 -| ffff880409316240 ffff88040866fa38 ffffffff815bdeb5 0000000000000002 -| 0000000000000000 ffff88040866fa58 ffffffff81073c86 ffffffffa03b2640 -| ffff88040239ec00 ffff88040866fa78 ffffffff815c3d34 ffffffffa03b2640 -|Call Trace: -| [] dump_stack+0x4f/0x9e -| [] __might_sleep+0xe6/0x150 -| [] __rt_spin_lock+0x24/0x50 -| [] rt_read_lock+0x34/0x40 -| [] snd_pcm_stream_lock+0x29/0x70 [snd_pcm] -| [] snd_pcm_playback_poll+0x5d/0x120 [snd_pcm] -| [] do_sys_poll+0x322/0x5b0 -| [] SyS_ppoll+0x1a8/0x1c0 -| [] system_call_fastpath+0x16/0x1b - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - sound/core/pcm_native.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/sound/core/pcm_native.c -+++ b/sound/core/pcm_native.c -@@ -123,7 +123,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock) - void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) - { - if (!substream->pcm->nonatomic) -- local_irq_disable(); -+ local_irq_disable_nort(); - snd_pcm_stream_lock(substream); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); -@@ -138,7 +138,7 @@ void snd_pcm_stream_unlock_irq(struct sn - { - snd_pcm_stream_unlock(substream); - if (!substream->pcm->nonatomic) -- local_irq_enable(); -+ local_irq_enable_nort(); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq); - -@@ -146,7 +146,7 @@ unsigned long _snd_pcm_stream_lock_irqsa - { - unsigned long flags = 0; - if (!substream->pcm->nonatomic) -- local_irq_save(flags); -+ local_irq_save_nort(flags); - snd_pcm_stream_lock(substream); - return flags; - } -@@ -164,7 +164,7 @@ void 
snd_pcm_stream_unlock_irqrestore(st - { - snd_pcm_stream_unlock(substream); - if (!substream->pcm->nonatomic) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore); - diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch deleted file mode 100644 index 21b4055c4..000000000 --- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch +++ /dev/null @@ -1,157 +0,0 @@ -Subject: softirq: Disable softirq stacks for RT -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 13:59:17 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Disable extra stacks for softirqs. We want to preempt softirqs and -having them on special IRQ-stack does not make this easier. - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/kernel/irq.c | 2 ++ - arch/powerpc/kernel/misc_32.S | 2 ++ - arch/powerpc/kernel/misc_64.S | 2 ++ - arch/sh/kernel/irq.c | 2 ++ - arch/sparc/kernel/irq_64.c | 2 ++ - arch/x86/entry/entry_64.S | 2 ++ - arch/x86/kernel/irq_32.c | 2 ++ - include/linux/interrupt.h | 2 +- - 8 files changed, 15 insertions(+), 1 deletion(-) - ---- a/arch/powerpc/kernel/irq.c -+++ b/arch/powerpc/kernel/irq.c -@@ -614,6 +614,7 @@ void irq_ctx_init(void) - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curtp, *irqtp; -@@ -631,6 +632,7 @@ void do_softirq_own_stack(void) - if (irqtp->flags) - set_bits(irqtp->flags, &curtp->flags); - } -+#endif - - irq_hw_number_t virq_to_hw(unsigned int virq) - { ---- a/arch/powerpc/kernel/misc_32.S -+++ b/arch/powerpc/kernel/misc_32.S -@@ -40,6 +40,7 @@ - * We store the saved ksp_limit in the unused part - * of the STACK_FRAME_OVERHEAD - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - stw r0,4(r1) -@@ -56,6 +57,7 @@ - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr -+#endif - - /* - * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); ---- a/arch/powerpc/kernel/misc_64.S -+++ b/arch/powerpc/kernel/misc_64.S -@@ -30,6 +30,7 @@ - - .text - -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - std r0,16(r1) -@@ -40,6 +41,7 @@ - ld r0,16(r1) - mtlr r0 - blr -+#endif - - _GLOBAL(call_do_irq) - mflr r0 ---- a/arch/sh/kernel/irq.c -+++ b/arch/sh/kernel/irq.c -@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu) - hardirq_ctx[cpu] = NULL; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curctx; -@@ -174,6 +175,7 @@ void do_softirq_own_stack(void) - "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" - ); - } -+#endif - #else - static inline void handle_one_irq(unsigned int irq) - { ---- a/arch/sparc/kernel/irq_64.c -+++ b/arch/sparc/kernel/irq_64.c -@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, st - set_irq_regs(old_regs); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; -@@ -868,6 +869,7 @@ void do_softirq_own_stack(void) - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } -+#endif - - #ifdef CONFIG_HOTPLUG_CPU - void fixup_irqs(void) ---- a/arch/x86/entry/entry_64.S -+++ b/arch/x86/entry/entry_64.S -@@ -867,6 +867,7 @@ END(native_load_gs_index) - jmp 2b - .previous - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* Call softirq on interrupt stack. Interrupts are off. 
*/ - ENTRY(do_softirq_own_stack) - pushq %rbp -@@ -879,6 +880,7 @@ ENTRY(do_softirq_own_stack) - decl PER_CPU_VAR(irq_count) - ret - END(do_softirq_own_stack) -+#endif - - #ifdef CONFIG_XEN - idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 ---- a/arch/x86/kernel/irq_32.c -+++ b/arch/x86/kernel/irq_32.c -@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu) - cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curstk; -@@ -146,6 +147,7 @@ void do_softirq_own_stack(void) - - call_on_stack(__do_softirq, isp); - } -+#endif - - bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) - { ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -447,7 +447,7 @@ struct softirq_action - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); - --#ifdef __ARCH_HAS_DO_SOFTIRQ -+#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL) - void do_softirq_own_stack(void); - #else - static inline void do_softirq_own_stack(void) diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch deleted file mode 100644 index 70a3d73a5..000000000 --- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch +++ /dev/null @@ -1,154 +0,0 @@ -Subject: softirq: Check preemption after reenabling interrupts -From: Thomas Gleixner -Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET) -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -raise_softirq_irqoff() disables interrupts and wakes the softirq -daemon, but after reenabling interrupts there is no preemption check, -so the execution of the softirq thread might be delayed arbitrarily. - -In principle we could add that check to local_irq_enable/restore, but -that's overkill as the raise_softirq_irqoff() sections are the only -ones which show this behaviour. 
- -Reported-by: Carsten Emde -Signed-off-by: Thomas Gleixner - ---- - block/blk-iopoll.c | 3 +++ - block/blk-softirq.c | 3 +++ - include/linux/preempt.h | 3 +++ - net/core/dev.c | 7 +++++++ - 4 files changed, 16 insertions(+) - ---- a/block/blk-iopoll.c -+++ b/block/blk-iopoll.c -@@ -35,6 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll - list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(blk_iopoll_sched); - -@@ -132,6 +133,7 @@ static void blk_iopoll_softirq(struct so - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - /** -@@ -201,6 +203,7 @@ static int blk_iopoll_cpu_notify(struct - this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; ---- a/block/blk-softirq.c -+++ b/block/blk-softirq.c -@@ -51,6 +51,7 @@ static void trigger_softirq(void *data) - raise_softirq_irqoff(BLOCK_SOFTIRQ); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /* -@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifie - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; -@@ -150,6 +152,7 @@ void __blk_complete_request(struct reque - goto do_local; - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /** ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -160,8 +160,10 @@ do { \ - - #ifdef CONFIG_PREEMPT_RT_BASE - # define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+# define preempt_check_resched_rt() preempt_check_resched() - #else - # define preempt_enable_no_resched() preempt_enable() -+# define preempt_check_resched_rt() barrier(); - #endif - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) -@@ -232,6 +234,7 @@ do { \ - #define preempt_disable_notrace() barrier() - #define preempt_enable_no_resched_notrace() barrier() - #define preempt_enable_notrace() barrier() -+#define preempt_check_resched_rt() barrier() - #define preemptible() 0 - - #endif /* CONFIG_PREEMPT_COUNT */ ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -2246,6 +2246,7 @@ static inline void __netif_reschedule(st - sd->output_queue_tailp = &q->next_sched; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - void __netif_schedule(struct Qdisc *q) -@@ -2327,6 +2328,7 @@ void __dev_kfree_skb_irq(struct sk_buff - __this_cpu_write(softnet_data.completion_queue, skb); - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__dev_kfree_skb_irq); - -@@ -3524,6 +3526,7 @@ static int enqueue_to_backlog(struct sk_ - rps_unlock(sd); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - - atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); -@@ -4512,6 +4515,7 @@ static void net_rps_action_and_irq_enabl - sd->rps_ipi_list = NULL; - - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Send pending IPI's to kick RPS processing on remote cpus. 
*/ - while (remsd) { -@@ -4525,6 +4529,7 @@ static void net_rps_action_and_irq_enabl - } else - #endif - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -4606,6 +4611,7 @@ void __napi_schedule(struct napi_struct - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__napi_schedule); - -@@ -7454,6 +7460,7 @@ static int dev_cpu_callback(struct notif - - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Process offline CPU's input_pkt_queue */ - while ((skb = __skb_dequeue(&oldsd->process_queue))) { diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch deleted file mode 100644 index 3c38f4f01..000000000 --- a/debian/patches/features/all/rt/softirq-split-locks.patch +++ /dev/null @@ -1,820 +0,0 @@ -From: Thomas Gleixner -Date: Thu, 04 Oct 2012 14:20:47 +0100 -Subject: softirq: Split softirq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The 3.x RT series removed the split softirq implementation in favour -of pushing softirq processing into the context of the thread which -raised it. Though this prevents us from handling the various softirqs -at different priorities. Now instead of reintroducing the split -softirq threads we split the locks which serialize the softirq -processing. - -If a softirq is raised in context of a thread, then the softirq is -noted on a per thread field, if the thread is in a bh disabled -region. If the softirq is raised from hard interrupt context, then the -bit is set in the flag field of ksoftirqd and ksoftirqd is invoked. -When a thread leaves a bh disabled region, then it tries to execute -the softirqs which have been raised in its own context. It acquires -the per softirq / per cpu lock for the softirq and then checks, -whether the softirq is still pending in the per cpu -local_softirq_pending() field. If yes, it runs the softirq. If no, -then some other task executed it already. This allows for zero config -softirq elevation in the context of user space tasks or interrupt -threads. 
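A minimal userspace model of that walk follows, with pthread mutexes standing in for the per-softirq/per-CPU local locks. The single-threaded driver, the two masks, and the names are illustrative assumptions of the sketch, not kernel code:

    #include <pthread.h>
    #include <stdio.h>
    #include <strings.h>

    #define NR_SOFTIRQS 10
    static pthread_mutex_t softirq_lock[NR_SOFTIRQS];
    static unsigned softirqs_raised;   /* per-task mask in the real scheme */
    static unsigned cpu_pending;       /* models local_softirq_pending() */

    static void handle_softirq(int nr) { printf("softirq %d handled\n", nr); }

    static void do_current_softirqs_model(void)
    {
        while (softirqs_raised) {
            int i = ffs((int)softirqs_raised) - 1;   /* lowest raised vector */
            unsigned mask = 1u << i;

            softirqs_raised &= ~mask;
            pthread_mutex_lock(&softirq_lock[i]);    /* serializes this vector */
            if (cpu_pending & mask) {                /* still pending here? */
                cpu_pending &= ~mask;
                handle_softirq(i);                   /* run it ourselves */
            }                                        /* else: someone else did */
            pthread_mutex_unlock(&softirq_lock[i]);
        }
    }

    int main(void)
    {
        for (int i = 0; i < NR_SOFTIRQS; i++)
            pthread_mutex_init(&softirq_lock[i], NULL);
        softirqs_raised = cpu_pending = (1u << 1) | (1u << 3);
        do_current_softirqs_model();
        return 0;
    }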
- -Signed-off-by: Thomas Gleixner ---- - include/linux/bottom_half.h | 34 +++ - include/linux/interrupt.h | 15 + - include/linux/preempt.h | 15 + - include/linux/sched.h | 3 - init/main.c | 1 - kernel/softirq.c | 488 +++++++++++++++++++++++++++++++++++++------- - kernel/time/tick-sched.c | 9 - net/core/dev.c | 6 - 8 files changed, 477 insertions(+), 94 deletions(-) - ---- a/include/linux/bottom_half.h -+++ b/include/linux/bottom_half.h -@@ -3,6 +3,39 @@ - - #include - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+extern void __local_bh_disable(void); -+extern void _local_bh_enable(void); -+extern void __local_bh_enable(void); -+ -+static inline void local_bh_disable(void) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void local_bh_enable(void) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void local_bh_enable_ip(unsigned long ip) -+{ -+ __local_bh_enable(); -+} -+ -+#else -+ - #ifdef CONFIG_TRACE_IRQFLAGS - extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); - #else -@@ -30,5 +63,6 @@ static inline void local_bh_enable(void) - { - __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); - } -+#endif - - #endif /* _LINUX_BH_H */ ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -444,10 +444,11 @@ struct softirq_action - void (*action)(struct softirq_action *); - }; - -+#ifndef CONFIG_PREEMPT_RT_FULL - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); -- --#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL) -+static inline void thread_do_softirq(void) { do_softirq(); } -+#ifdef __ARCH_HAS_DO_SOFTIRQ - void do_softirq_own_stack(void); - #else - static inline void do_softirq_own_stack(void) -@@ -455,6 +456,9 @@ static inline void do_softirq_own_stack( - __do_softirq(); - } - #endif -+#else -+extern void thread_do_softirq(void); -+#endif - - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); -@@ -462,6 +466,7 @@ extern void __raise_softirq_irqoff(unsig - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); -+extern void softirq_check_pending_idle(void); - - DECLARE_PER_CPU(struct task_struct *, ksoftirqd); - -@@ -619,6 +624,12 @@ void tasklet_hrtimer_cancel(struct taskl - tasklet_kill(&ttimer->tasklet); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void softirq_early_init(void); -+#else -+static inline void softirq_early_init(void) { } -+#endif -+ - /* - * Autoprobing for irqs: - * ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -50,7 +50,11 @@ - #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) - #define NMI_OFFSET (1UL << NMI_SHIFT) - --#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#else -+# define SOFTIRQ_DISABLE_OFFSET (0) -+#endif - - /* We use the MSB mostly because its available */ - #define PREEMPT_NEED_RESCHED 0x80000000 -@@ -59,9 +63,15 @@ - #include - - #define hardirq_count() (preempt_count() & HARDIRQ_MASK) --#define softirq_count() (preempt_count() & SOFTIRQ_MASK) - #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ - | NMI_MASK)) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -+# define 
in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -+#else -+# define softirq_count() (0UL) -+extern int in_serving_softirq(void); -+#endif - - /* - * Are we doing bottom half or hardware interrupt processing? -@@ -72,7 +82,6 @@ - #define in_irq() (hardirq_count()) - #define in_softirq() (softirq_count()) - #define in_interrupt() (irq_count()) --#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - - /* - * Are we in NMI context? ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1834,6 +1834,8 @@ struct task_struct { - #endif - #ifdef CONFIG_PREEMPT_RT_BASE - struct rcu_head put_rcu; -+ int softirq_nestcnt; -+ unsigned int softirqs_raised; - #endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -@@ -2099,6 +2101,7 @@ extern void thread_group_cputime_adjuste - /* - * Per process flags - */ -+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ - #define PF_EXITING 0x00000004 /* getting shut down */ - #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ - #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ ---- a/init/main.c -+++ b/init/main.c -@@ -530,6 +530,7 @@ asmlinkage __visible void __init start_k - setup_command_line(command_line); - setup_nr_cpu_ids(); - setup_per_cpu_areas(); -+ softirq_early_init(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ - - build_all_zonelists(NULL, NULL); ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - #include - - #define CREATE_TRACE_POINTS -@@ -63,6 +64,98 @@ const char * const softirq_to_name[NR_SO - "TASKLET", "SCHED", "HRTIMER", "RCU" - }; - -+#ifdef CONFIG_NO_HZ_COMMON -+# ifdef CONFIG_PREEMPT_RT_FULL -+ -+struct softirq_runner { -+ struct task_struct *runner[NR_SOFTIRQS]; -+}; -+ -+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); -+ -+static inline void softirq_set_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = current; -+} -+ -+static inline void softirq_clr_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = NULL; -+} -+ -+/* -+ * On preempt-rt a softirq running context might be blocked on a -+ * lock. There might be no other runnable task on this CPU because the -+ * lock owner runs on some other CPU. So we have to go into idle with -+ * the pending bit set. Therefor we need to check this otherwise we -+ * warn about false positives which confuses users and defeats the -+ * whole purpose of this test. -+ * -+ * This code is called with interrupts disabled. -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ u32 warnpending; -+ int i; -+ -+ if (rate_limit >= 10) -+ return; -+ -+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; -+ for (i = 0; i < NR_SOFTIRQS; i++) { -+ struct task_struct *tsk = sr->runner[i]; -+ -+ /* -+ * The wakeup code in rtmutex.c wakes up the task -+ * _before_ it sets pi_blocked_on to NULL under -+ * tsk->pi_lock. So we need to check for both: state -+ * and pi_blocked_on. 
-+ */ -+ if (tsk) { -+ raw_spin_lock(&tsk->pi_lock); -+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -+ /* Clear all bits pending in that task */ -+ warnpending &= ~(tsk->softirqs_raised); -+ warnpending &= ~(1 << i); -+ } -+ raw_spin_unlock(&tsk->pi_lock); -+ } -+ } -+ -+ if (warnpending) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ warnpending); -+ rate_limit++; -+ } -+} -+# else -+/* -+ * On !PREEMPT_RT we just printk rate limited: -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ -+ if (rate_limit < 10 && -+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ local_softirq_pending()); -+ rate_limit++; -+ } -+} -+# endif -+ -+#else /* !CONFIG_NO_HZ_COMMON */ -+static inline void softirq_set_runner(unsigned int sirq) { } -+static inline void softirq_clr_runner(unsigned int sirq) { } -+#endif -+ - /* - * we cannot loop indefinitely here to avoid userspace starvation, - * but we also don't want to introduce a worst case 1/HZ latency -@@ -78,6 +171,68 @@ static void wakeup_softirqd(void) - wake_up_process(tsk); - } - -+static void handle_softirq(unsigned int vec_nr) -+{ -+ struct softirq_action *h = softirq_vec + vec_nr; -+ int prev_count; -+ -+ prev_count = preempt_count(); -+ -+ kstat_incr_softirqs_this_cpu(vec_nr); -+ -+ trace_softirq_entry(vec_nr); -+ h->action(h); -+ trace_softirq_exit(vec_nr); -+ if (unlikely(prev_count != preempt_count())) { -+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -+ vec_nr, softirq_to_name[vec_nr], h->action, -+ prev_count, preempt_count()); -+ preempt_count_set(prev_count); -+ } -+} -+ -+#ifndef CONFIG_PREEMPT_RT_FULL -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return local_softirq_pending(); -+} -+ -+static void handle_pending_softirqs(u32 pending) -+{ -+ struct softirq_action *h = softirq_vec; -+ int softirq_bit; -+ -+ local_irq_enable(); -+ -+ h = softirq_vec; -+ -+ while ((softirq_bit = ffs(pending))) { -+ unsigned int vec_nr; -+ -+ h += softirq_bit - 1; -+ vec_nr = h - softirq_vec; -+ handle_softirq(vec_nr); -+ -+ h++; -+ pending >>= softirq_bit; -+ } -+ -+ rcu_bh_qs(); -+ local_irq_disable(); -+} -+ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ if (ksoftirqd_softirq_pending()) { -+ __do_softirq(); -+ local_irq_enable(); -+ cond_resched_rcu_qs(); -+ return; -+ } -+ local_irq_enable(); -+} -+ - /* - * preempt_count and SOFTIRQ_OFFSET usage: - * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -233,10 +388,8 @@ asmlinkage __visible void __do_softirq(v - unsigned long end = jiffies + MAX_SOFTIRQ_TIME; - unsigned long old_flags = current->flags; - int max_restart = MAX_SOFTIRQ_RESTART; -- struct softirq_action *h; - bool in_hardirq; - __u32 pending; -- int softirq_bit; - - /* - * Mask out PF_MEMALLOC s current task context is borrowed for the -@@ -255,36 +408,7 @@ asmlinkage __visible void __do_softirq(v - /* Reset the pending bitmask before enabling irqs */ - set_softirq_pending(0); - -- local_irq_enable(); -- -- h = softirq_vec; -- -- while ((softirq_bit = ffs(pending))) { -- unsigned int vec_nr; -- int prev_count; -- -- h += softirq_bit - 1; -- -- vec_nr = h - softirq_vec; -- prev_count = preempt_count(); -- -- kstat_incr_softirqs_this_cpu(vec_nr); -- -- trace_softirq_entry(vec_nr); -- h->action(h); -- trace_softirq_exit(vec_nr); -- if (unlikely(prev_count != preempt_count())) { -- pr_err("huh, entered softirq %u %s %p with 
preempt_count %08x, exited with %08x?\n", -- vec_nr, softirq_to_name[vec_nr], h->action, -- prev_count, preempt_count()); -- preempt_count_set(prev_count); -- } -- h++; -- pending >>= softirq_bit; -- } -- -- rcu_bh_qs(); -- local_irq_disable(); -+ handle_pending_softirqs(pending); - - pending = local_softirq_pending(); - if (pending) { -@@ -321,6 +445,246 @@ asmlinkage __visible void do_softirq(voi - } - - /* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an interrupt or softirq, we're done -+ * (this also catches softirq-disabled code). We will -+ * actually run the softirq once we return from -+ * the irq or softirq. -+ * -+ * Otherwise we wake up ksoftirqd to make sure we -+ * schedule the softirq soon. -+ */ -+ if (!in_interrupt()) -+ wakeup_softirqd(); -+} -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ trace_softirq_raise(nr); -+ or_softirq_pending(1UL << nr); -+} -+ -+static inline void local_bh_disable_nort(void) { local_bh_disable(); } -+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } -+static void ksoftirqd_set_sched_params(unsigned int cpu) { } -+static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } -+ -+#else /* !PREEMPT_RT_FULL */ -+ -+/* -+ * On RT we serialize softirq execution with a cpu local lock per softirq -+ */ -+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); -+ -+void __init softirq_early_init(void) -+{ -+ int i; -+ -+ for (i = 0; i < NR_SOFTIRQS; i++) -+ local_irq_lock_init(local_softirq_locks[i]); -+} -+ -+static void lock_softirq(int which) -+{ -+ local_lock(local_softirq_locks[which]); -+} -+ -+static void unlock_softirq(int which) -+{ -+ local_unlock(local_softirq_locks[which]); -+} -+ -+static void do_single_softirq(int which) -+{ -+ unsigned long old_flags = current->flags; -+ -+ current->flags &= ~PF_MEMALLOC; -+ vtime_account_irq_enter(current); -+ current->flags |= PF_IN_SOFTIRQ; -+ lockdep_softirq_enter(); -+ local_irq_enable(); -+ handle_softirq(which); -+ local_irq_disable(); -+ lockdep_softirq_exit(); -+ current->flags &= ~PF_IN_SOFTIRQ; -+ vtime_account_irq_enter(current); -+ tsk_restore_flags(current, old_flags, PF_MEMALLOC); -+} -+ -+/* -+ * Called with interrupts disabled. Process softirqs which were raised -+ * in current context (or on behalf of ksoftirqd). -+ */ -+static void do_current_softirqs(void) -+{ -+ while (current->softirqs_raised) { -+ int i = __ffs(current->softirqs_raised); -+ unsigned int pending, mask = (1U << i); -+ -+ current->softirqs_raised &= ~mask; -+ local_irq_enable(); -+ -+ /* -+ * If the lock is contended, we boost the owner to -+ * process the softirq or leave the critical section -+ * now. -+ */ -+ lock_softirq(i); -+ local_irq_disable(); -+ softirq_set_runner(i); -+ /* -+ * Check with the local_softirq_pending() bits, -+ * whether we need to process this still or if someone -+ * else took care of it. 
-+ */ -+ pending = local_softirq_pending(); -+ if (pending & mask) { -+ set_softirq_pending(pending & ~mask); -+ do_single_softirq(i); -+ } -+ softirq_clr_runner(i); -+ unlock_softirq(i); -+ WARN_ON(current->softirq_nestcnt != 1); -+ } -+} -+ -+void __local_bh_disable(void) -+{ -+ if (++current->softirq_nestcnt == 1) -+ migrate_disable(); -+} -+EXPORT_SYMBOL(__local_bh_disable); -+ -+void __local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ -+ local_irq_disable(); -+ if (current->softirq_nestcnt == 1 && current->softirqs_raised) -+ do_current_softirqs(); -+ local_irq_enable(); -+ -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(__local_bh_enable); -+ -+int in_serving_softirq(void) -+{ -+ return current->flags & PF_IN_SOFTIRQ; -+} -+EXPORT_SYMBOL(in_serving_softirq); -+ -+/* Called with preemption disabled */ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ current->softirq_nestcnt++; -+ -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ local_irq_enable(); -+ cond_resched_rcu_qs(); -+} -+ -+/* -+ * Called from netif_rx_ni(). Preemption enabled, but migration -+ * disabled. So the cpu can't go away under us. -+ */ -+void thread_do_softirq(void) -+{ -+ if (!in_serving_softirq() && current->softirqs_raised) { -+ current->softirq_nestcnt++; -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ } -+} -+ -+static void do_raise_softirq_irqoff(unsigned int nr) -+{ -+ trace_softirq_raise(nr); -+ or_softirq_pending(1UL << nr); -+ -+ /* -+ * If we are not in a hard interrupt and inside a bh disabled -+ * region, we simply raise the flag on current. local_bh_enable() -+ * will make sure that the softirq is executed. Otherwise we -+ * delegate it to ksoftirqd. -+ */ -+ if (!in_irq() && current->softirq_nestcnt) -+ current->softirqs_raised |= (1U << nr); -+ else if (__this_cpu_read(ksoftirqd)) -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); -+} -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ if (!in_irq() && !current->softirq_nestcnt) -+ wakeup_softirqd(); -+} -+ -+/* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an hard interrupt we let irq return code deal -+ * with the wakeup of ksoftirqd. -+ */ -+ if (in_irq()) -+ return; -+ /* -+ * If we are in thread context but outside of a bh disabled -+ * region, we need to wake ksoftirqd as well. -+ * -+ * CHECKME: Some of the places which do that could be wrapped -+ * into local_bh_disable/enable pairs. Though it's unclear -+ * whether this is worth the effort. To find those places just -+ * raise a WARN() if the condition is met. 
-+ */ -+ if (!current->softirq_nestcnt) -+ wakeup_softirqd(); -+} -+ -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return current->softirqs_raised; -+} -+ -+static inline void local_bh_disable_nort(void) { } -+static inline void _local_bh_enable_nort(void) { } -+ -+static inline void ksoftirqd_set_sched_params(unsigned int cpu) -+{ -+ struct sched_param param = { .sched_priority = 1 }; -+ -+ sched_setscheduler(current, SCHED_FIFO, ¶m); -+ /* Take over all pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending(); -+ local_irq_enable(); -+} -+ -+static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) -+{ -+ struct sched_param param = { .sched_priority = 0 }; -+ -+ sched_setscheduler(current, SCHED_NORMAL, ¶m); -+} -+ -+#endif /* PREEMPT_RT_FULL */ -+/* - * Enter an interrupt context. - */ - void irq_enter(void) -@@ -331,9 +695,9 @@ void irq_enter(void) - * Prevent raise_softirq from needlessly waking up ksoftirqd - * here, as softirq will be serviced on return from interrupt. - */ -- local_bh_disable(); -+ local_bh_disable_nort(); - tick_irq_enter(); -- _local_bh_enable(); -+ _local_bh_enable_nort(); - } - - __irq_enter(); -@@ -341,6 +705,7 @@ void irq_enter(void) - - static inline void invoke_softirq(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (!force_irqthreads) { - #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK - /* -@@ -360,6 +725,15 @@ static inline void invoke_softirq(void) - } else { - wakeup_softirqd(); - } -+#else /* PREEMPT_RT_FULL */ -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ if (__this_cpu_read(ksoftirqd) && -+ __this_cpu_read(ksoftirqd)->softirqs_raised) -+ wakeup_softirqd(); -+ local_irq_restore(flags); -+#endif - } - - static inline void tick_irq_exit(void) -@@ -396,26 +770,6 @@ void irq_exit(void) - trace_hardirq_exit(); /* must be last! */ - } - --/* -- * This function must run with irqs disabled! -- */ --inline void raise_softirq_irqoff(unsigned int nr) --{ -- __raise_softirq_irqoff(nr); -- -- /* -- * If we're in an interrupt or softirq, we're done -- * (this also catches softirq-disabled code). We will -- * actually run the softirq once we return from -- * the irq or softirq. -- * -- * Otherwise we wake up ksoftirqd to make sure we -- * schedule the softirq soon. -- */ -- if (!in_interrupt()) -- wakeup_softirqd(); --} -- - void raise_softirq(unsigned int nr) - { - unsigned long flags; -@@ -425,12 +779,6 @@ void raise_softirq(unsigned int nr) - local_irq_restore(flags); - } - --void __raise_softirq_irqoff(unsigned int nr) --{ -- trace_softirq_raise(nr); -- or_softirq_pending(1UL << nr); --} -- - void open_softirq(int nr, void (*action)(struct softirq_action *)) - { - softirq_vec[nr].action = action; -@@ -733,23 +1081,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); - - static int ksoftirqd_should_run(unsigned int cpu) - { -- return local_softirq_pending(); --} -- --static void run_ksoftirqd(unsigned int cpu) --{ -- local_irq_disable(); -- if (local_softirq_pending()) { -- /* -- * We can safely run softirq on inline stack, as we are not deep -- * in the task stack here. 
-- */ -- __do_softirq(); -- local_irq_enable(); -- cond_resched_rcu_qs(); -- return; -- } -- local_irq_enable(); -+ return ksoftirqd_softirq_pending(); - } - - #ifdef CONFIG_HOTPLUG_CPU -@@ -831,6 +1163,8 @@ static struct notifier_block cpu_nfb = { - - static struct smp_hotplug_thread softirq_threads = { - .store = &ksoftirqd, -+ .setup = ksoftirqd_set_sched_params, -+ .cleanup = ksoftirqd_clr_sched_params, - .thread_should_run = ksoftirqd_should_run, - .thread_fn = run_ksoftirqd, - .thread_comm = "ksoftirqd/%u", ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -758,14 +758,7 @@ static bool can_stop_idle_tick(int cpu, - return false; - - if (unlikely(local_softirq_pending() && cpu_online(cpu))) { -- static int ratelimit; -- -- if (ratelimit < 10 && -- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -- pr_warn("NOHZ: local_softirq_pending %02x\n", -- (unsigned int) local_softirq_pending()); -- ratelimit++; -- } -+ softirq_check_pending_idle(); - return false; - } - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -3595,11 +3595,9 @@ int netif_rx_ni(struct sk_buff *skb) - - trace_netif_rx_ni_entry(skb); - -- preempt_disable(); -+ local_bh_disable(); - err = netif_rx_internal(skb); -- if (local_softirq_pending()) -- do_softirq(); -- preempt_enable(); -+ local_bh_enable(); - - return err; - } diff --git a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch deleted file mode 100644 index 38bb4547e..000000000 --- a/debian/patches/features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ /dev/null @@ -1,208 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 20 Jan 2016 16:34:17 +0100 -Subject: softirq: split timer softirqs out of ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with -timer wakeup which can not happen in hardirq context. The prio has been -risen from the normal SCHED_OTHER so the timer wakeup does not happen -too late. -With enough networking load it is possible that the system never goes -idle and schedules ksoftirqd and everything else with a higher priority. -One of the tasks left behind is one of RCU's threads and so we see stalls -and eventually run out of memory. -This patch moves the TIMER and HRTIMER softirqs out of the `ksoftirqd` -thread into its own `ktimersoftd`. The former can now run SCHED_OTHER -(same as mainline) and the latter at SCHED_FIFO due to the wakeups. - -From networking point of view: The NAPI callback runs after the network -interrupt thread completes. If its run time takes too long the NAPI code -itself schedules the `ksoftirqd`. Here in the thread it can run at -SCHED_OTHER priority and it won't defer RCU anymore. 
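The routing rule the patch introduces reduces to one mask test; a compilable sketch is below. The vector numbering follows the kernel's softirq enum, and the descriptive strings are illustrative:

    #include <stdio.h>

    enum { HI, TIMER, NET_TX, NET_RX, BLOCK, BLOCK_IOPOLL,
           TASKLET, SCHED, HRTIMER, RCU, NR_SOFTIRQS };
    #define TIMER_SOFTIRQS ((1u << TIMER) | (1u << HRTIMER))

    /* Timer and hrtimer work goes to ktimersoftd (SCHED_FIFO, prio 1);
     * everything else stays with ksoftirqd (SCHED_OTHER). */
    static const char *softirq_thread(unsigned nr)
    {
        return ((1u << nr) & TIMER_SOFTIRQS) ? "ktimersoftd" : "ksoftirqd";
    }

    int main(void)
    {
        for (unsigned nr = 0; nr < NR_SOFTIRQS; nr++)
            printf("softirq %u -> %s\n", nr, softirq_thread(nr));
        return 0;
    }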
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/softirq.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++-------- - 1 file changed, 74 insertions(+), 11 deletions(-) - ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -58,6 +58,10 @@ EXPORT_SYMBOL(irq_stat); - static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; - - DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) -+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); -+#endif - - const char * const softirq_to_name[NR_SOFTIRQS] = { - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", -@@ -171,6 +175,17 @@ static void wakeup_softirqd(void) - wake_up_process(tsk); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void wakeup_timer_softirqd(void) -+{ -+ /* Interrupts are disabled: no need to stop preemption */ -+ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); -+ -+ if (tsk && tsk->state != TASK_RUNNING) -+ wake_up_process(tsk); -+} -+#endif -+ - static void handle_softirq(unsigned int vec_nr) - { - struct softirq_action *h = softirq_vec + vec_nr; -@@ -473,7 +488,6 @@ void __raise_softirq_irqoff(unsigned int - static inline void local_bh_disable_nort(void) { local_bh_disable(); } - static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } - static void ksoftirqd_set_sched_params(unsigned int cpu) { } --static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } - - #else /* !PREEMPT_RT_FULL */ - -@@ -618,8 +632,12 @@ void thread_do_softirq(void) - - static void do_raise_softirq_irqoff(unsigned int nr) - { -+ unsigned int mask; -+ -+ mask = 1UL << nr; -+ - trace_softirq_raise(nr); -- or_softirq_pending(1UL << nr); -+ or_softirq_pending(mask); - - /* - * If we are not in a hard interrupt and inside a bh disabled -@@ -628,16 +646,30 @@ static void do_raise_softirq_irqoff(unsi - * delegate it to ksoftirqd. - */ - if (!in_irq() && current->softirq_nestcnt) -- current->softirqs_raised |= (1U << nr); -- else if (__this_cpu_read(ksoftirqd)) -- __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); -+ current->softirqs_raised |= mask; -+ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) -+ return; -+ -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+} -+ -+static void wakeup_proper_softirq(unsigned int nr) -+{ -+ if ((1UL << nr) & TIMER_SOFTIRQS) -+ wakeup_timer_softirqd(); -+ else -+ wakeup_softirqd(); - } - -+ - void __raise_softirq_irqoff(unsigned int nr) - { - do_raise_softirq_irqoff(nr); - if (!in_irq() && !current->softirq_nestcnt) -- wakeup_softirqd(); -+ wakeup_proper_softirq(nr); - } - - /* -@@ -663,7 +695,7 @@ void raise_softirq_irqoff(unsigned int n - * raise a WARN() if the condition is met. 
- */ - if (!current->softirq_nestcnt) -- wakeup_softirqd(); -+ wakeup_proper_softirq(nr); - } - - static inline int ksoftirqd_softirq_pending(void) -@@ -676,22 +708,37 @@ static inline void _local_bh_enable_nort - - static inline void ksoftirqd_set_sched_params(unsigned int cpu) - { -+ /* Take over all but timer pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; -+ local_irq_enable(); -+} -+ -+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) -+{ - struct sched_param param = { .sched_priority = 1 }; - - sched_setscheduler(current, SCHED_FIFO, ¶m); -- /* Take over all pending softirqs when starting */ -+ -+ /* Take over timer pending softirqs when starting */ - local_irq_disable(); -- current->softirqs_raised = local_softirq_pending(); -+ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; - local_irq_enable(); - } - --static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) -+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, -+ bool online) - { - struct sched_param param = { .sched_priority = 0 }; - - sched_setscheduler(current, SCHED_NORMAL, ¶m); - } - -+static int ktimer_softirqd_should_run(unsigned int cpu) -+{ -+ return current->softirqs_raised; -+} -+ - #endif /* PREEMPT_RT_FULL */ - /* - * Enter an interrupt context. -@@ -741,6 +788,9 @@ static inline void invoke_softirq(void) - if (__this_cpu_read(ksoftirqd) && - __this_cpu_read(ksoftirqd)->softirqs_raised) - wakeup_softirqd(); -+ if (__this_cpu_read(ktimer_softirqd) && -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised) -+ wakeup_timer_softirqd(); - local_irq_restore(flags); - #endif - } -@@ -1173,17 +1223,30 @@ static struct notifier_block cpu_nfb = { - static struct smp_hotplug_thread softirq_threads = { - .store = &ksoftirqd, - .setup = ksoftirqd_set_sched_params, -- .cleanup = ksoftirqd_clr_sched_params, - .thread_should_run = ksoftirqd_should_run, - .thread_fn = run_ksoftirqd, - .thread_comm = "ksoftirqd/%u", - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct smp_hotplug_thread softirq_timer_threads = { -+ .store = &ktimer_softirqd, -+ .setup = ktimer_softirqd_set_sched_params, -+ .cleanup = ktimer_softirqd_clr_sched_params, -+ .thread_should_run = ktimer_softirqd_should_run, -+ .thread_fn = run_ksoftirqd, -+ .thread_comm = "ktimersoftd/%u", -+}; -+#endif -+ - static __init int spawn_ksoftirqd(void) - { - register_cpu_notifier(&cpu_nfb); - - BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); -+#endif - - return 0; - } diff --git a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch deleted file mode 100644 index 9377108a5..000000000 --- a/debian/patches/features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Allen Pais -Date: Fri, 13 Dec 2013 09:44:41 +0530 -Subject: sparc64: use generic rwsem spinlocks rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Signed-off-by: Allen Pais -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/sparc/Kconfig | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - ---- a/arch/sparc/Kconfig -+++ b/arch/sparc/Kconfig -@@ -189,12 +189,10 @@ config NR_CPUS - source kernel/Kconfig.hz - - config RWSEM_GENERIC_SPINLOCK -- bool -- default y 
if SPARC32 -+ def_bool PREEMPT_RT_FULL - - config RWSEM_XCHGADD_ALGORITHM -- bool -- default y if SPARC64 -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_HWEIGHT - bool diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch deleted file mode 100644 index 38f008222..000000000 --- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch +++ /dev/null @@ -1,209 +0,0 @@ -Subject: spinlock: Split the lock types header -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 19:34:01 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Split raw_spinlock into its own file and the remaining spinlock_t into -its own non-RT header. The non-RT header will be replaced later by sleeping -spinlocks. - -Signed-off-by: Thomas Gleixner ---- - include/linux/rwlock_types.h | 4 + - include/linux/spinlock_types.h | 74 ------------------------------------ - include/linux/spinlock_types_nort.h | 33 ++++++++++++++++ - include/linux/spinlock_types_raw.h | 56 +++++++++++++++++++++++++++ - 4 files changed, 95 insertions(+), 72 deletions(-) - ---- a/include/linux/rwlock_types.h -+++ b/include/linux/rwlock_types.h -@@ -1,6 +1,10 @@ - #ifndef __LINUX_RWLOCK_TYPES_H - #define __LINUX_RWLOCK_TYPES_H - -+#if !defined(__LINUX_SPINLOCK_TYPES_H) -+# error "Do not include directly, include spinlock_types.h" -+#endif -+ - /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers ---- a/include/linux/spinlock_types.h -+++ b/include/linux/spinlock_types.h -@@ -9,79 +9,9 @@ - * Released under the General Public License (GPL). - */ - --#if defined(CONFIG_SMP) --# include --#else --# include --#endif -+#include - --#include -- --typedef struct raw_spinlock { -- arch_spinlock_t raw_lock; --#ifdef CONFIG_GENERIC_LOCKBREAK -- unsigned int break_lock; --#endif --#ifdef CONFIG_DEBUG_SPINLOCK -- unsigned int magic, owner_cpu; -- void *owner; --#endif --#ifdef CONFIG_DEBUG_LOCK_ALLOC -- struct lockdep_map dep_map; --#endif --} raw_spinlock_t; -- --#define SPINLOCK_MAGIC 0xdead4ead -- --#define SPINLOCK_OWNER_INIT ((void *)-1L) -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } --#else --# define SPIN_DEP_MAP_INIT(lockname) --#endif -- --#ifdef CONFIG_DEBUG_SPINLOCK --# define SPIN_DEBUG_INIT(lockname) \ -- .magic = SPINLOCK_MAGIC, \ -- .owner_cpu = -1, \ -- .owner = SPINLOCK_OWNER_INIT, --#else --# define SPIN_DEBUG_INIT(lockname) --#endif -- --#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -- { \ -- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -- SPIN_DEBUG_INIT(lockname) \ -- SPIN_DEP_MAP_INIT(lockname) } -- --#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -- --typedef struct spinlock { -- union { -- struct raw_spinlock rlock; -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -- struct { -- u8 __padding[LOCK_PADSIZE]; -- struct lockdep_map dep_map; -- }; --#endif -- }; --} spinlock_t; -- --#define __SPIN_LOCK_INITIALIZER(lockname) \ -- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -- --#define __SPIN_LOCK_UNLOCKED(lockname) \ -- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+#include - - #include - ---- /dev/null -+++ 
b/include/linux/spinlock_types_nort.h -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H -+#define __LINUX_SPINLOCK_TYPES_NORT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. Include spinlock_types.h instead" -+#endif -+ -+/* -+ * The non RT version maps spinlocks to raw_spinlocks -+ */ -+typedef struct spinlock { -+ union { -+ struct raw_spinlock rlock; -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -+ struct { -+ u8 __padding[LOCK_PADSIZE]; -+ struct lockdep_map dep_map; -+ }; -+#endif -+ }; -+} spinlock_t; -+ -+#define __SPIN_LOCK_INITIALIZER(lockname) \ -+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -+ -+#define __SPIN_LOCK_UNLOCKED(lockname) \ -+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+ -+#endif ---- /dev/null -+++ b/include/linux/spinlock_types_raw.h -@@ -0,0 +1,56 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H -+#define __LINUX_SPINLOCK_TYPES_RAW_H -+ -+#if defined(CONFIG_SMP) -+# include -+#else -+# include -+#endif -+ -+#include -+ -+typedef struct raw_spinlock { -+ arch_spinlock_t raw_lock; -+#ifdef CONFIG_GENERIC_LOCKBREAK -+ unsigned int break_lock; -+#endif -+#ifdef CONFIG_DEBUG_SPINLOCK -+ unsigned int magic, owner_cpu; -+ void *owner; -+#endif -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} raw_spinlock_t; -+ -+#define SPINLOCK_MAGIC 0xdead4ead -+ -+#define SPINLOCK_OWNER_INIT ((void *)-1L) -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define SPIN_DEP_MAP_INIT(lockname) -+#endif -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define SPIN_DEBUG_INIT(lockname) \ -+ .magic = SPINLOCK_MAGIC, \ -+ .owner_cpu = -1, \ -+ .owner = SPINLOCK_OWNER_INIT, -+#else -+# define SPIN_DEBUG_INIT(lockname) -+#endif -+ -+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -+ { \ -+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -+ SPIN_DEBUG_INIT(lockname) \ -+ SPIN_DEP_MAP_INIT(lockname) } -+ -+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -+ -+#endif diff --git a/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch deleted file mode 100644 index 03813930e..000000000 --- a/debian/patches/features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: Mike Galbraith -Date: Fri, 2 May 2014 13:13:22 +0200 -Subject: stomp-machine: create lg_global_trylock_relax() primitive -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Create lg_global_trylock_relax() for use by stopper thread when it cannot -schedule, to deal with stop_cpus_lock, which is now an lglock. 
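A userspace model of the primitive: pthread_mutex_trylock() stands in for the rtmutex trylock and sched_yield() for cpu_relax(). Both substitutions, and the fixed CPU count, are assumptions of this sketch:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static pthread_mutex_t percpu_lock[NR_CPUS];

    /* Acquire every per-CPU lock without ever sleeping: spin on trylock
     * and relax between attempts, because the caller cannot schedule. */
    static void lg_global_trylock_relax_model(void)
    {
        for (int i = 0; i < NR_CPUS; i++)
            while (pthread_mutex_trylock(&percpu_lock[i]) != 0)
                sched_yield();
    }

    int main(void)
    {
        for (int i = 0; i < NR_CPUS; i++)
            pthread_mutex_init(&percpu_lock[i], NULL);
        lg_global_trylock_relax_model();
        puts("all per-cpu locks held");
        return 0;
    }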
- -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/lglock.h | 6 ++++++ - include/linux/spinlock_rt.h | 1 + - kernel/locking/lglock.c | 25 +++++++++++++++++++++++++ - kernel/locking/rtmutex.c | 5 +++++ - 4 files changed, 37 insertions(+) - ---- a/include/linux/lglock.h -+++ b/include/linux/lglock.h -@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg, - void lg_global_lock(struct lglock *lg); - void lg_global_unlock(struct lglock *lg); - -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define lg_global_trylock_relax(name) lg_global_lock(name) -+#else -+void lg_global_trylock_relax(struct lglock *lg); -+#endif -+ - #else - /* When !CONFIG_SMP, map lglock to spinlock */ - #define lglock spinlock ---- a/include/linux/spinlock_rt.h -+++ b/include/linux/spinlock_rt.h -@@ -34,6 +34,7 @@ extern int atomic_dec_and_spin_lock(atom - */ - extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); - extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); -+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); - - #define spin_lock(lock) \ - do { \ ---- a/kernel/locking/lglock.c -+++ b/kernel/locking/lglock.c -@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg) - preempt_enable_nort(); - } - EXPORT_SYMBOL(lg_global_unlock); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * HACK: If you use this, you get to keep the pieces. -+ * Used in queue_stop_cpus_work() when stop machinery -+ * is called from inactive CPU, so we can't schedule. -+ */ -+# define lg_do_trylock_relax(l) \ -+ do { \ -+ while (!__rt_spin_trylock(l)) \ -+ cpu_relax(); \ -+ } while (0) -+ -+void lg_global_trylock_relax(struct lglock *lg) -+{ -+ int i; -+ -+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); -+ for_each_possible_cpu(i) { -+ lg_lock_ptr *lock; -+ lock = per_cpu_ptr(lg->lock, i); -+ lg_do_trylock_relax(lock); -+ } -+} -+#endif ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1153,6 +1153,11 @@ void __lockfunc rt_spin_unlock_wait(spin - } - EXPORT_SYMBOL(rt_spin_unlock_wait); - -+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock) -+{ -+ return rt_mutex_trylock(lock); -+} -+ - int __lockfunc rt_spin_trylock(spinlock_t *lock) - { - int ret = rt_mutex_trylock(&lock->lock); diff --git a/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch deleted file mode 100644 index 8f23be9fb..000000000 --- a/debian/patches/features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch +++ /dev/null @@ -1,96 +0,0 @@ -From: Mike Galbraith -Date: Fri, 2 May 2014 13:13:34 +0200 -Subject: stomp-machine: use lg_global_trylock_relax() to dead with stop_cpus_lock lglock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If the stop machinery is called from inactive CPU we cannot use -lg_global_lock(), because some other stomp machine invocation might be -in progress and the lock can be contended. We cannot schedule from this -context, so use the lovely new lg_global_trylock_relax() primitive to -do what we used to do via one mutex_trylock()/cpu_relax() loop. We -now do that trylock()/relax() across an entire herd of locks. Joy. 
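Caller-side, the change is a single branch on whether the stop machinery was entered from an inactive CPU; sketched below with hypothetical stand-ins for the two acquisition paths, not the kernel functions themselves:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two kernel acquisition paths. */
    static void lg_global_lock_model(void)          { puts("blocking acquire (may sleep)"); }
    static void lg_global_trylock_relax_model(void) { puts("trylock/relax spin (never sleeps)"); }

    /* The dispatch added to queue_stop_cpus_work(): callers on an
     * inactive CPU must not schedule, so they take the spinning path. */
    static void acquire_stop_cpus_lock(int inactive)
    {
        if (!inactive)
            lg_global_lock_model();
        else
            lg_global_trylock_relax_model();
    }

    int main(void)
    {
        acquire_stop_cpus_lock(0);
        acquire_stop_cpus_lock(1);
        return 0;
    }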
- -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/stop_machine.c | 25 +++++++++++++++---------- - 1 file changed, 15 insertions(+), 10 deletions(-) - ---- a/kernel/stop_machine.c -+++ b/kernel/stop_machine.c -@@ -276,7 +276,7 @@ int stop_two_cpus(unsigned int cpu1, uns - struct cpu_stop_work work1, work2; - struct multi_stop_data msdata; - -- preempt_disable(); -+ preempt_disable_nort(); - msdata = (struct multi_stop_data){ - .fn = fn, - .data = arg, -@@ -296,11 +296,11 @@ int stop_two_cpus(unsigned int cpu1, uns - if (cpu1 > cpu2) - swap(cpu1, cpu2); - if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) { -- preempt_enable(); -+ preempt_enable_nort(); - return -ENOENT; - } - -- preempt_enable(); -+ preempt_enable_nort(); - - wait_for_stop_done(&done); - -@@ -333,17 +333,20 @@ static DEFINE_MUTEX(stop_cpus_mutex); - - static void queue_stop_cpus_work(const struct cpumask *cpumask, - cpu_stop_fn_t fn, void *arg, -- struct cpu_stop_done *done) -+ struct cpu_stop_done *done, bool inactive) - { - struct cpu_stop_work *work; - unsigned int cpu; - - /* -- * Disable preemption while queueing to avoid getting -- * preempted by a stopper which might wait for other stoppers -- * to enter @fn which can lead to deadlock. -+ * Make sure that all work is queued on all cpus before -+ * any of the cpus can execute it. - */ -- lg_global_lock(&stop_cpus_lock); -+ if (!inactive) -+ lg_global_lock(&stop_cpus_lock); -+ else -+ lg_global_trylock_relax(&stop_cpus_lock); -+ - for_each_cpu(cpu, cpumask) { - work = &per_cpu(cpu_stopper.stop_work, cpu); - work->fn = fn; -@@ -360,7 +363,7 @@ static int __stop_cpus(const struct cpum - struct cpu_stop_done done; - - cpu_stop_init_done(&done, cpumask_weight(cpumask)); -- queue_stop_cpus_work(cpumask, fn, arg, &done); -+ queue_stop_cpus_work(cpumask, fn, arg, &done, false); - wait_for_stop_done(&done); - return done.executed ? done.ret : -ENOENT; - } -@@ -558,6 +561,8 @@ static int __init cpu_stop_init(void) - INIT_LIST_HEAD(&stopper->works); - } - -+ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock"); -+ - BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads)); - stop_machine_unpark(raw_smp_processor_id()); - stop_machine_initialized = true; -@@ -654,7 +659,7 @@ int stop_machine_from_inactive_cpu(cpu_s - set_state(&msdata, MULTI_STOP_PREPARE); - cpu_stop_init_done(&done, num_active_cpus()); - queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata, -- &done); -+ &done, true); - ret = multi_cpu_stop(&msdata); - - /* Busy wait for completion. */ diff --git a/debian/patches/features/all/rt/stop-machine-raw-lock.patch b/debian/patches/features/all/rt/stop-machine-raw-lock.patch deleted file mode 100644 index fc2cc3b80..000000000 --- a/debian/patches/features/all/rt/stop-machine-raw-lock.patch +++ /dev/null @@ -1,193 +0,0 @@ -Subject: stop_machine: Use raw spinlocks -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 11:01:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use raw-locks in stomp_machine() to allow locking in irq-off regions. - -Signed-off-by: Thomas Gleixner ---- - kernel/stop_machine.c | 64 ++++++++++++++++++++++++++++++++++---------------- - 1 file changed, 44 insertions(+), 20 deletions(-) - ---- a/kernel/stop_machine.c -+++ b/kernel/stop_machine.c -@@ -30,14 +30,14 @@ struct cpu_stop_done { - atomic_t nr_todo; /* nr left to execute */ - bool executed; /* actually executed? 
*/ - int ret; /* collected return value */ -- struct completion completion; /* fired if nr_todo reaches 0 */ -+ struct task_struct *waiter; /* woken when nr_todo reaches 0 */ - }; - - /* the actual stopper, one per every possible cpu, enabled on online cpus */ - struct cpu_stopper { - struct task_struct *thread; - -- spinlock_t lock; -+ raw_spinlock_t lock; - bool enabled; /* is this stopper enabled? */ - struct list_head works; /* list of pending works */ - -@@ -59,7 +59,7 @@ static void cpu_stop_init_done(struct cp - { - memset(done, 0, sizeof(*done)); - atomic_set(&done->nr_todo, nr_todo); -- init_completion(&done->completion); -+ done->waiter = current; - } - - /* signal completion unless @done is NULL */ -@@ -68,8 +68,10 @@ static void cpu_stop_signal_done(struct - if (done) { - if (executed) - done->executed = true; -- if (atomic_dec_and_test(&done->nr_todo)) -- complete(&done->completion); -+ if (atomic_dec_and_test(&done->nr_todo)) { -+ wake_up_process(done->waiter); -+ done->waiter = NULL; -+ } - } - } - -@@ -86,12 +88,28 @@ static void cpu_stop_queue_work(unsigned - struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - unsigned long flags; - -- spin_lock_irqsave(&stopper->lock, flags); -+ raw_spin_lock_irqsave(&stopper->lock, flags); - if (stopper->enabled) - __cpu_stop_queue_work(stopper, work); - else - cpu_stop_signal_done(work->done, false); -- spin_unlock_irqrestore(&stopper->lock, flags); -+ raw_spin_unlock_irqrestore(&stopper->lock, flags); -+} -+ -+static void wait_for_stop_done(struct cpu_stop_done *done) -+{ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (atomic_read(&done->nr_todo)) { -+ schedule(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+ /* -+ * We need to wait until cpu_stop_signal_done() has cleared -+ * done->waiter. -+ */ -+ while (done->waiter) -+ cpu_relax(); -+ set_current_state(TASK_RUNNING); - } - - /** -@@ -125,7 +143,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s - - cpu_stop_init_done(&done, 1); - cpu_stop_queue_work(cpu, &work); -- wait_for_completion(&done.completion); -+ wait_for_stop_done(&done); - return done.executed ? done.ret : -ENOENT; - } - -@@ -224,8 +242,8 @@ static int cpu_stop_queue_two_works(int - int err; - - lg_double_lock(&stop_cpus_lock, cpu1, cpu2); -- spin_lock_irq(&stopper1->lock); -- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock_irq(&stopper1->lock); -+ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); - - err = -ENOENT; - if (!stopper1->enabled || !stopper2->enabled) -@@ -235,8 +253,8 @@ static int cpu_stop_queue_two_works(int - __cpu_stop_queue_work(stopper1, work1); - __cpu_stop_queue_work(stopper2, work2); - unlock: -- spin_unlock(&stopper2->lock); -- spin_unlock_irq(&stopper1->lock); -+ raw_spin_unlock(&stopper2->lock); -+ raw_spin_unlock_irq(&stopper1->lock); - lg_double_unlock(&stop_cpus_lock, cpu1, cpu2); - - return err; -@@ -284,7 +302,7 @@ int stop_two_cpus(unsigned int cpu1, uns - - preempt_enable(); - -- wait_for_completion(&done.completion); -+ wait_for_stop_done(&done); - - return done.executed ? done.ret : -ENOENT; - } -@@ -343,7 +361,7 @@ static int __stop_cpus(const struct cpum - - cpu_stop_init_done(&done, cpumask_weight(cpumask)); - queue_stop_cpus_work(cpumask, fn, arg, &done); -- wait_for_completion(&done.completion); -+ wait_for_stop_done(&done); - return done.executed ? 
done.ret : -ENOENT; - } - -@@ -422,9 +440,9 @@ static int cpu_stop_should_run(unsigned - unsigned long flags; - int run; - -- spin_lock_irqsave(&stopper->lock, flags); -+ raw_spin_lock_irqsave(&stopper->lock, flags); - run = !list_empty(&stopper->works); -- spin_unlock_irqrestore(&stopper->lock, flags); -+ raw_spin_unlock_irqrestore(&stopper->lock, flags); - return run; - } - -@@ -436,13 +454,13 @@ static void cpu_stopper_thread(unsigned - - repeat: - work = NULL; -- spin_lock_irq(&stopper->lock); -+ raw_spin_lock_irq(&stopper->lock); - if (!list_empty(&stopper->works)) { - work = list_first_entry(&stopper->works, - struct cpu_stop_work, list); - list_del_init(&work->list); - } -- spin_unlock_irq(&stopper->lock); -+ raw_spin_unlock_irq(&stopper->lock); - - if (work) { - cpu_stop_fn_t fn = work->fn; -@@ -474,7 +492,13 @@ static void cpu_stopper_thread(unsigned - kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, - ksym_buf), arg); - -+ /* -+ * Make sure that the wakeup and setting done->waiter -+ * to NULL is atomic. -+ */ -+ local_irq_disable(); - cpu_stop_signal_done(done, true); -+ local_irq_enable(); - goto repeat; - } - } -@@ -530,7 +554,7 @@ static int __init cpu_stop_init(void) - for_each_possible_cpu(cpu) { - struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - -- spin_lock_init(&stopper->lock); -+ raw_spin_lock_init(&stopper->lock); - INIT_LIST_HEAD(&stopper->works); - } - -@@ -634,7 +658,7 @@ int stop_machine_from_inactive_cpu(cpu_s - ret = multi_cpu_stop(&msdata); - - /* Busy wait for completion. */ -- while (!completion_done(&done.completion)) -+ while (atomic_read(&done.nr_todo)) - cpu_relax(); - - mutex_unlock(&stop_cpus_mutex); diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch deleted file mode 100644 index 654adfe43..000000000 --- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:27 -0500 -Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Instead of playing with non-preemption, introduce explicit -startup serialization. This is more robust and cleaner as -well. 
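The serialization idiom itself is tiny; here is a runnable pthread model. The mutex stands in for stop_cpus_lock, and the thread count and output are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t stop_cpus_lock_model = PTHREAD_MUTEX_INITIALIZER;

    static void *stopper_thread(void *arg)
    {
        /* Wait until the queueing side has scheduled work on all cpus:
         * take the lock it holds, then drop it immediately. */
        pthread_mutex_lock(&stop_cpus_lock_model);
        pthread_mutex_unlock(&stop_cpus_lock_model);
        printf("stopper %ld runs\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        pthread_mutex_lock(&stop_cpus_lock_model);   /* queueing in progress */
        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, stopper_thread, (void *)i);
        /* ... queue work on every cpu here ... */
        pthread_mutex_unlock(&stop_cpus_lock_model); /* release the herd */
        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }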
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner -[bigeasy: XXX: stopper_lock -> stop_cpus_lock] ---- - kernel/stop_machine.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/kernel/stop_machine.c -+++ b/kernel/stop_machine.c -@@ -450,6 +450,16 @@ static void cpu_stopper_thread(unsigned - struct cpu_stop_done *done = work->done; - char ksym_buf[KSYM_NAME_LEN] __maybe_unused; - -+ /* -+ * Wait until the stopper finished scheduling on all -+ * cpus -+ */ -+ lg_global_lock(&stop_cpus_lock); -+ /* -+ * Let other cpu threads continue as well -+ */ -+ lg_global_unlock(&stop_cpus_lock); -+ - /* cpu stop callbacks are not allowed to sleep */ - preempt_disable(); - diff --git a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch deleted file mode 100644 index 2aa7f5a5c..000000000 --- a/debian/patches/features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ /dev/null @@ -1,63 +0,0 @@ -From: Mike Galbraith -Date: Wed, 18 Feb 2015 16:05:28 +0100 -Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 -|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd -|Preemption disabled at:[] svc_xprt_received+0x4b/0xc0 [sunrpc] -|CPU: 6 PID: 3194 Comm: rpc.nfsd Not tainted 3.18.7-rt1 #9 -|Hardware name: MEDION MS-7848/MS-7848, BIOS M7848W08.404 11/06/2014 -| ffff880409630000 ffff8800d9a33c78 ffffffff815bdeb5 0000000000000002 -| 0000000000000000 ffff8800d9a33c98 ffffffff81073c86 ffff880408dd6008 -| ffff880408dd6000 ffff8800d9a33cb8 ffffffff815c3d84 ffff88040b3ac000 -|Call Trace: -| [] dump_stack+0x4f/0x9e -| [] __might_sleep+0xe6/0x150 -| [] rt_spin_lock+0x24/0x50 -| [] svc_xprt_do_enqueue+0x80/0x230 [sunrpc] -| [] svc_xprt_received+0x4b/0xc0 [sunrpc] -| [] svc_add_new_perm_xprt+0x6d/0x80 [sunrpc] -| [] svc_addsock+0x143/0x200 [sunrpc] -| [] write_ports+0x28c/0x340 [nfsd] -| [] nfsctl_transaction_write+0x4c/0x80 [nfsd] -| [] vfs_write+0xb3/0x1d0 -| [] SyS_write+0x49/0xb0 -| [] system_call_fastpath+0x16/0x1b - - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - net/sunrpc/svc_xprt.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - ---- a/net/sunrpc/svc_xprt.c -+++ b/net/sunrpc/svc_xprt.c -@@ -340,7 +340,7 @@ void svc_xprt_do_enqueue(struct svc_xprt - goto out; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = svc_pool_for_cpu(xprt->xpt_server, cpu); - - atomic_long_inc(&pool->sp_stats.packets); -@@ -376,7 +376,7 @@ void svc_xprt_do_enqueue(struct svc_xprt - - atomic_long_inc(&pool->sp_stats.threads_woken); - wake_up_process(rqstp->rq_task); -- put_cpu(); -+ put_cpu_light(); - goto out; - } - rcu_read_unlock(); -@@ -397,7 +397,7 @@ void svc_xprt_do_enqueue(struct svc_xprt - goto redo_search; - } - rqstp = NULL; -- put_cpu(); -+ put_cpu_light(); - out: - trace_svc_xprt_do_enqueue(xprt, rqstp); - } diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch deleted file mode 100644 index d252a8a24..000000000 --- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Thomas Gleixner -Date: Thu, 15 Jul 2010 10:29:00 +0200 -Subject: suspend: Prevent 
might sleep splats -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -timekeeping suspend/resume calls read_persistant_clock() which takes -rtc_lock. That results in might sleep warnings because at that point -we run with interrupts disabled. - -We cannot convert rtc_lock to a raw spinlock as that would trigger -other might sleep warnings. - -As a temporary workaround we disable the might sleep warnings by -setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend() -and restoring it to SYSTEM_RUNNING afer sysdev_resume(). - -Needs to be revisited. - -Signed-off-by: Thomas Gleixner - ---- - include/linux/kernel.h | 1 + - kernel/power/hibernate.c | 7 +++++++ - kernel/power/suspend.c | 4 ++++ - 3 files changed, 12 insertions(+) - ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -473,6 +473,7 @@ extern enum system_states { - SYSTEM_HALT, - SYSTEM_POWER_OFF, - SYSTEM_RESTART, -+ SYSTEM_SUSPEND, - } system_state; - - #define TAINT_PROPRIETARY_MODULE 0 ---- a/kernel/power/hibernate.c -+++ b/kernel/power/hibernate.c -@@ -285,6 +285,8 @@ static int create_image(int platform_mod - - local_irq_disable(); - -+ system_state = SYSTEM_SUSPEND; -+ - error = syscore_suspend(); - if (error) { - printk(KERN_ERR "PM: Some system devices failed to power down, " -@@ -314,6 +316,7 @@ static int create_image(int platform_mod - syscore_resume(); - - Enable_irqs: -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: -@@ -437,6 +440,7 @@ static int resume_target_kernel(bool pla - goto Enable_cpus; - - local_irq_disable(); -+ system_state = SYSTEM_SUSPEND; - - error = syscore_suspend(); - if (error) -@@ -470,6 +474,7 @@ static int resume_target_kernel(bool pla - syscore_resume(); - - Enable_irqs: -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: -@@ -555,6 +560,7 @@ int hibernation_platform_enter(void) - goto Enable_cpus; - - local_irq_disable(); -+ system_state = SYSTEM_SUSPEND; - syscore_suspend(); - if (pm_wakeup_pending()) { - error = -EAGAIN; -@@ -567,6 +573,7 @@ int hibernation_platform_enter(void) - - Power_up: - syscore_resume(); -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: ---- a/kernel/power/suspend.c -+++ b/kernel/power/suspend.c -@@ -359,6 +359,8 @@ static int suspend_enter(suspend_state_t - arch_suspend_disable_irqs(); - BUG_ON(!irqs_disabled()); - -+ system_state = SYSTEM_SUSPEND; -+ - error = syscore_suspend(); - if (!error) { - *wakeup = pm_wakeup_pending(); -@@ -375,6 +377,8 @@ static int suspend_enter(suspend_state_t - syscore_resume(); - } - -+ system_state = SYSTEM_RUNNING; -+ - arch_suspend_enable_irqs(); - BUG_ON(irqs_disabled()); - diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch deleted file mode 100644 index 7ea523acf..000000000 --- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch +++ /dev/null @@ -1,48 +0,0 @@ -Subject: sysfs: Add /sys/kernel/realtime entry -From: Clark Williams -Date: Sat Jul 30 21:55:53 2011 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Add a /sys/kernel entry to indicate that the kernel is a -realtime kernel. - -Clark says that he needs this for udev rules, udev needs to evaluate -if its a PREEMPT_RT kernel a few thousand times and parsing uname -output is too slow or so. - -Are there better solutions? Should it exist and return 0 on !-rt? 
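On the consumer side, a udev helper or similar can probe the flag with a few lines of C. This sketch treats a missing file as "not RT", which matches kernels built without the entry; error handling is deliberately minimal:

    #include <stdio.h>

    /* Returns 1 on a PREEMPT_RT_FULL kernel, 0 otherwise. The file
     * /sys/kernel/realtime only exists when the entry is compiled in. */
    static int kernel_is_realtime(void)
    {
        FILE *f = fopen("/sys/kernel/realtime", "r");
        int rt = 0;

        if (f) {
            if (fscanf(f, "%d", &rt) != 1)
                rt = 0;
            fclose(f);
        }
        return rt;
    }

    int main(void)
    {
        printf("realtime kernel: %s\n", kernel_is_realtime() ? "yes" : "no");
        return 0;
    }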
- -Signed-off-by: Clark Williams -Signed-off-by: Peter Zijlstra ---- - kernel/ksysfs.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - ---- a/kernel/ksysfs.c -+++ b/kernel/ksysfs.c -@@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo); - - #endif /* CONFIG_KEXEC_CORE */ - -+#if defined(CONFIG_PREEMPT_RT_FULL) -+static ssize_t realtime_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%d\n", 1); -+} -+KERNEL_ATTR_RO(realtime); -+#endif -+ - /* whether file capabilities are enabled */ - static ssize_t fscaps_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -@@ -203,6 +212,9 @@ static struct attribute * kernel_attrs[] - &vmcoreinfo_attr.attr, - #endif - &rcu_expedited_attr.attr, -+#ifdef CONFIG_PREEMPT_RT_FULL -+ &realtime_attr.attr, -+#endif - NULL - }; - diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch deleted file mode 100644 index baf147ddb..000000000 --- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +++ /dev/null @@ -1,392 +0,0 @@ -Subject: tasklet: Prevent tasklets from going into infinite spin in RT -From: Ingo Molnar -Date: Tue Nov 29 20:18:22 2011 -0500 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, -and spinlocks turn are mutexes. But this can cause issues with -tasks disabling tasklets. A tasklet runs under ksoftirqd, and -if a tasklets are disabled with tasklet_disable(), the tasklet -count is increased. When a tasklet runs, it checks this counter -and if it is set, it adds itself back on the softirq queue and -returns. - -The problem arises in RT because ksoftirq will see that a softirq -is ready to run (the tasklet softirq just re-armed itself), and will -not sleep, but instead run the softirqs again. The tasklet softirq -will still see that the count is non-zero and will not execute -the tasklet and requeue itself on the softirq again, which will -cause ksoftirqd to run it again and again and again. - -It gets worse because ksoftirqd runs as a real-time thread. -If it preempted the task that disabled tasklets, and that task -has migration disabled, or can't run for other reasons, the tasklet -softirq will never run because the count will never be zero, and -ksoftirqd will go into an infinite loop. As an RT task, it this -becomes a big problem. - -This is a hack solution to have tasklet_disable stop tasklets, and -when a tasklet runs, instead of requeueing the tasklet softirqd -it delays it. When tasklet_enable() is called, and tasklets are -waiting, then the tasklet_enable() will kick the tasklets to continue. -This prevents the lock up from ksoftirq going into an infinite loop. - -[ rostedt@goodmis.org: ported to 3.0-rt ] - -Signed-off-by: Ingo Molnar -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner - ---- - include/linux/interrupt.h | 33 ++++--- - kernel/softirq.c | 201 ++++++++++++++++++++++++++++++++-------------- - 2 files changed, 162 insertions(+), 72 deletions(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -483,8 +483,9 @@ static inline struct task_struct *this_c - to be executed on some cpu at least once after this. - * If the tasklet is already scheduled, but its execution is still not - started, it will be executed only once. 
-- * If this tasklet is already running on another CPU (or schedule is called -- from tasklet itself), it is rescheduled for later. -+ * If this tasklet is already running on another CPU, it is rescheduled -+ for later. -+ * Schedule must not be called from the tasklet itself (a lockup occurs) - * Tasklet is strictly serialized wrt itself, but not - wrt another tasklets. If client needs some intertask synchronization, - he makes it with spinlocks. -@@ -509,27 +510,36 @@ struct tasklet_struct name = { NULL, 0, - enum - { - TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ -- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_PENDING /* Tasklet is pending */ - }; - --#ifdef CONFIG_SMP -+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) -+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) -+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) -+ -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - static inline int tasklet_trylock(struct tasklet_struct *t) - { - return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); - } - -+static inline int tasklet_tryunlock(struct tasklet_struct *t) -+{ -+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; -+} -+ - static inline void tasklet_unlock(struct tasklet_struct *t) - { - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); - } - --static inline void tasklet_unlock_wait(struct tasklet_struct *t) --{ -- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } --} -+extern void tasklet_unlock_wait(struct tasklet_struct *t); -+ - #else - #define tasklet_trylock(t) 1 -+#define tasklet_tryunlock(t) 1 - #define tasklet_unlock_wait(t) do { } while (0) - #define tasklet_unlock(t) do { } while (0) - #endif -@@ -578,12 +588,7 @@ static inline void tasklet_disable(struc - smp_mb(); - } - --static inline void tasklet_enable(struct tasklet_struct *t) --{ -- smp_mb__before_atomic(); -- atomic_dec(&t->count); --} -- -+extern void tasklet_enable(struct tasklet_struct *t); - extern void tasklet_kill(struct tasklet_struct *t); - extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); - extern void tasklet_init(struct tasklet_struct *t, ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -446,15 +447,45 @@ struct tasklet_head { - static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); - static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); - -+static void inline -+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) -+{ -+ if (tasklet_trylock(t)) { -+again: -+ /* We may have been preempted before tasklet_trylock -+ * and __tasklet_action may have already run. -+ * So double check the sched bit while the takslet -+ * is locked before adding it to the list. -+ */ -+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { -+ t->next = NULL; -+ *head->tail = t; -+ head->tail = &(t->next); -+ raise_softirq_irqoff(nr); -+ tasklet_unlock(t); -+ } else { -+ /* This is subtle. If we hit the corner case above -+ * It is possible that we get preempted right here, -+ * and another task has successfully called -+ * tasklet_schedule(), then this function, and -+ * failed on the trylock. Thus we must be sure -+ * before releasing the tasklet lock, that the -+ * SCHED_BIT is clear. 
Otherwise the tasklet -+ * may get its SCHED_BIT set, but not added to the -+ * list -+ */ -+ if (!tasklet_tryunlock(t)) -+ goto again; -+ } -+ } -+} -+ - void __tasklet_schedule(struct tasklet_struct *t) - { - unsigned long flags; - - local_irq_save(flags); -- t->next = NULL; -- *__this_cpu_read(tasklet_vec.tail) = t; -- __this_cpu_write(tasklet_vec.tail, &(t->next)); -- raise_softirq_irqoff(TASKLET_SOFTIRQ); -+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ); - local_irq_restore(flags); - } - EXPORT_SYMBOL(__tasklet_schedule); -@@ -464,10 +495,7 @@ void __tasklet_hi_schedule(struct taskle - unsigned long flags; - - local_irq_save(flags); -- t->next = NULL; -- *__this_cpu_read(tasklet_hi_vec.tail) = t; -- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); -- raise_softirq_irqoff(HI_SOFTIRQ); -+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); - local_irq_restore(flags); - } - EXPORT_SYMBOL(__tasklet_hi_schedule); -@@ -476,82 +504,122 @@ void __tasklet_hi_schedule_first(struct - { - BUG_ON(!irqs_disabled()); - -- t->next = __this_cpu_read(tasklet_hi_vec.head); -- __this_cpu_write(tasklet_hi_vec.head, t); -- __raise_softirq_irqoff(HI_SOFTIRQ); -+ __tasklet_hi_schedule(t); - } - EXPORT_SYMBOL(__tasklet_hi_schedule_first); - --static void tasklet_action(struct softirq_action *a) -+void tasklet_enable(struct tasklet_struct *t) - { -- struct tasklet_struct *list; -+ if (!atomic_dec_and_test(&t->count)) -+ return; -+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) -+ tasklet_schedule(t); -+} -+EXPORT_SYMBOL(tasklet_enable); - -- local_irq_disable(); -- list = __this_cpu_read(tasklet_vec.head); -- __this_cpu_write(tasklet_vec.head, NULL); -- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); -- local_irq_enable(); -+static void __tasklet_action(struct softirq_action *a, -+ struct tasklet_struct *list) -+{ -+ int loops = 1000000; - - while (list) { - struct tasklet_struct *t = list; - - list = list->next; - -- if (tasklet_trylock(t)) { -- if (!atomic_read(&t->count)) { -- if (!test_and_clear_bit(TASKLET_STATE_SCHED, -- &t->state)) -- BUG(); -- t->func(t->data); -- tasklet_unlock(t); -- continue; -- } -- tasklet_unlock(t); -+ /* -+ * Should always succeed - after a tasklist got on the -+ * list (after getting the SCHED bit set from 0 to 1), -+ * nothing but the tasklet softirq it got queued to can -+ * lock it: -+ */ -+ if (!tasklet_trylock(t)) { -+ WARN_ON(1); -+ continue; - } - -- local_irq_disable(); - t->next = NULL; -- *__this_cpu_read(tasklet_vec.tail) = t; -- __this_cpu_write(tasklet_vec.tail, &(t->next)); -- __raise_softirq_irqoff(TASKLET_SOFTIRQ); -- local_irq_enable(); -+ -+ /* -+ * If we cannot handle the tasklet because it's disabled, -+ * mark it as pending. tasklet_enable() will later -+ * re-schedule the tasklet. -+ */ -+ if (unlikely(atomic_read(&t->count))) { -+out_disabled: -+ /* implicit unlock: */ -+ wmb(); -+ t->state = TASKLET_STATEF_PENDING; -+ continue; -+ } -+ -+ /* -+ * After this point on the tasklet might be rescheduled -+ * on another CPU, but it can only be added to another -+ * CPU's tasklet list if we unlock the tasklet (which we -+ * dont do yet). -+ */ -+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) -+ WARN_ON(1); -+ -+again: -+ t->func(t->data); -+ -+ /* -+ * Try to unlock the tasklet. We must use cmpxchg, because -+ * another CPU might have scheduled or disabled the tasklet. -+ * We only allow the STATE_RUN -> 0 transition here. 
-+ */ -+ while (!tasklet_tryunlock(t)) { -+ /* -+ * If it got disabled meanwhile, bail out: -+ */ -+ if (atomic_read(&t->count)) -+ goto out_disabled; -+ /* -+ * If it got scheduled meanwhile, re-execute -+ * the tasklet function: -+ */ -+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) -+ goto again; -+ if (!--loops) { -+ printk("hm, tasklet state: %08lx\n", t->state); -+ WARN_ON(1); -+ tasklet_unlock(t); -+ break; -+ } -+ } - } - } - -+static void tasklet_action(struct softirq_action *a) -+{ -+ struct tasklet_struct *list; -+ -+ local_irq_disable(); -+ -+ list = __this_cpu_read(tasklet_vec.head); -+ __this_cpu_write(tasklet_vec.head, NULL); -+ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); -+ -+ local_irq_enable(); -+ -+ __tasklet_action(a, list); -+} -+ - static void tasklet_hi_action(struct softirq_action *a) - { - struct tasklet_struct *list; - - local_irq_disable(); -+ - list = __this_cpu_read(tasklet_hi_vec.head); - __this_cpu_write(tasklet_hi_vec.head, NULL); - __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); -- local_irq_enable(); -- -- while (list) { -- struct tasklet_struct *t = list; - -- list = list->next; -- -- if (tasklet_trylock(t)) { -- if (!atomic_read(&t->count)) { -- if (!test_and_clear_bit(TASKLET_STATE_SCHED, -- &t->state)) -- BUG(); -- t->func(t->data); -- tasklet_unlock(t); -- continue; -- } -- tasklet_unlock(t); -- } -+ local_irq_enable(); - -- local_irq_disable(); -- t->next = NULL; -- *__this_cpu_read(tasklet_hi_vec.tail) = t; -- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); -- __raise_softirq_irqoff(HI_SOFTIRQ); -- local_irq_enable(); -- } -+ __tasklet_action(a, list); - } - - void tasklet_init(struct tasklet_struct *t, -@@ -572,7 +640,7 @@ void tasklet_kill(struct tasklet_struct - - while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - do { -- yield(); -+ msleep(1); - } while (test_bit(TASKLET_STATE_SCHED, &t->state)); - } - tasklet_unlock_wait(t); -@@ -646,6 +714,23 @@ void __init softirq_init(void) - open_softirq(HI_SOFTIRQ, tasklet_hi_action); - } - -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) -+void tasklet_unlock_wait(struct tasklet_struct *t) -+{ -+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { -+ /* -+ * Hack for now to avoid this busy-loop: -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ msleep(1); -+#else -+ barrier(); -+#endif -+ } -+} -+EXPORT_SYMBOL(tasklet_unlock_wait); -+#endif -+ - static int ksoftirqd_should_run(unsigned int cpu) - { - return local_softirq_pending(); diff --git a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch deleted file mode 100644 index a4ef8cb8e..000000000 --- a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch +++ /dev/null @@ -1,56 +0,0 @@ -Subject: rwlocks: Fix section mismatch -From: John Kacur -Date: Mon, 19 Sep 2011 11:09:27 +0200 (CEST) -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This fixes the following build error for the preempt-rt kernel. - -make kernel/fork.o - CC kernel/fork.o -kernel/fork.c:90: error: section of tasklist_lock conflicts with previous declaration -make[2]: *** [kernel/fork.o] Error 1 -make[1]: *** [kernel/fork.o] Error 2 - -The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default. -The non-rt kernels explicitly cache align only the tasklist_lock in -kernel/fork.c -That can create a build conflict. 
This fixes the build problem by making the -non-rt kernels cache align RWLOCKs by default. The side effect is that -the other RWLOCKs are also cache aligned for non-rt. - -This is a short term solution for rt only. -The longer term solution would be to push the cache aligned DEFINE_RWLOCK -to mainline. If there are objections, then we could create a -DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature. - -Signed-off-by: John Kacur -Cc: Peter Zijlstra -Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6 -Signed-off-by: Thomas Gleixner ---- - include/linux/rwlock_types.h | 3 ++- - kernel/fork.c | 2 +- - 2 files changed, 3 insertions(+), 2 deletions(-) - ---- a/include/linux/rwlock_types.h -+++ b/include/linux/rwlock_types.h -@@ -47,6 +47,7 @@ typedef struct { - RW_DEP_MAP_INIT(lockname) } - #endif - --#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) - - #endif /* __LINUX_RWLOCK_TYPES_H */ ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -108,7 +108,7 @@ int max_threads; /* tunable limit on nr - - DEFINE_PER_CPU(unsigned long, process_counts) = 0; - --__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ -+DEFINE_RWLOCK(tasklist_lock); /* outer */ - - #ifdef CONFIG_PROVE_RCU - int lockdep_tasklist_lock_is_held(void) diff --git a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch deleted file mode 100644 index 0fbd24668..000000000 --- a/debian/patches/features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch +++ /dev/null @@ -1,133 +0,0 @@ -From: Daniel Wagner -Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: thermal: Defer thermal wakups to threads -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will -call schedule while we run in irq context. - -[] dump_stack+0x4e/0x8f -[] __schedule_bug+0xa6/0xb4 -[] __schedule+0x5b4/0x700 -[] schedule+0x2a/0x90 -[] rt_spin_lock_slowlock+0xe5/0x2d0 -[] rt_spin_lock+0x25/0x30 -[] pkg_temp_thermal_platform_thermal_notify+0x45/0x134 [x86_pkg_temp_thermal] -[] ? therm_throt_process+0x1b/0x160 -[] intel_thermal_interrupt+0x211/0x250 -[] smp_thermal_interrupt+0x21/0x40 -[] thermal_interrupt+0x6d/0x80 - -Let's defer the work to a kthread. - -Signed-off-by: Daniel Wagner -[bigeasy: reoder init/denit position. 
TODO: flush swork on exit] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/thermal/x86_pkg_temp_thermal.c | 50 +++++++++++++++++++++++++++++++-- - 1 file changed, 47 insertions(+), 3 deletions(-) - ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -352,7 +353,7 @@ static void pkg_temp_thermal_threshold_w - } - } - --static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+static void platform_thermal_notify_work(struct swork_event *event) - { - unsigned long flags; - int cpu = smp_processor_id(); -@@ -369,7 +370,7 @@ static int pkg_temp_thermal_platform_the - pkg_work_scheduled[phy_id]) { - disable_pkg_thres_interrupt(); - spin_unlock_irqrestore(&pkg_work_lock, flags); -- return -EINVAL; -+ return; - } - pkg_work_scheduled[phy_id] = 1; - spin_unlock_irqrestore(&pkg_work_lock, flags); -@@ -378,9 +379,48 @@ static int pkg_temp_thermal_platform_the - schedule_delayed_work_on(cpu, - &per_cpu(pkg_temp_thermal_threshold_work, cpu), - msecs_to_jiffies(notify_delay_ms)); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct swork_event notify_work; -+ -+static int thermal_notify_work_init(void) -+{ -+ int err; -+ -+ err = swork_get(); -+ if (err) -+ return err; -+ -+ INIT_SWORK(¬ify_work, platform_thermal_notify_work); - return 0; - } - -+static void thermal_notify_work_cleanup(void) -+{ -+ swork_put(); -+} -+ -+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+{ -+ swork_queue(¬ify_work); -+ return 0; -+} -+ -+#else /* !CONFIG_PREEMPT_RT_FULL */ -+ -+static int thermal_notify_work_init(void) { return 0; } -+ -+static void thermal_notify_work_cleanup(void) { } -+ -+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+{ -+ platform_thermal_notify_work(NULL); -+ -+ return 0; -+} -+#endif /* CONFIG_PREEMPT_RT_FULL */ -+ - static int find_siblings_cpu(int cpu) - { - int i; -@@ -584,6 +624,9 @@ static int __init pkg_temp_thermal_init( - if (!x86_match_cpu(pkg_temp_thermal_ids)) - return -ENODEV; - -+ if (!thermal_notify_work_init()) -+ return -ENODEV; -+ - spin_lock_init(&pkg_work_lock); - platform_thermal_package_notify = - pkg_temp_thermal_platform_thermal_notify; -@@ -608,7 +651,7 @@ static int __init pkg_temp_thermal_init( - kfree(pkg_work_scheduled); - platform_thermal_package_notify = NULL; - platform_thermal_package_rate_control = NULL; -- -+ thermal_notify_work_cleanup(); - return -ENODEV; - } - -@@ -633,6 +676,7 @@ static void __exit pkg_temp_thermal_exit - mutex_unlock(&phy_dev_list_mutex); - platform_thermal_package_notify = NULL; - platform_thermal_package_rate_control = NULL; -+ thermal_notify_work_cleanup(); - for_each_online_cpu(i) - cancel_delayed_work_sync( - &per_cpu(pkg_temp_thermal_threshold_work, i)); diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch deleted file mode 100644 index 7aee576a4..000000000 --- a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch +++ /dev/null @@ -1,157 +0,0 @@ -Subject: timekeeping: Split jiffies seqlock -From: Thomas Gleixner -Date: Thu, 14 Feb 2013 22:36:59 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so -it can be taken in atomic context on RT. 
- -Signed-off-by: Thomas Gleixner ---- - kernel/time/jiffies.c | 7 ++++--- - kernel/time/tick-common.c | 10 ++++++---- - kernel/time/tick-sched.c | 19 ++++++++++++------- - kernel/time/timekeeping.c | 6 ++++-- - kernel/time/timekeeping.h | 3 ++- - 5 files changed, 28 insertions(+), 17 deletions(-) - ---- a/kernel/time/jiffies.c -+++ b/kernel/time/jiffies.c -@@ -74,7 +74,8 @@ static struct clocksource clocksource_ji - .max_cycles = 10, - }; - --__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); -+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); -+__cacheline_aligned_in_smp seqcount_t jiffies_seq; - - #if (BITS_PER_LONG < 64) - u64 get_jiffies_64(void) -@@ -83,9 +84,9 @@ u64 get_jiffies_64(void) - u64 ret; - - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - ret = jiffies_64; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - return ret; - } - EXPORT_SYMBOL(get_jiffies_64); ---- a/kernel/time/tick-common.c -+++ b/kernel/time/tick-common.c -@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void) - static void tick_periodic(int cpu) - { - if (tick_do_timer_cpu == cpu) { -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - - /* Keep track of the next tick event */ - tick_next_period = ktime_add(tick_next_period, tick_period); - - do_timer(1); -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } - -@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_ev - ktime_t next; - - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - next = tick_next_period; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - - clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); - ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti - return; - - /* Reevalute with jiffies_lock held */ -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - - delta = ktime_sub(now, last_jiffies_update); - if (delta.tv64 >= tick_period.tv64) { -@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(kti - /* Keep the tick_next_period variable up to date */ - tick_next_period = ktime_add(last_jiffies_update, tick_period); - } else { -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return; - } -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } - -@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(vo - { - ktime_t period; - -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - /* Did we start the jiffies update yet ? 
*/ - if (last_jiffies_update.tv64 == 0) - last_jiffies_update = tick_next_period; - period = last_jiffies_update; -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return period; - } - -@@ -578,10 +583,10 @@ static ktime_t tick_nohz_stop_sched_tick - - /* Read jiffies and the time when jiffies were updated last */ - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - basemono = last_jiffies_update.tv64; - basejiff = jiffies; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - ts->last_jiffies = basejiff; - - if (rcu_needs_cpu(basemono, &next_rcu) || ---- a/kernel/time/timekeeping.c -+++ b/kernel/time/timekeeping.c -@@ -2048,8 +2048,10 @@ EXPORT_SYMBOL(hardpps); - */ - void xtime_update(unsigned long ticks) - { -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - do_timer(ticks); -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } ---- a/kernel/time/timekeeping.h -+++ b/kernel/time/timekeeping.h -@@ -19,7 +19,8 @@ extern void timekeeping_resume(void); - extern void do_timer(unsigned long ticks); - extern void update_wall_time(void); - --extern seqlock_t jiffies_lock; -+extern raw_spinlock_t jiffies_lock; -+extern seqcount_t jiffies_seq; - - #define CS_NAME_LEN 32 - diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch deleted file mode 100644 index 61b55b073..000000000 --- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch +++ /dev/null @@ -1,76 +0,0 @@ -From: Peter Zijlstra -Date: Fri, 21 Aug 2009 11:56:45 +0200 -Subject: timer: delay waking softirqs from the jiffy tick -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -People were complaining about broken balancing with the recent -rt -series. - -A look at /proc/sched_debug yielded: - -cpu#0, 2393.874 MHz - .nr_running : 0 - .load : 0 - .cpu_load[0] : 177522 - .cpu_load[1] : 177522 - .cpu_load[2] : 177522 - .cpu_load[3] : 177522 - .cpu_load[4] : 177522 -cpu#1, 2393.874 MHz - .nr_running : 4 - .load : 4096 - .cpu_load[0] : 181618 - .cpu_load[1] : 180850 - .cpu_load[2] : 180274 - .cpu_load[3] : 179938 - .cpu_load[4] : 179758 - -Which indicated the cpu_load computation was hosed, the 177522 value -indicates that there is one RT task runnable. Initially I thought the -old problem of calculating the cpu_load from a softirq had re-surfaced, -however looking at the code shows its being done from scheduler_tick(). - -[ we really should fix this RT/cfs interaction some day... ] - -A few trace_printk()s later: - - sirq-timer/1-19 [001] 174.289744: 19: 50:S ==> [001] 0:140:R - -0 [001] 174.290724: enqueue_task_rt: adding task: 19/sirq-timer/1 with load: 177522 - -0 [001] 174.290725: 0:140:R + [001] 19: 50:S sirq-timer/1 - -0 [001] 174.290730: scheduler_tick: current load: 177522 - -0 [001] 174.290732: scheduler_tick: current: 0/swapper - -0 [001] 174.290736: 0:140:R ==> [001] 19: 50:R sirq-timer/1 - sirq-timer/1-19 [001] 174.290741: dequeue_task_rt: removing task: 19/sirq-timer/1 with load: 177522 - sirq-timer/1-19 [001] 174.290743: 19: 50:S ==> [001] 0:140:R - -We see that we always raise the timer softirq before doing the load -calculation. 
Avoid this by re-ordering the scheduler_tick() call in -update_process_times() to occur before we deal with timers. - -This lowers the load back to sanity and restores regular load-balancing -behaviour. - -Signed-off-by: Peter Zijlstra -Signed-off-by: Thomas Gleixner - ---- - kernel/time/timer.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -1456,13 +1456,13 @@ void update_process_times(int user_tick) - - /* Note: this timer irq context must be accounted for as well. */ - account_process_tick(p, user_tick); -+ scheduler_tick(); - run_local_timers(); - rcu_check_callbacks(user_tick); - #ifdef CONFIG_IRQ_WORK - if (in_irq()) - irq_work_tick(); - #endif -- scheduler_tick(); - run_posix_cpu_timers(p); - } - diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch deleted file mode 100644 index 609723ab2..000000000 --- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch +++ /dev/null @@ -1,31 +0,0 @@ -Subject: timer-fd: Prevent live lock -From: Thomas Gleixner -Date: Wed, 25 Jan 2012 11:08:40 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -If hrtimer_try_to_cancel() requires a retry, then depending on the -priority setting te retry loop might prevent timer callback completion -on RT. Prevent that by waiting for completion on RT, no change for a -non RT kernel. - -Reported-by: Sankara Muthukrishnan -Signed-off-by: Thomas Gleixner - ---- - fs/timerfd.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/fs/timerfd.c -+++ b/fs/timerfd.c -@@ -450,7 +450,10 @@ static int do_timerfd_settime(int ufd, i - break; - } - spin_unlock_irq(&ctx->wqh.lock); -- cpu_relax(); -+ if (isalarm(ctx)) -+ hrtimer_wait_for_timer(&ctx->t.alarm.timer); -+ else -+ hrtimer_wait_for_timer(&ctx->t.tmr); - } - - /* diff --git a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch deleted file mode 100644 index ba23aefbd..000000000 --- a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch +++ /dev/null @@ -1,74 +0,0 @@ -Subject: timers: Avoid the switch timers base set to NULL trick on RT -From: Thomas Gleixner -Date: Thu, 21 Jul 2011 15:23:39 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -On RT that code is preemptible, so we cannot assign NULL to timers -base as a preempter would spin forever in lock_timer_base(). - -Signed-off-by: Thomas Gleixner ---- - kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++---------- - 1 file changed, 35 insertions(+), 10 deletions(-) - ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -780,6 +780,39 @@ static struct tvec_base *lock_timer_base - cpu_relax(); - } - } -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline struct tvec_base *switch_timer_base(struct timer_list *timer, -+ struct tvec_base *old, -+ struct tvec_base *new) -+{ -+ /* -+ * We cannot do the below because we might be preempted and -+ * then the preempter would see NULL and loop forever. 
-+ */ -+ if (spin_trylock(&new->lock)) { -+ WRITE_ONCE(timer->flags, -+ (timer->flags & ~TIMER_BASEMASK) | new->cpu); -+ spin_unlock(&old->lock); -+ return new; -+ } -+ return old; -+} -+ -+#else -+static inline struct tvec_base *switch_timer_base(struct timer_list *timer, -+ struct tvec_base *old, -+ struct tvec_base *new) -+{ -+ /* See the comment in lock_timer_base() */ -+ timer->flags |= TIMER_MIGRATING; -+ -+ spin_unlock(&old->lock); -+ spin_lock(&new->lock); -+ WRITE_ONCE(timer->flags, -+ (timer->flags & ~TIMER_BASEMASK) | new->cpu); -+ return new; -+} -+#endif - - static inline int - __mod_timer(struct timer_list *timer, unsigned long expires, -@@ -810,16 +843,8 @@ static inline int - * handler yet has not finished. This also guarantees that - * the timer is serialized wrt itself. - */ -- if (likely(base->running_timer != timer)) { -- /* See the comment in lock_timer_base() */ -- timer->flags |= TIMER_MIGRATING; -- -- spin_unlock(&base->lock); -- base = new_base; -- spin_lock(&base->lock); -- WRITE_ONCE(timer->flags, -- (timer->flags & ~TIMER_BASEMASK) | base->cpu); -- } -+ if (likely(base->running_timer != timer)) -+ base = switch_timer_base(timer, base, new_base); - } - - timer->expires = expires; diff --git a/debian/patches/features/all/rt/timers-preempt-rt-support.patch b/debian/patches/features/all/rt/timers-preempt-rt-support.patch deleted file mode 100644 index 631b38790..000000000 --- a/debian/patches/features/all/rt/timers-preempt-rt-support.patch +++ /dev/null @@ -1,55 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:20 -0500 -Subject: timers: Preempt-rt support -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The base->lock is a sleeping lock. Try not to workaround with a -spin_trylock(). The rt-mutex lock is not irq save even the try-lock -due to way how the inner lock accessed. Even with this fixed have the -problem that the owner is not the current process on the CPU but his -pid is used while taking the lock. Therefore we go with ext jiffy for -the wakeup. Also drop preempt_disable() usage since we need just to -ensure not to switch CPUs (the data structures have own locks). - -[bigeasy: dropped that spin try lock] -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - kernel/time/timer.c | 12 ++++++++++-- - 1 file changed, 10 insertions(+), 2 deletions(-) - ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -1422,6 +1422,14 @@ u64 get_next_timer_interrupt(unsigned lo - if (cpu_is_offline(smp_processor_id())) - return expires; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * On PREEMPT_RT we cannot sleep here. As a result we can't take -+ * the base lock to check when the next timer is pending and so -+ * we assume the next jiffy. -+ */ -+ return basem + TICK_NSEC; -+#endif - spin_lock(&base->lock); - if (base->active_timers) { - if (time_before_eq(base->next_timer, base->timer_jiffies)) -@@ -1621,7 +1629,7 @@ static void migrate_timers(int cpu) - - BUG_ON(cpu_online(cpu)); - old_base = per_cpu_ptr(&tvec_bases, cpu); -- new_base = get_cpu_ptr(&tvec_bases); -+ new_base = get_local_ptr(&tvec_bases); - /* - * The caller is globally serialized and nobody else - * takes two locks at once, deadlock is not possible. 
-@@ -1645,7 +1653,7 @@ static void migrate_timers(int cpu) - - spin_unlock(&old_base->lock); - spin_unlock_irq(&new_base->lock); -- put_cpu_ptr(&tvec_bases); -+ put_local_ptr(&tvec_bases); - } - - static int timer_cpu_notify(struct notifier_block *self, diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch deleted file mode 100644 index 61bbd0970..000000000 --- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch +++ /dev/null @@ -1,150 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: timers: Prepare for full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When softirqs can be preempted we need to make sure that cancelling -the timer from the active thread can not deadlock vs. a running timer -callback. Add a waitqueue to resolve that. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - include/linux/timer.h | 2 +- - kernel/sched/core.c | 9 +++++++-- - kernel/time/timer.c | 41 ++++++++++++++++++++++++++++++++++++++--- - 3 files changed, 46 insertions(+), 6 deletions(-) - ---- a/include/linux/timer.h -+++ b/include/linux/timer.h -@@ -225,7 +225,7 @@ extern void add_timer(struct timer_list - - extern int try_to_del_timer_sync(struct timer_list *timer); - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - extern int del_timer_sync(struct timer_list *timer); - #else - # define del_timer_sync(t) del_timer(t) ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -618,11 +618,14 @@ void resched_cpu(int cpu) - */ - int get_nohz_timer_target(void) - { -- int i, cpu = smp_processor_id(); -+ int i, cpu; - struct sched_domain *sd; - -+ preempt_disable_rt(); -+ cpu = smp_processor_id(); -+ - if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) -- return cpu; -+ goto preempt_en_rt; - - rcu_read_lock(); - for_each_domain(cpu, sd) { -@@ -638,6 +641,8 @@ int get_nohz_timer_target(void) - cpu = housekeeping_any_cpu(); - unlock: - rcu_read_unlock(); -+preempt_en_rt: -+ preempt_enable_rt(); - return cpu; - } - /* ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -80,6 +80,9 @@ struct tvec_root { - struct tvec_base { - spinlock_t lock; - struct timer_list *running_timer; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ wait_queue_head_t wait_for_running_timer; -+#endif - unsigned long timer_jiffies; - unsigned long next_timer; - unsigned long active_timers; -@@ -1006,6 +1009,33 @@ void add_timer_on(struct timer_list *tim - } - EXPORT_SYMBOL_GPL(add_timer_on); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * Wait for a running timer -+ */ -+static void wait_for_running_timer(struct timer_list *timer) -+{ -+ struct tvec_base *base; -+ u32 tf = timer->flags; -+ -+ if (tf & TIMER_MIGRATING) -+ return; -+ -+ base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); -+ wait_event(base->wait_for_running_timer, -+ base->running_timer != timer); -+} -+ -+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer) -+#else -+static inline void wait_for_running_timer(struct timer_list *timer) -+{ -+ cpu_relax(); -+} -+ -+# define wakeup_timer_waiters(b) do { } while (0) -+#endif -+ - /** - * del_timer - deactive a timer. 
- * @timer: the timer to be deactivated -@@ -1063,7 +1093,7 @@ int try_to_del_timer_sync(struct timer_l - } - EXPORT_SYMBOL(try_to_del_timer_sync); - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - /** - * del_timer_sync - deactivate a timer and wait for the handler to finish. - * @timer: the timer to be deactivated -@@ -1123,7 +1153,7 @@ int del_timer_sync(struct timer_list *ti - int ret = try_to_del_timer_sync(timer); - if (ret >= 0) - return ret; -- cpu_relax(); -+ wait_for_running_timer(timer); - } - } - EXPORT_SYMBOL(del_timer_sync); -@@ -1248,15 +1278,17 @@ static inline void __run_timers(struct t - if (irqsafe) { - spin_unlock(&base->lock); - call_timer_fn(timer, fn, data); -+ base->running_timer = NULL; - spin_lock(&base->lock); - } else { - spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn, data); -+ base->running_timer = NULL; - spin_lock_irq(&base->lock); - } - } - } -- base->running_timer = NULL; -+ wakeup_timer_waiters(base); - spin_unlock_irq(&base->lock); - } - -@@ -1645,6 +1677,9 @@ static void __init init_timer_cpu(int cp - - base->cpu = cpu; - spin_lock_init(&base->lock); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ init_waitqueue_head(&base->wait_for_running_timer); -+#endif - - base->timer_jiffies = jiffies; - base->next_timer = base->timer_jiffies; diff --git a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch b/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch deleted file mode 100644 index 3bc8114e6..000000000 --- a/debian/patches/features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Carsten Emde -Date: Tue, 5 Jan 2016 10:21:59 +0100 -Subject: trace/latency-hist: Consider new argument when probing the - sched_switch tracer -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The sched_switch tracer has got a new argument. Fix the latency tracer -accordingly. - -Recently: c73464b1c843 ("sched/core: Fix trace_sched_switch()") since -v4.4-rc1. 
- -Signed-off-by: Carsten Emde -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/trace/latency_hist.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/trace/latency_hist.c -+++ b/kernel/trace/latency_hist.c -@@ -117,7 +117,7 @@ static char *wakeup_latency_hist_dir_sha - static notrace void probe_wakeup_latency_hist_start(void *v, - struct task_struct *p); - static notrace void probe_wakeup_latency_hist_stop(void *v, -- struct task_struct *prev, struct task_struct *next); -+ bool preempt, struct task_struct *prev, struct task_struct *next); - static notrace void probe_sched_migrate_task(void *, - struct task_struct *task, int cpu); - static struct enable_data wakeup_latency_enabled_data = { -@@ -907,7 +907,7 @@ static notrace void probe_wakeup_latency - } - - static notrace void probe_wakeup_latency_hist_stop(void *v, -- struct task_struct *prev, struct task_struct *next) -+ bool preempt, struct task_struct *prev, struct task_struct *next) - { - unsigned long flags; - int cpu = task_cpu(next); diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch deleted file mode 100644 index 159d8de25..000000000 --- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Steven Rostedt -Date: Thu, 29 Sep 2011 12:24:30 -0500 -Subject: tracing: Account for preempt off in preempt_schedule() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -The preempt_schedule() uses the preempt_disable_notrace() version -because it can cause infinite recursion by the function tracer as -the function tracer uses preempt_enable_notrace() which may call -back into the preempt_schedule() code as the NEED_RESCHED is still -set and the PREEMPT_ACTIVE has not been set yet. - -See commit: d1f74e20b5b064a130cd0743a256c2d3cfe84010 that made this -change. - -The preemptoff and preemptirqsoff latency tracers require the first -and last preempt count modifiers to enable tracing. But this skips -the checks. Since we can not convert them back to the non notrace -version, we can use the idle() hooks for the latency tracers here. -That is, the start/stop_critical_timings() works well to manually -start and stop the latency tracer for preempt off timings. - -Signed-off-by: Steven Rostedt -Signed-off-by: Clark Williams -Signed-off-by: Thomas Gleixner ---- - kernel/sched/core.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3306,7 +3306,16 @@ asmlinkage __visible void __sched notrac - * an infinite recursion. - */ - prev_ctx = exception_enter(); -+ /* -+ * The add/subtract must not be traced by the function -+ * tracer. But we still want to account for the -+ * preempt off latency tracer. Since the _notrace versions -+ * of add/subtract skip the accounting for latency tracer -+ * we must force it manually. 
-+ */ -+ start_critical_timings(); - __schedule(true); -+ stop_critical_timings(); - exception_exit(prev_ctx); - - preempt_enable_no_resched_notrace(); diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch deleted file mode 100644 index 70759e0e9..000000000 --- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch +++ /dev/null @@ -1,66 +0,0 @@ -Subject: net: Remove preemption disabling in netif_rx() -From: Priyanka Jain -Date: Thu, 17 May 2012 09:35:11 +0530 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -1)enqueue_to_backlog() (called from netif_rx) should be - bind to a particluar CPU. This can be achieved by - disabling migration. No need to disable preemption - -2)Fixes crash "BUG: scheduling while atomic: ksoftirqd" - in case of RT. - If preemption is disabled, enqueue_to_backog() is called - in atomic context. And if backlog exceeds its count, - kfree_skb() is called. But in RT, kfree_skb() might - gets scheduled out, so it expects non atomic context. - -3)When CONFIG_PREEMPT_RT_FULL is not defined, - migrate_enable(), migrate_disable() maps to - preempt_enable() and preempt_disable(), so no - change in functionality in case of non-RT. - --Replace preempt_enable(), preempt_disable() with - migrate_enable(), migrate_disable() respectively --Replace get_cpu(), put_cpu() with get_cpu_light(), - put_cpu_light() respectively - -Signed-off-by: Priyanka Jain -Acked-by: Rajan Srivastava -Cc: -Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com - -Signed-off-by: Thomas Gleixner ---- - Testing: Tested successfully on p4080ds(8-core SMP system) - - net/core/dev.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -3580,7 +3580,7 @@ static int netif_rx_internal(struct sk_b - struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu; - -- preempt_disable(); -+ migrate_disable(); - rcu_read_lock(); - - cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -3590,13 +3590,13 @@ static int netif_rx_internal(struct sk_b - ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); - - rcu_read_unlock(); -- preempt_enable(); -+ migrate_enable(); - } else - #endif - { - unsigned int qtail; -- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); -- put_cpu(); -+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); -+ put_cpu_light(); - } - return ret; - } diff --git a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch b/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch deleted file mode 100644 index 2f0637537..000000000 --- a/debian/patches/features/all/rt/usb-use-_nort-in-giveback.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 8 Nov 2013 17:34:54 +0100 -Subject: usb: Use _nort in giveback function -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet -context") I see - -|BUG: sleeping function called from invalid context at kernel/rtmutex.c:673 -|in_atomic(): 0, irqs_disabled(): 1, pid: 109, name: irq/11-uhci_hcd -|no locks held by irq/11-uhci_hcd/109. 
-|irq event stamp: 440 -|hardirqs last enabled at (439): [] _raw_spin_unlock_irqrestore+0x75/0x90 -|hardirqs last disabled at (440): [] __usb_hcd_giveback_urb+0x46/0xc0 -|softirqs last enabled at (0): [] copy_process.part.52+0x511/0x1510 -|softirqs last disabled at (0): [< (null)>] (null) -|CPU: 3 PID: 109 Comm: irq/11-uhci_hcd Not tainted 3.12.0-rt0-rc1+ #13 -|Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 -| 0000000000000000 ffff8800db9ffbe0 ffffffff8169f064 0000000000000000 -| ffff8800db9ffbf8 ffffffff810b2122 ffff88020f03e888 ffff8800db9ffc18 -| ffffffff816a6944 ffffffff810b5748 ffff88020f03c000 ffff8800db9ffc50 -|Call Trace: -| [] dump_stack+0x4e/0x8f -| [] __might_sleep+0x112/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] hid_ctrl+0x3b/0x190 -| [] __usb_hcd_giveback_urb+0x4f/0xc0 -| [] usb_hcd_giveback_urb+0x3f/0x140 -| [] uhci_giveback_urb+0xaf/0x280 -| [] uhci_scan_schedule+0x47a/0xb10 -| [] uhci_irq+0xa6/0x1a0 -| [] usb_hcd_irq+0x28/0x40 -| [] irq_forced_thread_fn+0x23/0x70 -| [] irq_thread+0x10f/0x150 -| [] kthread+0xcd/0xe0 -| [] ret_from_fork+0x7c/0xb0 - -on -RT we run threaded so no need to disable interrupts. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/usb/core/hcd.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/usb/core/hcd.c -+++ b/drivers/usb/core/hcd.c -@@ -1735,9 +1735,9 @@ static void __usb_hcd_giveback_urb(struc - * and no one may trigger the above deadlock situation when - * running complete() in tasklet. - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - urb->complete(urb); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - usb_anchor_resume_wakeups(anchor); - atomic_dec(&urb->use_count); diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch deleted file mode 100644 index df49bc921..000000000 --- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Thomas Gleixner -Date: Tue, 21 Jul 2009 23:06:05 +0200 -Subject: core: Do not disable interrupts on RT in kernel/users.c -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use the local_irq_*_nort variants to reduce latencies in RT. The code -is serialized by the locks. No need to disable interrupts. - -Signed-off-by: Thomas Gleixner - ---- - kernel/user.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/user.c -+++ b/kernel/user.c -@@ -161,11 +161,11 @@ void free_uid(struct user_struct *up) - if (!up) - return; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) - free_user(up, flags); - else -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - struct user_struct *alloc_uid(kuid_t uid) diff --git a/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch b/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch deleted file mode 100644 index 66a846418..000000000 --- a/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch +++ /dev/null @@ -1,206 +0,0 @@ -Subject: vtime: Split lock and seqcount -From: Thomas Gleixner -Date: Tue, 23 Jul 2013 15:45:51 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Replace vtime_seqlock seqlock with a simple seqcounter and a rawlock -so it can taken in atomic context on RT. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/init_task.h | 3 +- - include/linux/sched.h | 3 +- - kernel/fork.c | 3 +- - kernel/sched/cputime.c | 62 +++++++++++++++++++++++++++++----------------- - 4 files changed, 46 insertions(+), 25 deletions(-) ---- a/include/linux/init_task.h -+++ b/include/linux/init_task.h -@@ -150,7 +150,8 @@ extern struct task_group root_task_group - - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - # define INIT_VTIME(tsk) \ -- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ -+ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ -+ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \ - .vtime_snap = 0, \ - .vtime_snap_whence = VTIME_SYS, - #else ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1521,7 +1521,8 @@ struct task_struct { - cputime_t gtime; - struct prev_cputime prev_cputime; - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -- seqlock_t vtime_seqlock; -+ raw_spinlock_t vtime_lock; -+ seqcount_t vtime_seq; - unsigned long long vtime_snap; - enum { - VTIME_SLEEPING = 0, ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1349,7 +1349,8 @@ static struct task_struct *copy_process( - prev_cputime_init(&p->prev_cputime); - - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -- seqlock_init(&p->vtime_seqlock); -+ raw_spin_lock_init(&p->vtime_lock); -+ seqcount_init(&p->vtime_seq); - p->vtime_snap = 0; - p->vtime_snap_whence = VTIME_SLEEPING; - #endif ---- a/kernel/sched/cputime.c -+++ b/kernel/sched/cputime.c -@@ -696,37 +696,45 @@ static void __vtime_account_system(struc - - void vtime_account_system(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - __vtime_account_system(tsk); -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - - void vtime_gen_account_irq_exit(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - __vtime_account_system(tsk); - if (context_tracking_in_user()) - tsk->vtime_snap_whence = VTIME_USER; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - - void vtime_account_user(struct task_struct *tsk) - { - cputime_t delta_cpu; - -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - delta_cpu = get_vtime_delta(tsk); - tsk->vtime_snap_whence = VTIME_SYS; - account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - - void vtime_user_enter(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - __vtime_account_system(tsk); - tsk->vtime_snap_whence = VTIME_USER; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - - void vtime_guest_enter(struct task_struct *tsk) -@@ -738,19 +746,23 @@ void vtime_guest_enter(struct task_struc - * synchronization against the reader (task_gtime()) - * that can thus safely catch up with a tickless delta. 
- */ -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - __vtime_account_system(tsk); - current->flags |= PF_VCPU; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - EXPORT_SYMBOL_GPL(vtime_guest_enter); - - void vtime_guest_exit(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ raw_spin_lock(&tsk->vtime_lock); -+ write_seqcount_begin(&tsk->vtime_seq); - __vtime_account_system(tsk); - current->flags &= ~PF_VCPU; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seq); -+ raw_spin_unlock(&tsk->vtime_lock); - } - EXPORT_SYMBOL_GPL(vtime_guest_exit); - -@@ -763,24 +775,30 @@ void vtime_account_idle(struct task_stru - - void arch_vtime_task_switch(struct task_struct *prev) - { -- write_seqlock(&prev->vtime_seqlock); -+ raw_spin_lock(&prev->vtime_lock); -+ write_seqcount_begin(&prev->vtime_seq); - prev->vtime_snap_whence = VTIME_SLEEPING; -- write_sequnlock(&prev->vtime_seqlock); -+ write_seqcount_end(&prev->vtime_seq); -+ raw_spin_unlock(&prev->vtime_lock); - -- write_seqlock(¤t->vtime_seqlock); -+ raw_spin_lock(¤t->vtime_lock); -+ write_seqcount_begin(¤t->vtime_seq); - current->vtime_snap_whence = VTIME_SYS; - current->vtime_snap = sched_clock_cpu(smp_processor_id()); -- write_sequnlock(¤t->vtime_seqlock); -+ write_seqcount_end(¤t->vtime_seq); -+ raw_spin_unlock(¤t->vtime_lock); - } - - void vtime_init_idle(struct task_struct *t, int cpu) - { - unsigned long flags; - -- write_seqlock_irqsave(&t->vtime_seqlock, flags); -+ raw_spin_lock_irqsave(&t->vtime_lock, flags); -+ write_seqcount_begin(&t->vtime_seq); - t->vtime_snap_whence = VTIME_SYS; - t->vtime_snap = sched_clock_cpu(cpu); -- write_sequnlock_irqrestore(&t->vtime_seqlock, flags); -+ write_seqcount_end(&t->vtime_seq); -+ raw_spin_unlock_irqrestore(&t->vtime_lock, flags); - } - - cputime_t task_gtime(struct task_struct *t) -@@ -792,13 +810,13 @@ cputime_t task_gtime(struct task_struct - return t->gtime; - - do { -- seq = read_seqbegin(&t->vtime_seqlock); -+ seq = read_seqcount_begin(&t->vtime_seq); - - gtime = t->gtime; - if (t->flags & PF_VCPU) - gtime += vtime_delta(t); - -- } while (read_seqretry(&t->vtime_seqlock, seq)); -+ } while (read_seqcount_retry(&t->vtime_seq, seq)); - - return gtime; - } -@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t - *udelta = 0; - *sdelta = 0; - -- seq = read_seqbegin(&t->vtime_seqlock); -+ seq = read_seqcount_begin(&t->vtime_seq); - - if (u_dst) - *u_dst = *u_src; -@@ -845,7 +863,7 @@ fetch_task_cputime(struct task_struct *t - if (t->vtime_snap_whence == VTIME_SYS) - *sdelta = delta; - } -- } while (read_seqretry(&t->vtime_seqlock, seq)); -+ } while (read_seqcount_retry(&t->vtime_seq, seq)); - } - - diff --git a/debian/patches/features/all/rt/wait-simple-implementation.patch b/debian/patches/features/all/rt/wait-simple-implementation.patch deleted file mode 100644 index a76cfa059..000000000 --- a/debian/patches/features/all/rt/wait-simple-implementation.patch +++ /dev/null @@ -1,363 +0,0 @@ -From: Thomas Gleixner -Date: Mon Dec 12 12:29:04 2011 +0100 -Subject: wait-simple: Simple waitqueue implementation - -wait_queue is a swiss army knife and in most of the cases the -complexity is not needed. For RT waitqueues are a constant source of -trouble as we can't convert the head lock to a raw spinlock due to -fancy and long lasting callbacks. - -Provide a slim version, which allows RT to replace wait queues. 
This -should go mainline as well, as it lowers memory consumption and -runtime overhead. - -Signed-off-by: Thomas Gleixner -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -smp_mb() added by Steven Rostedt to fix a race condition with swait -wakeups vs adding items to the list. ---- - include/linux/wait-simple.h | 207 ++++++++++++++++++++++++++++++++++++++++++++ - kernel/sched/Makefile | 2 - kernel/sched/wait-simple.c | 115 ++++++++++++++++++++++++ - 3 files changed, 323 insertions(+), 1 deletion(-) - ---- /dev/null -+++ b/include/linux/wait-simple.h -@@ -0,0 +1,207 @@ -+#ifndef _LINUX_WAIT_SIMPLE_H -+#define _LINUX_WAIT_SIMPLE_H -+ -+#include -+#include -+ -+#include -+ -+struct swaiter { -+ struct task_struct *task; -+ struct list_head node; -+}; -+ -+#define DEFINE_SWAITER(name) \ -+ struct swaiter name = { \ -+ .task = current, \ -+ .node = LIST_HEAD_INIT((name).node), \ -+ } -+ -+struct swait_head { -+ raw_spinlock_t lock; -+ struct list_head list; -+}; -+ -+#define SWAIT_HEAD_INITIALIZER(name) { \ -+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ -+ .list = LIST_HEAD_INIT((name).list), \ -+ } -+ -+#define DEFINE_SWAIT_HEAD(name) \ -+ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) -+ -+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); -+ -+#define init_swait_head(swh) \ -+ do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __init_swait_head((swh), &__key); \ -+ } while (0) -+ -+/* -+ * Waiter functions -+ */ -+extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); -+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); -+extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); -+extern void swait_finish(struct swait_head *head, struct swaiter *w); -+ -+/* Check whether a head has waiters enqueued */ -+static inline bool swaitqueue_active(struct swait_head *h) -+{ -+ /* Make sure the condition is visible before checking list_empty() */ -+ smp_mb(); -+ return !list_empty(&h->list); -+} -+ -+/* -+ * Wakeup functions -+ */ -+extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); -+extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); -+ -+#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) -+#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) -+#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) -+#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) -+ -+/* -+ * Event API -+ */ -+#define __swait_event(wq, condition) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ schedule(); \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event - sleep until a condition gets true -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * -+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. 
-+ */ -+#define swait_event(wq, condition) \ -+do { \ -+ if (condition) \ -+ break; \ -+ __swait_event(wq, condition); \ -+} while (0) -+ -+#define __swait_event_interruptible(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ if (signal_pending(current)) { \ -+ ret = -ERESTARTSYS; \ -+ break; \ -+ } \ -+ schedule(); \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+#define __swait_event_interruptible_timeout(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ if (signal_pending(current)) { \ -+ ret = -ERESTARTSYS; \ -+ break; \ -+ } \ -+ ret = schedule_timeout(ret); \ -+ if (!ret) \ -+ break; \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event_interruptible - sleep until a condition gets true -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * -+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. -+ */ -+#define swait_event_interruptible(wq, condition) \ -+({ \ -+ int __ret = 0; \ -+ if (!(condition)) \ -+ __swait_event_interruptible(wq, condition, __ret); \ -+ __ret; \ -+}) -+ -+#define swait_event_interruptible_timeout(wq, condition, timeout) \ -+({ \ -+ int __ret = timeout; \ -+ if (!(condition)) \ -+ __swait_event_interruptible_timeout(wq, condition, __ret); \ -+ __ret; \ -+}) -+ -+#define __swait_event_timeout(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ ret = schedule_timeout(ret); \ -+ if (!ret) \ -+ break; \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * @timeout: timeout, in jiffies -+ * -+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. -+ * -+ * The function returns 0 if the @timeout elapsed, and the remaining -+ * jiffies if the condition evaluated to true before the timeout elapsed. 
-+ */ -+#define swait_event_timeout(wq, condition, timeout) \ -+({ \ -+ long __ret = timeout; \ -+ if (!(condition)) \ -+ __swait_event_timeout(wq, condition, __ret); \ -+ __ret; \ -+}) -+ -+#endif ---- a/kernel/sched/Makefile -+++ b/kernel/sched/Makefile -@@ -13,7 +13,7 @@ endif - - obj-y += core.o loadavg.o clock.o cputime.o - obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o --obj-y += wait.o completion.o idle.o -+obj-y += wait.o wait-simple.o completion.o idle.o - obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o - obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o - obj-$(CONFIG_SCHEDSTATS) += stats.o ---- /dev/null -+++ b/kernel/sched/wait-simple.c -@@ -0,0 +1,115 @@ -+/* -+ * Simple waitqueues without fancy flags and callbacks -+ * -+ * (C) 2011 Thomas Gleixner -+ * -+ * Based on kernel/wait.c -+ * -+ * For licencing details see kernel-base/COPYING -+ */ -+#include -+#include -+#include -+#include -+ -+/* Adds w to head->list. Must be called with head->lock locked. */ -+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) -+{ -+ list_add(&w->node, &head->list); -+ /* We can't let the condition leak before the setting of head */ -+ smp_mb(); -+} -+ -+/* Removes w from head->list. Must be called with head->lock locked. */ -+static inline void __swait_dequeue(struct swaiter *w) -+{ -+ list_del_init(&w->node); -+} -+ -+void __init_swait_head(struct swait_head *head, struct lock_class_key *key) -+{ -+ raw_spin_lock_init(&head->lock); -+ lockdep_set_class(&head->lock, key); -+ INIT_LIST_HEAD(&head->list); -+} -+EXPORT_SYMBOL(__init_swait_head); -+ -+void swait_prepare_locked(struct swait_head *head, struct swaiter *w) -+{ -+ w->task = current; -+ if (list_empty(&w->node)) -+ __swait_enqueue(head, w); -+} -+ -+void swait_prepare(struct swait_head *head, struct swaiter *w, int state) -+{ -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&head->lock, flags); -+ swait_prepare_locked(head, w); -+ __set_current_state(state); -+ raw_spin_unlock_irqrestore(&head->lock, flags); -+} -+EXPORT_SYMBOL(swait_prepare); -+ -+void swait_finish_locked(struct swait_head *head, struct swaiter *w) -+{ -+ __set_current_state(TASK_RUNNING); -+ if (w->task) -+ __swait_dequeue(w); -+} -+ -+void swait_finish(struct swait_head *head, struct swaiter *w) -+{ -+ unsigned long flags; -+ -+ __set_current_state(TASK_RUNNING); -+ if (w->task) { -+ raw_spin_lock_irqsave(&head->lock, flags); -+ __swait_dequeue(w); -+ raw_spin_unlock_irqrestore(&head->lock, flags); -+ } -+} -+EXPORT_SYMBOL(swait_finish); -+ -+unsigned int -+__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num) -+{ -+ struct swaiter *curr, *next; -+ int woken = 0; -+ -+ list_for_each_entry_safe(curr, next, &head->list, node) { -+ if (wake_up_state(curr->task, state)) { -+ __swait_dequeue(curr); -+ /* -+ * The waiting task can free the waiter as -+ * soon as curr->task = NULL is written, -+ * without taking any locks. A memory barrier -+ * is required here to prevent the following -+ * store to curr->task from getting ahead of -+ * the dequeue operation. 
-+ */ -+ smp_wmb(); -+ curr->task = NULL; -+ if (++woken == num) -+ break; -+ } -+ } -+ return woken; -+} -+ -+unsigned int -+__swait_wake(struct swait_head *head, unsigned int state, unsigned int num) -+{ -+ unsigned long flags; -+ int woken; -+ -+ if (!swaitqueue_active(head)) -+ return 0; -+ -+ raw_spin_lock_irqsave(&head->lock, flags); -+ woken = __swait_wake_locked(head, state, num); -+ raw_spin_unlock_irqrestore(&head->lock, flags); -+ return woken; -+} -+EXPORT_SYMBOL(__swait_wake); diff --git a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch b/debian/patches/features/all/rt/wait.h-include-atomic.h.patch deleted file mode 100644 index 210d36f3f..000000000 --- a/debian/patches/features/all/rt/wait.h-include-atomic.h.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 28 Oct 2013 12:19:57 +0100 -Subject: wait.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -| CC init/main.o -|In file included from include/linux/mmzone.h:9:0, -| from include/linux/gfp.h:4, -| from include/linux/kmod.h:22, -| from include/linux/module.h:13, -| from init/main.c:15: -|include/linux/wait.h: In function ‘wait_on_atomic_t’: -|include/linux/wait.h:982:2: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] -| if (atomic_read(val) == 0) -| ^ - -This pops up on ARM. Non-RT gets its atomic.h include from spinlock.h - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/wait.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/wait.h -+++ b/include/linux/wait.h -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - - typedef struct __wait_queue wait_queue_t; - typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch deleted file mode 100644 index ab57e9b36..000000000 --- a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch +++ /dev/null @@ -1,133 +0,0 @@ -From: Thomas Gleixner -Date: Mon, 01 Jul 2013 11:02:42 +0200 -Subject: workqueue: Prevent workqueue versus ata-piix livelock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -An Intel i7 system regularly detected rcu_preempt stalls after the kernel -was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no -longer possible, unless the system was restarted. - -The kernel message was: -INFO: rcu_preempt self-detected stall on CPU { 6} -[..] -NMI backtrace for cpu 6 -CPU 6 -Pid: 119, comm: irq/19-ata_piix Not tainted 3.8.13-rt13 #11 Shuttle Inc. 
SX58/SX58 -RIP: 0010:[] [] ip_compute_csum+0x30/0x30 -RSP: 0018:ffff880333303cb0 EFLAGS: 00000002 -RAX: 0000000000000006 RBX: 00000000000003e9 RCX: 0000000000000034 -RDX: 0000000000000000 RSI: ffffffff81aa16d0 RDI: 0000000000000001 -RBP: ffff880333303ce8 R08: ffffffff81aa16d0 R09: ffffffff81c1b8cc -R10: 0000000000000000 R11: 0000000000000000 R12: 000000000005161f -R13: 0000000000000006 R14: ffffffff81aa16d0 R15: 0000000000000002 -FS: 0000000000000000(0000) GS:ffff880333300000(0000) knlGS:0000000000000000 -CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b -CR2: 0000003c1b2bb420 CR3: 0000000001a0f000 CR4: 00000000000007e0 -DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 -DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 -Process irq/19-ata_piix (pid: 119, threadinfo ffff88032d88a000, task ffff88032df80000) -Stack: -ffffffff8124cb32 000000000005161e 00000000000003e9 0000000000001000 -0000000000009022 ffffffff81aa16d0 0000000000000002 ffff880333303cf8 -ffffffff8124caa9 ffff880333303d08 ffffffff8124cad2 ffff880333303d28 -Call Trace: - -[] ? delay_tsc+0x33/0xe3 -[] __delay+0xf/0x11 -[] __const_udelay+0x27/0x29 -[] native_safe_apic_wait_icr_idle+0x39/0x45 -[] __default_send_IPI_dest_field.constprop.0+0x1e/0x58 -[] default_send_IPI_mask_sequence_phys+0x49/0x7d -[] physflat_send_IPI_all+0x17/0x19 -[] arch_trigger_all_cpu_backtrace+0x50/0x79 -[] rcu_check_callbacks+0x1cb/0x568 -[] ? raise_softirq+0x2e/0x35 -[] ? tick_sched_do_timer+0x38/0x38 -[] update_process_times+0x44/0x55 -[] tick_sched_handle+0x4a/0x59 -[] tick_sched_timer+0x3c/0x5b -[] __run_hrtimer+0x9b/0x158 -[] hrtimer_interrupt+0x172/0x2aa -[] smp_apic_timer_interrupt+0x76/0x89 -[] apic_timer_interrupt+0x6d/0x80 - -[] ? __local_lock_irqsave+0x17/0x4a -[] try_to_grab_pending+0x42/0x17e -[] mod_delayed_work_on+0x32/0x88 -[] mod_delayed_work+0x1c/0x1e -[] blk_run_queue_async+0x37/0x39 -[] flush_end_io+0xf1/0x107 -[] blk_finish_request+0x21e/0x264 -[] blk_end_bidi_request+0x42/0x60 -[] blk_end_request+0x10/0x12 -[] scsi_io_completion+0x1bf/0x492 -[] ? sd_done+0x298/0x2ef -[] scsi_finish_command+0xe9/0xf2 -[] scsi_softirq_done+0x106/0x10f -[] blk_done_softirq+0x77/0x87 -[] do_current_softirqs+0x172/0x2e1 -[] ? irq_thread_fn+0x3a/0x3a -[] local_bh_enable+0x43/0x72 -[] irq_forced_thread_fn+0x46/0x52 -[] irq_thread+0x8c/0x17c -[] ? irq_thread+0x17c/0x17c -[] ? wake_threads_waitq+0x44/0x44 -[] kthread+0x8d/0x95 -[] ? __kthread_parkme+0x65/0x65 -[] ret_from_fork+0x7c/0xb0 -[] ? __kthread_parkme+0x65/0x65 - -The state of softirqd of this CPU at the time of the crash was: -ksoftirqd/6 R running task 0 53 2 0x00000000 -ffff88032fc39d18 0000000000000046 ffff88033330c4c0 ffff8803303f4710 -ffff88032fc39fd8 ffff88032fc39fd8 0000000000000000 0000000000062500 -ffff88032df88000 ffff8803303f4710 0000000000000000 ffff88032fc38000 -Call Trace: -[] ? __queue_work+0x27c/0x27c -[] preempt_schedule+0x61/0x76 -[] migrate_enable+0xe5/0x1df -[] ? __queue_work+0x27c/0x27c -[] run_timer_softirq+0x161/0x1d6 -[] do_current_softirqs+0x172/0x2e1 -[] run_ksoftirqd+0x2d/0x45 -[] smpboot_thread_fn+0x2ea/0x308 -[] ? test_ti_thread_flag+0xc/0xc -[] ? test_ti_thread_flag+0xc/0xc -[] kthread+0x8d/0x95 -[] ? __kthread_parkme+0x65/0x65 -[] ret_from_fork+0x7c/0xb0 -[] ? __kthread_parkme+0x65/0x65 - -Apparently, the softirq demon and the ata_piix IRQ handler were waiting -for each other to finish ending up in a livelock. After the below patch -was applied, the system no longer crashes. 
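In other words, the interrupt thread was spinning in the try_to_grab_pending() retry path while the softirq thread it depended on could never run. A condensed sketch of that path (illustrative only; the actual one-line fix is in the hunk below):

	/* callers such as mod_delayed_work_on() loop until PENDING is grabbed */
	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	/* inside try_to_grab_pending(), the -EAGAIN path used to end with */
	cpu_relax();	/* pure busy-wait: on RT this never lets the softirq
			 * thread that holds the timer base make progress */
	return -EAGAIN;

Replacing cpu_relax() with cpu_chill(), which sleeps briefly on RT instead of spinning, breaks the cycle.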
- -Reported-by: Carsten Emde -Proposed-by: Thomas Gleixner -Tested by: Carsten Emde -Signed-off-by: Carsten Emde -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/workqueue.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -49,6 +49,7 @@ - #include - #include - #include -+#include - - #include "workqueue_internal.h" - -@@ -1246,7 +1247,7 @@ static int try_to_grab_pending(struct wo - local_unlock_irqrestore(pendingb_lock, *flags); - if (work_is_canceling(work)) - return -ENOENT; -- cpu_relax(); -+ cpu_chill(); - return -EAGAIN; - } - diff --git a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch b/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch deleted file mode 100644 index a95ba0d27..000000000 --- a/debian/patches/features/all/rt/work-simple-Simple-work-queue-implemenation.patch +++ /dev/null @@ -1,234 +0,0 @@ -From: Daniel Wagner -Date: Fri, 11 Jul 2014 15:26:11 +0200 -Subject: work-simple: Simple work queue implemenation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Provides a framework for enqueuing callbacks from irq context -PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. - -Bases on wait-simple. - -Signed-off-by: Daniel Wagner -Cc: Sebastian Andrzej Siewior ---- - include/linux/work-simple.h | 24 ++++++ - kernel/sched/Makefile | 2 - kernel/sched/work-simple.c | 173 ++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 198 insertions(+), 1 deletion(-) - create mode 100644 include/linux/work-simple.h - create mode 100644 kernel/sched/work-simple.c - ---- /dev/null -+++ b/include/linux/work-simple.h -@@ -0,0 +1,24 @@ -+#ifndef _LINUX_SWORK_H -+#define _LINUX_SWORK_H -+ -+#include -+ -+struct swork_event { -+ struct list_head item; -+ unsigned long flags; -+ void (*func)(struct swork_event *); -+}; -+ -+static inline void INIT_SWORK(struct swork_event *event, -+ void (*func)(struct swork_event *)) -+{ -+ event->flags = 0; -+ event->func = func; -+} -+ -+bool swork_queue(struct swork_event *sev); -+ -+int swork_get(void); -+void swork_put(void); -+ -+#endif /* _LINUX_SWORK_H */ ---- a/kernel/sched/Makefile -+++ b/kernel/sched/Makefile -@@ -13,7 +13,7 @@ endif - - obj-y += core.o loadavg.o clock.o cputime.o - obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o --obj-y += wait.o wait-simple.o completion.o idle.o -+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o - obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o - obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o - obj-$(CONFIG_SCHEDSTATS) += stats.o ---- /dev/null -+++ b/kernel/sched/work-simple.c -@@ -0,0 +1,173 @@ -+/* -+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de -+ * -+ * Provides a framework for enqueuing callbacks from irq context -+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define SWORK_EVENT_PENDING (1 << 0) -+ -+static DEFINE_MUTEX(worker_mutex); -+static struct sworker *glob_worker; -+ -+struct sworker { -+ struct list_head events; -+ struct swait_head wq; -+ -+ raw_spinlock_t lock; -+ -+ struct task_struct *task; -+ int refs; -+}; -+ -+static bool swork_readable(struct sworker *worker) -+{ -+ bool r; -+ -+ if (kthread_should_stop()) -+ return true; -+ -+ raw_spin_lock_irq(&worker->lock); -+ r = !list_empty(&worker->events); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ return r; -+} -+ -+static int swork_kthread(void *arg) -+{ -+ struct sworker *worker = arg; -+ -+ for (;;) { -+ swait_event_interruptible(worker->wq, -+ swork_readable(worker)); -+ if (kthread_should_stop()) -+ break; -+ -+ raw_spin_lock_irq(&worker->lock); -+ while (!list_empty(&worker->events)) { -+ struct swork_event *sev; -+ -+ sev = list_first_entry(&worker->events, -+ struct swork_event, item); -+ list_del(&sev->item); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, -+ &sev->flags)); -+ sev->func(sev); -+ raw_spin_lock_irq(&worker->lock); -+ } -+ raw_spin_unlock_irq(&worker->lock); -+ } -+ return 0; -+} -+ -+static struct sworker *swork_create(void) -+{ -+ struct sworker *worker; -+ -+ worker = kzalloc(sizeof(*worker), GFP_KERNEL); -+ if (!worker) -+ return ERR_PTR(-ENOMEM); -+ -+ INIT_LIST_HEAD(&worker->events); -+ raw_spin_lock_init(&worker->lock); -+ init_swait_head(&worker->wq); -+ -+ worker->task = kthread_run(swork_kthread, worker, "kswork"); -+ if (IS_ERR(worker->task)) { -+ kfree(worker); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ return worker; -+} -+ -+static void swork_destroy(struct sworker *worker) -+{ -+ kthread_stop(worker->task); -+ -+ WARN_ON(!list_empty(&worker->events)); -+ kfree(worker); -+} -+ -+/** -+ * swork_queue - queue swork -+ * -+ * Returns %false if @work was already on a queue, %true otherwise. -+ * -+ * The work is queued and processed on a random CPU -+ */ -+bool swork_queue(struct swork_event *sev) -+{ -+ unsigned long flags; -+ -+ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) -+ return false; -+ -+ raw_spin_lock_irqsave(&glob_worker->lock, flags); -+ list_add_tail(&sev->item, &glob_worker->events); -+ raw_spin_unlock_irqrestore(&glob_worker->lock, flags); -+ -+ swait_wake(&glob_worker->wq); -+ return true; -+} -+EXPORT_SYMBOL_GPL(swork_queue); -+ -+/** -+ * swork_get - get an instance of the sworker -+ * -+ * Returns an negative error code if the initialization if the worker did not -+ * work, %0 otherwise. -+ * -+ */ -+int swork_get(void) -+{ -+ struct sworker *worker; -+ -+ mutex_lock(&worker_mutex); -+ if (!glob_worker) { -+ worker = swork_create(); -+ if (IS_ERR(worker)) { -+ mutex_unlock(&worker_mutex); -+ return -ENOMEM; -+ } -+ -+ glob_worker = worker; -+ } -+ -+ glob_worker->refs++; -+ mutex_unlock(&worker_mutex); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(swork_get); -+ -+/** -+ * swork_put - puts an instance of the sworker -+ * -+ * Will destroy the sworker thread. This function must not be called until all -+ * queued events have been completed. 
-+ */ -+void swork_put(void) -+{ -+ mutex_lock(&worker_mutex); -+ -+ glob_worker->refs--; -+ if (glob_worker->refs > 0) -+ goto out; -+ -+ swork_destroy(glob_worker); -+ glob_worker = NULL; -+out: -+ mutex_unlock(&worker_mutex); -+} -+EXPORT_SYMBOL_GPL(swork_put); diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch deleted file mode 100644 index 73ad7141f..000000000 --- a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch +++ /dev/null @@ -1,271 +0,0 @@ -From: Thomas Gleixner -Date: Wed Jun 22 19:47:03 2011 +0200 -Subject: sched: Distangle worker accounting from rqlock - -The worker accounting for cpu bound workers is plugged into the core -scheduler code and the wakeup code. This is not a hard requirement and -can be avoided by keeping track of the state in the workqueue code -itself. - -Keep track of the sleeping state in the worker itself and call the -notifier before entering the core scheduler. There might be false -positives when the task is woken between that call and actually -scheduling, but that's not really different from scheduling and being -woken immediately after switching away. There is also no harm from -updating nr_running when the task returns from scheduling instead of -accounting it in the wakeup code. - -Signed-off-by: Thomas Gleixner -Cc: Peter Zijlstra -Cc: Tejun Heo -Cc: Jens Axboe -Cc: Linus Torvalds -Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de -Signed-off-by: Thomas Gleixner -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - ---- - kernel/sched/core.c | 80 ++++++++------------------------------------ - kernel/workqueue.c | 55 ++++++++++++------------------ - kernel/workqueue_internal.h | 5 +- - 3 files changed, 41 insertions(+), 99 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1735,10 +1735,6 @@ static inline void ttwu_activate(struct - { - activate_task(rq, p, en_flags); - p->on_rq = TASK_ON_RQ_QUEUED; -- -- /* if a worker is waking up, notify workqueue */ -- if (p->flags & PF_WQ_WORKER) -- wq_worker_waking_up(p, cpu_of(rq)); - } - - /* -@@ -2055,52 +2051,6 @@ try_to_wake_up(struct task_struct *p, un - } - - /** -- * try_to_wake_up_local - try to wake up a local task with rq lock held -- * @p: the thread to be awakened -- * -- * Put @p on the run-queue if it's not already there. The caller must -- * ensure that this_rq() is locked, @p is bound to this_rq() and not -- * the current task. -- */ --static void try_to_wake_up_local(struct task_struct *p) --{ -- struct rq *rq = task_rq(p); -- -- if (WARN_ON_ONCE(rq != this_rq()) || -- WARN_ON_ONCE(p == current)) -- return; -- -- lockdep_assert_held(&rq->lock); -- -- if (!raw_spin_trylock(&p->pi_lock)) { -- /* -- * This is OK, because current is on_cpu, which avoids it being -- * picked for load-balance and preemption/IRQs are still -- * disabled avoiding further scheduler activity on it and we've -- * not yet picked a replacement task. 
-- */ -- lockdep_unpin_lock(&rq->lock); -- raw_spin_unlock(&rq->lock); -- raw_spin_lock(&p->pi_lock); -- raw_spin_lock(&rq->lock); -- lockdep_pin_lock(&rq->lock); -- } -- -- if (!(p->state & TASK_NORMAL)) -- goto out; -- -- trace_sched_waking(p); -- -- if (!task_on_rq_queued(p)) -- ttwu_activate(rq, p, ENQUEUE_WAKEUP); -- -- ttwu_do_wakeup(rq, p, 0); -- ttwu_stat(p, smp_processor_id(), 0); --out: -- raw_spin_unlock(&p->pi_lock); --} -- --/** - * wake_up_process - Wake up a specific process - * @p: The process to be woken up. - * -@@ -3281,21 +3231,6 @@ static void __sched notrace __schedule(b - } else { - deactivate_task(rq, prev, DEQUEUE_SLEEP); - prev->on_rq = 0; -- -- /* -- * If a worker went to sleep, notify and ask workqueue -- * whether it wants to wake up a task to maintain -- * concurrency. -- * Only call wake up if prev isn't blocked on a sleeping -- * spin lock. -- */ -- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { -- struct task_struct *to_wakeup; -- -- to_wakeup = wq_worker_sleeping(prev, cpu); -- if (to_wakeup) -- try_to_wake_up_local(to_wakeup); -- } - } - switch_count = &prev->nvcsw; - } -@@ -3328,6 +3263,14 @@ static inline void sched_submit_work(str - { - if (!tsk->state || tsk_is_pi_blocked(tsk)) - return; -+ -+ /* -+ * If a worker went to sleep, notify and ask workqueue whether -+ * it wants to wake up a task to maintain concurrency. -+ */ -+ if (tsk->flags & PF_WQ_WORKER) -+ wq_worker_sleeping(tsk); -+ - /* - * If we are going to sleep and we have plugged IO queued, - * make sure to submit it to avoid deadlocks. -@@ -3336,6 +3279,12 @@ static inline void sched_submit_work(str - blk_schedule_flush_plug(tsk); - } - -+static void sched_update_worker(struct task_struct *tsk) -+{ -+ if (tsk->flags & PF_WQ_WORKER) -+ wq_worker_running(tsk); -+} -+ - asmlinkage __visible void __sched schedule(void) - { - struct task_struct *tsk = current; -@@ -3346,6 +3295,7 @@ asmlinkage __visible void __sched schedu - __schedule(false); - sched_preempt_enable_no_resched(); - } while (need_resched()); -+ sched_update_worker(tsk); - } - EXPORT_SYMBOL(schedule); - ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -811,44 +811,31 @@ static void wake_up_worker(struct worker - } - - /** -- * wq_worker_waking_up - a worker is waking up -- * @task: task waking up -- * @cpu: CPU @task is waking up to -+ * wq_worker_running - a worker is running again -+ * @task: task returning from sleep - * -- * This function is called during try_to_wake_up() when a worker is -- * being awoken. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -+ * This function is called when a worker returns from schedule() - */ --void wq_worker_waking_up(struct task_struct *task, int cpu) -+void wq_worker_running(struct task_struct *task) - { - struct worker *worker = kthread_data(task); - -- if (!(worker->flags & WORKER_NOT_RUNNING)) { -- WARN_ON_ONCE(worker->pool->cpu != cpu); -+ if (!worker->sleeping) -+ return; -+ if (!(worker->flags & WORKER_NOT_RUNNING)) - atomic_inc(&worker->pool->nr_running); -- } -+ worker->sleeping = 0; - } - - /** - * wq_worker_sleeping - a worker is going to sleep - * @task: task going to sleep -- * @cpu: CPU in question, must be the current CPU number -- * -- * This function is called during schedule() when a busy worker is -- * going to sleep. Worker on the same cpu can be woken up by -- * returning pointer to its task. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -- * -- * Return: -- * Worker task on @cpu to wake up, %NULL if none. 
-+ * This function is called from schedule() when a busy worker is -+ * going to sleep. - */ --struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) -+void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *worker = kthread_data(task), *to_wakeup = NULL; -+ struct worker *next, *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -857,14 +844,15 @@ struct task_struct *wq_worker_sleeping(s - * checking NOT_RUNNING. - */ - if (worker->flags & WORKER_NOT_RUNNING) -- return NULL; -+ return; - - pool = worker->pool; - -- /* this can only happen on the local cpu */ -- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) -- return NULL; -+ if (WARN_ON_ONCE(worker->sleeping)) -+ return; - -+ worker->sleeping = 1; -+ spin_lock_irq(&pool->lock); - /* - * The counterpart of the following dec_and_test, implied mb, - * worklist not empty test sequence is in insert_work(). -@@ -877,9 +865,12 @@ struct task_struct *wq_worker_sleeping(s - * lock is safe. - */ - if (atomic_dec_and_test(&pool->nr_running) && -- !list_empty(&pool->worklist)) -- to_wakeup = first_idle_worker(pool); -- return to_wakeup ? to_wakeup->task : NULL; -+ !list_empty(&pool->worklist)) { -+ next = first_idle_worker(pool); -+ if (next) -+ wake_up_process(next->task); -+ } -+ spin_unlock_irq(&pool->lock); - } - - /** ---- a/kernel/workqueue_internal.h -+++ b/kernel/workqueue_internal.h -@@ -43,6 +43,7 @@ struct worker { - unsigned long last_active; /* L: last active timestamp */ - unsigned int flags; /* X: flags */ - int id; /* I: worker id */ -+ int sleeping; /* None */ - - /* - * Opaque string set with work_set_desc(). Printed out with task -@@ -68,7 +69,7 @@ static inline struct worker *current_wq_ - * Scheduler hooks for concurrency managed workqueue. Only to be used from - * sched/core.c and workqueue.c. - */ --void wq_worker_waking_up(struct task_struct *task, int cpu); --struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); -+void wq_worker_running(struct task_struct *task); -+void wq_worker_sleeping(struct task_struct *task); - - #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch b/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch deleted file mode 100644 index 57aae7d27..000000000 --- a/debian/patches/features/all/rt/workqueue-prevent-deadlock-stall.patch +++ /dev/null @@ -1,201 +0,0 @@ -Subject: workqueue: Prevent deadlock/stall on RT -From: Thomas Gleixner -Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST) -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Austin reported a XFS deadlock/stall on RT where scheduled work gets -never exececuted and tasks are waiting for each other for ever. - -The underlying problem is the modification of the RT code to the -handling of workers which are about to go to sleep. In mainline a -worker thread which goes to sleep wakes an idle worker if there is -more work to do. This happens from the guts of the schedule() -function. On RT this must be outside and the accessed data structures -are not protected against scheduling due to the spinlock to rtmutex -conversion. So the naive solution to this was to move the code outside -of the scheduler and protect the data structures by the pool -lock. That approach turned out to be a little naive as we cannot call -into that code when the thread blocks on a lock, as it is not allowed -to block on two locks in parallel. 
So we dont call into the worker -wakeup magic when the worker is blocked on a lock, which causes the -deadlock/stall observed by Austin and Mike. - -Looking deeper into that worker code it turns out that the only -relevant data structure which needs to be protected is the list of -idle workers which can be woken up. - -So the solution is to protect the list manipulation operations with -preempt_enable/disable pairs on RT and call unconditionally into the -worker code even when the worker is blocked on a lock. The preemption -protection is safe as there is nothing which can fiddle with the list -outside of thread context. - -Reported-and_tested-by: Austin Schuh -Reported-and_tested-by: Mike Galbraith -Signed-off-by: Thomas Gleixner -Link: http://vger.kernel.org/r/alpine.DEB.2.10.1406271249510.5170@nanos -Cc: Richard Weinberger -Cc: Steven Rostedt - ---- - kernel/sched/core.c | 7 ++++- - kernel/workqueue.c | 61 ++++++++++++++++++++++++++++++++++++++++------------ - 2 files changed, 53 insertions(+), 15 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3377,9 +3377,8 @@ static void __sched notrace __schedule(b - - static inline void sched_submit_work(struct task_struct *tsk) - { -- if (!tsk->state || tsk_is_pi_blocked(tsk)) -+ if (!tsk->state) - return; -- - /* - * If a worker went to sleep, notify and ask workqueue whether - * it wants to wake up a task to maintain concurrency. -@@ -3387,6 +3386,10 @@ static inline void sched_submit_work(str - if (tsk->flags & PF_WQ_WORKER) - wq_worker_sleeping(tsk); - -+ -+ if (tsk_is_pi_blocked(tsk)) -+ return; -+ - /* - * If we are going to sleep and we have plugged IO queued, - * make sure to submit it to avoid deadlocks. ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -123,6 +123,11 @@ enum { - * cpu or grabbing pool->lock is enough for read access. If - * POOL_DISASSOCIATED is set, it's identical to L. - * -+ * On RT we need the extra protection via rt_lock_idle_list() for -+ * the list manipulations against read access from -+ * wq_worker_sleeping(). All other places are nicely serialized via -+ * pool->lock. -+ * - * A: pool->attach_mutex protected. - * - * PL: wq_pool_mutex protected. 
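The two-lock constraint mentioned above is the crux; schematically (a sketch of the call chain, not code from this patch):

	rt_spin_lock(&some_lock);		/* blocks, enters schedule() */
	  sched_submit_work()
	    wq_worker_sleeping()
	      spin_lock_irq(&pool->lock);	/* a second sleeping lock on RT:
						 * not allowed */

This is why the hunks below guard the idle_list manipulation with plain preempt_disable()/preempt_enable() pairs instead: the list is only ever touched from thread context, so pinning the task to the CPU is sufficient.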
-@@ -411,6 +416,31 @@ static void workqueue_sysfs_unregister(s - if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ - else - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static inline void rt_lock_idle_list(struct worker_pool *pool) -+{ -+ preempt_disable(); -+} -+static inline void rt_unlock_idle_list(struct worker_pool *pool) -+{ -+ preempt_enable(); -+} -+static inline void sched_lock_idle_list(struct worker_pool *pool) { } -+static inline void sched_unlock_idle_list(struct worker_pool *pool) { } -+#else -+static inline void rt_lock_idle_list(struct worker_pool *pool) { } -+static inline void rt_unlock_idle_list(struct worker_pool *pool) { } -+static inline void sched_lock_idle_list(struct worker_pool *pool) -+{ -+ spin_lock_irq(&pool->lock); -+} -+static inline void sched_unlock_idle_list(struct worker_pool *pool) -+{ -+ spin_unlock_irq(&pool->lock); -+} -+#endif -+ -+ - #ifdef CONFIG_DEBUG_OBJECTS_WORK - - static struct debug_obj_descr work_debug_descr; -@@ -804,10 +834,16 @@ static struct worker *first_idle_worker( - */ - static void wake_up_worker(struct worker_pool *pool) - { -- struct worker *worker = first_idle_worker(pool); -+ struct worker *worker; -+ -+ rt_lock_idle_list(pool); -+ -+ worker = first_idle_worker(pool); - - if (likely(worker)) - wake_up_process(worker->task); -+ -+ rt_unlock_idle_list(pool); - } - - /** -@@ -835,7 +871,7 @@ void wq_worker_running(struct task_struc - */ - void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *next, *worker = kthread_data(task); -+ struct worker *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -852,25 +888,18 @@ void wq_worker_sleeping(struct task_stru - return; - - worker->sleeping = 1; -- spin_lock_irq(&pool->lock); -+ - /* - * The counterpart of the following dec_and_test, implied mb, - * worklist not empty test sequence is in insert_work(). - * Please read comment there. -- * -- * NOT_RUNNING is clear. This means that we're bound to and -- * running on the local cpu w/ rq lock held and preemption -- * disabled, which in turn means that none else could be -- * manipulating idle_list, so dereferencing idle_list without pool -- * lock is safe. 
- */ - if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) { -- next = first_idle_worker(pool); -- if (next) -- wake_up_process(next->task); -+ sched_lock_idle_list(pool); -+ wake_up_worker(pool); -+ sched_unlock_idle_list(pool); - } -- spin_unlock_irq(&pool->lock); - } - - /** -@@ -1561,7 +1590,9 @@ static void worker_enter_idle(struct wor - worker->last_active = jiffies; - - /* idle_list is LIFO */ -+ rt_lock_idle_list(pool); - list_add(&worker->entry, &pool->idle_list); -+ rt_unlock_idle_list(pool); - - if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) - mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); -@@ -1594,7 +1625,9 @@ static void worker_leave_idle(struct wor - return; - worker_clr_flags(worker, WORKER_IDLE); - pool->nr_idle--; -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - } - - static struct worker *alloc_worker(int node) -@@ -1760,7 +1793,9 @@ static void destroy_worker(struct worker - pool->nr_workers--; - pool->nr_idle--; - -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - worker->flags |= WORKER_DIE; - wake_up_process(worker->task); - } diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch deleted file mode 100644 index 82de7e49d..000000000 --- a/debian/patches/features/all/rt/workqueue-use-locallock.patch +++ /dev/null @@ -1,145 +0,0 @@ -Subject: workqueue: Use local irq lock instead of irq disable regions -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:42:26 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Use a local_irq_lock as a replacement for irq off regions. We keep the -semantic of irq-off in regard to the pool->lock and remain preemptible. - -Signed-off-by: Thomas Gleixner ---- - kernel/workqueue.c | 31 +++++++++++++++++-------------- - 1 file changed, 17 insertions(+), 14 deletions(-) - ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -48,6 +48,7 @@ - #include - #include - #include -+#include - - #include "workqueue_internal.h" - -@@ -331,6 +332,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient - struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; - EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); - -+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); -+ - static int worker_thread(void *__worker); - static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - -@@ -1072,9 +1075,9 @@ static void put_pwq_unlocked(struct pool - * As both pwqs and pools are RCU protected, the - * following lock operations are safe. - */ -- spin_lock_irq(&pwq->pool->lock); -+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); - put_pwq(pwq); -- spin_unlock_irq(&pwq->pool->lock); -+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); - } - } - -@@ -1176,7 +1179,7 @@ static int try_to_grab_pending(struct wo - struct worker_pool *pool; - struct pool_workqueue *pwq; - -- local_irq_save(*flags); -+ local_lock_irqsave(pendingb_lock, *flags); - - /* try to steal the timer if it exists */ - if (is_dwork) { -@@ -1240,7 +1243,7 @@ static int try_to_grab_pending(struct wo - spin_unlock(&pool->lock); - fail: - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_unlock_irqrestore(pendingb_lock, *flags); - if (work_is_canceling(work)) - return -ENOENT; - cpu_relax(); -@@ -1312,7 +1315,7 @@ static void __queue_work(int cpu, struct - * queued or lose PENDING. 
Grabbing PENDING and queueing should - * happen with IRQ disabled. - */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - - debug_work_activate(work); - -@@ -1417,14 +1420,14 @@ bool queue_work_on(int cpu, struct workq - bool ret = false; - unsigned long flags; - -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock,flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_work(cpu, wq, work); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_work_on); -@@ -1491,14 +1494,14 @@ bool queue_delayed_work_on(int cpu, stru - unsigned long flags; - - /* read the comment in __queue_work() */ -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock, flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_delayed_work(cpu, wq, dwork, delay); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_delayed_work_on); -@@ -1533,7 +1536,7 @@ bool mod_delayed_work_on(int cpu, struct - - if (likely(ret >= 0)) { - __queue_delayed_work(cpu, wq, dwork, delay); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - } - - /* -ENOENT from try_to_grab_pending() becomes %true */ -@@ -2807,7 +2810,7 @@ static bool __cancel_work_timer(struct w - - /* tell other tasks trying to grab @work to back off */ - mark_work_canceling(work); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - - flush_work(work); - clear_work_data(work); -@@ -2862,10 +2865,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); - */ - bool flush_delayed_work(struct delayed_work *dwork) - { -- local_irq_disable(); -+ local_lock_irq(pendingb_lock); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -- local_irq_enable(); -+ local_unlock_irq(pendingb_lock); - return flush_work(&dwork->work); - } - EXPORT_SYMBOL(flush_delayed_work); -@@ -2900,7 +2903,7 @@ bool cancel_delayed_work(struct delayed_ - - set_work_pool_and_clear_pending(&dwork->work, - get_work_pool_id(&dwork->work)); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(cancel_delayed_work); diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch deleted file mode 100644 index 78978a8ae..000000000 --- a/debian/patches/features/all/rt/workqueue-use-rcu.patch +++ /dev/null @@ -1,355 +0,0 @@ -Subject: workqueue: Use normal rcu -From: Thomas Gleixner -Date: Wed, 24 Jul 2013 15:26:54 +0200 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -There is no need for sched_rcu. The undocumented reason why sched_rcu -is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by -abusing the fact that sched_rcu reader side critical sections are also -protected by preempt or irq disabled regions. - -Signed-off-by: Thomas Gleixner ---- - kernel/workqueue.c | 96 +++++++++++++++++++++++++++++------------------------ - 1 file changed, 53 insertions(+), 43 deletions(-) - ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -125,7 +125,7 @@ enum { - * - * PL: wq_pool_mutex protected. - * -- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. -+ * PR: wq_pool_mutex protected for writes. RCU protected for reads. 
- * - * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. - * -@@ -134,7 +134,7 @@ enum { - * - * WQ: wq->mutex protected. - * -- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. -+ * WR: wq->mutex protected for writes. RCU protected for reads. - * - * MD: wq_mayday_lock protected. - */ -@@ -183,7 +183,7 @@ struct worker_pool { - atomic_t nr_running ____cacheline_aligned_in_smp; - - /* -- * Destruction of pool is sched-RCU protected to allow dereferences -+ * Destruction of pool is RCU protected to allow dereferences - * from get_work_pool(). - */ - struct rcu_head rcu; -@@ -212,7 +212,7 @@ struct pool_workqueue { - /* - * Release of unbound pwq is punted to system_wq. See put_pwq() - * and pwq_unbound_release_workfn() for details. pool_workqueue -- * itself is also sched-RCU protected so that the first pwq can be -+ * itself is also RCU protected so that the first pwq can be - * determined without grabbing wq->mutex. - */ - struct work_struct unbound_release_work; -@@ -338,20 +338,20 @@ static void workqueue_sysfs_unregister(s - #include - - #define assert_rcu_or_pool_mutex() \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU or wq_pool_mutex should be held") -+ "RCU or wq_pool_mutex should be held") - - #define assert_rcu_or_wq_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex), \ -- "sched RCU or wq->mutex should be held") -+ "RCU or wq->mutex should be held") - - #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex) && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU, wq->mutex or wq_pool_mutex should be held") -+ "RCU, wq->mutex or wq_pool_mutex should be held") - - #define for_each_cpu_worker_pool(pool, cpu) \ - for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ -@@ -363,7 +363,7 @@ static void workqueue_sysfs_unregister(s - * @pool: iteration cursor - * @pi: integer used for iteration - * -- * This must be called either with wq_pool_mutex held or sched RCU read -+ * This must be called either with wq_pool_mutex held or RCU read - * locked. If the pool needs to be used beyond the locking in effect, the - * caller is responsible for guaranteeing that the pool stays online. - * -@@ -395,7 +395,7 @@ static void workqueue_sysfs_unregister(s - * @pwq: iteration cursor - * @wq: the target workqueue - * -- * This must be called either with wq->mutex held or sched RCU read locked. -+ * This must be called either with wq->mutex held or RCU read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. - * -@@ -557,7 +557,7 @@ static int worker_pool_assign_id(struct - * @wq: the target workqueue - * @node: the node ID - * -- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU -+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU - * read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. -@@ -662,8 +662,8 @@ static struct pool_workqueue *get_work_p - * @work: the work item of interest - * - * Pools are created and destroyed under wq_pool_mutex, and allows read -- * access under sched-RCU read lock. 
As such, this function should be -- * called under wq_pool_mutex or with preemption disabled. -+ * access under RCU read lock. As such, this function should be -+ * called under wq_pool_mutex or inside of a rcu_read_lock() region. - * - * All fields of the returned pool are accessible as long as the above - * mentioned locking is in effect. If the returned pool needs to be used -@@ -1069,7 +1069,7 @@ static void put_pwq_unlocked(struct pool - { - if (pwq) { - /* -- * As both pwqs and pools are sched-RCU protected, the -+ * As both pwqs and pools are RCU protected, the - * following lock operations are safe. - */ - spin_lock_irq(&pwq->pool->lock); -@@ -1195,6 +1195,7 @@ static int try_to_grab_pending(struct wo - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) - return 0; - -+ rcu_read_lock(); - /* - * The queueing is in progress, or it is already queued. Try to - * steal it from ->worklist without clearing WORK_STRUCT_PENDING. -@@ -1233,10 +1234,12 @@ static int try_to_grab_pending(struct wo - set_work_pool_and_keep_pending(work, pool->id); - - spin_unlock(&pool->lock); -+ rcu_read_unlock(); - return 1; - } - spin_unlock(&pool->lock); - fail: -+ rcu_read_unlock(); - local_irq_restore(*flags); - if (work_is_canceling(work)) - return -ENOENT; -@@ -1317,6 +1320,8 @@ static void __queue_work(int cpu, struct - if (unlikely(wq->flags & __WQ_DRAINING) && - WARN_ON_ONCE(!is_chained_work(wq))) - return; -+ -+ rcu_read_lock(); - retry: - if (req_cpu == WORK_CPU_UNBOUND) - cpu = raw_smp_processor_id(); -@@ -1373,10 +1378,8 @@ static void __queue_work(int cpu, struct - /* pwq determined, queue */ - trace_workqueue_queue_work(req_cpu, pwq, work); - -- if (WARN_ON(!list_empty(&work->entry))) { -- spin_unlock(&pwq->pool->lock); -- return; -- } -+ if (WARN_ON(!list_empty(&work->entry))) -+ goto out; - - pwq->nr_in_flight[pwq->work_color]++; - work_flags = work_color_to_flags(pwq->work_color); -@@ -1392,7 +1395,9 @@ static void __queue_work(int cpu, struct - - insert_work(pwq, work, worklist, work_flags); - -+out: - spin_unlock(&pwq->pool->lock); -+ rcu_read_unlock(); - } - - /** -@@ -2677,14 +2682,14 @@ static bool start_flush_work(struct work - - might_sleep(); - -- local_irq_disable(); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (!pool) { -- local_irq_enable(); -+ rcu_read_unlock(); - return false; - } - -- spin_lock(&pool->lock); -+ spin_lock_irq(&pool->lock); - /* see the comment in try_to_grab_pending() with the same code */ - pwq = get_work_pwq(work); - if (pwq) { -@@ -2711,10 +2716,11 @@ static bool start_flush_work(struct work - else - lock_map_acquire_read(&pwq->wq->lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); -- -+ rcu_read_unlock(); - return true; - already_gone: - spin_unlock_irq(&pool->lock); -+ rcu_read_unlock(); - return false; - } - -@@ -3122,7 +3128,7 @@ static void rcu_free_pool(struct rcu_hea - * put_unbound_pool - put a worker_pool - * @pool: worker_pool to put - * -- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU -+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU - * safe manner. get_unbound_pool() calls this function on its failure path - * and this function should be able to release pools which went through, - * successfully or not, init_worker_pool(). 
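The conversion repeated throughout these hunks follows a single pattern; condensed (a sketch, not a literal hunk):

	/* before: sched-RCU, implied by the preempt/irq disabled region */
	local_irq_disable();
	pool = get_work_pool(work);
	/* ... */
	local_irq_enable();

	/* after: an explicit, preemptible RCU read-side section */
	rcu_read_lock();
	pool = get_work_pool(work);
	/* ... */
	rcu_read_unlock();

with call_rcu_sched() and rcu_read_lock_sched() swapped for their plain RCU equivalents on the update and read sides.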
-@@ -3176,8 +3182,8 @@ static void put_unbound_pool(struct work - del_timer_sync(&pool->idle_timer); - del_timer_sync(&pool->mayday_timer); - -- /* sched-RCU protected to allow dereferences from get_work_pool() */ -- call_rcu_sched(&pool->rcu, rcu_free_pool); -+ /* RCU protected to allow dereferences from get_work_pool() */ -+ call_rcu(&pool->rcu, rcu_free_pool); - } - - /** -@@ -3284,14 +3290,14 @@ static void pwq_unbound_release_workfn(s - put_unbound_pool(pool); - mutex_unlock(&wq_pool_mutex); - -- call_rcu_sched(&pwq->rcu, rcu_free_pwq); -+ call_rcu(&pwq->rcu, rcu_free_pwq); - - /* - * If we're the last pwq going away, @wq is already dead and no one - * is gonna access it anymore. Schedule RCU free. - */ - if (is_last) -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } - - /** -@@ -3944,7 +3950,7 @@ void destroy_workqueue(struct workqueue_ - * The base ref is never dropped on per-cpu pwqs. Directly - * schedule RCU free. - */ -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } else { - /* - * We're the sole accessor of @wq at this point. Directly -@@ -4037,7 +4043,8 @@ bool workqueue_congested(int cpu, struct - struct pool_workqueue *pwq; - bool ret; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); -+ preempt_disable(); - - if (cpu == WORK_CPU_UNBOUND) - cpu = smp_processor_id(); -@@ -4048,7 +4055,8 @@ bool workqueue_congested(int cpu, struct - pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); - - ret = !list_empty(&pwq->delayed_works); -- rcu_read_unlock_sched(); -+ preempt_enable(); -+ rcu_read_unlock(); - - return ret; - } -@@ -4074,15 +4082,15 @@ unsigned int work_busy(struct work_struc - if (work_pending(work)) - ret |= WORK_BUSY_PENDING; - -- local_irq_save(flags); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (pool) { -- spin_lock(&pool->lock); -+ spin_lock_irqsave(&pool->lock, flags); - if (find_worker_executing_work(pool, work)) - ret |= WORK_BUSY_RUNNING; -- spin_unlock(&pool->lock); -+ spin_unlock_irqrestore(&pool->lock, flags); - } -- local_irq_restore(flags); -+ rcu_read_unlock(); - - return ret; - } -@@ -4271,7 +4279,7 @@ void show_workqueue_state(void) - unsigned long flags; - int pi; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); - - pr_info("Showing busy workqueues and worker pools:\n"); - -@@ -4322,7 +4330,7 @@ void show_workqueue_state(void) - spin_unlock_irqrestore(&pool->lock, flags); - } - -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - - /* -@@ -4672,16 +4680,16 @@ bool freeze_workqueues_busy(void) - * nr_active is monotonically decreasing. It's safe - * to peek without lock. 
- */ -- rcu_read_lock_sched(); -+ rcu_read_lock(); - for_each_pwq(pwq, wq) { - WARN_ON_ONCE(pwq->nr_active < 0); - if (pwq->nr_active) { - busy = true; -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - goto out_unlock; - } - } -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - out_unlock: - mutex_unlock(&wq_pool_mutex); -@@ -4871,7 +4879,8 @@ static ssize_t wq_pool_ids_show(struct d - const char *delim = ""; - int node, written = 0; - -- rcu_read_lock_sched(); -+ get_online_cpus(); -+ rcu_read_lock(); - for_each_node(node) { - written += scnprintf(buf + written, PAGE_SIZE - written, - "%s%d:%d", delim, node, -@@ -4879,7 +4888,8 @@ static ssize_t wq_pool_ids_show(struct d - delim = " "; - } - written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); -+ put_online_cpus(); - - return written; - } diff --git a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch b/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch deleted file mode 100644 index 0fc2e0f49..000000000 --- a/debian/patches/features/all/rt/x86-UV-raw_spinlock-conversion.patch +++ /dev/null @@ -1,245 +0,0 @@ -From: Mike Galbraith -Date: Sun, 2 Nov 2014 08:31:37 +0100 -Subject: x86: UV: raw_spinlock conversion -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Shrug. Lots of hobbyists have a beast in their basement, right? - - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/include/asm/uv/uv_bau.h | 14 +++++++------- - arch/x86/include/asm/uv/uv_hub.h | 2 +- - arch/x86/kernel/apic/x2apic_uv_x.c | 2 +- - arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++------------- - arch/x86/platform/uv/uv_time.c | 21 +++++++++++++-------- - 5 files changed, 35 insertions(+), 30 deletions(-) - ---- a/arch/x86/include/asm/uv/uv_bau.h -+++ b/arch/x86/include/asm/uv/uv_bau.h -@@ -615,9 +615,9 @@ struct bau_control { - cycles_t send_message; - cycles_t period_end; - cycles_t period_time; -- spinlock_t uvhub_lock; -- spinlock_t queue_lock; -- spinlock_t disable_lock; -+ raw_spinlock_t uvhub_lock; -+ raw_spinlock_t queue_lock; -+ raw_spinlock_t disable_lock; - /* tunables */ - int max_concurr; - int max_concurr_const; -@@ -776,15 +776,15 @@ static inline int atom_asr(short i, stru - * to be lowered below the current 'v'. atomic_add_unless can only stop - * on equal. 
- */ --static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) -+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) - { -- spin_lock(lock); -+ raw_spin_lock(lock); - if (atomic_read(v) >= u) { -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 0; - } - atomic_inc(v); -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 1; - } - ---- a/arch/x86/include/asm/uv/uv_hub.h -+++ b/arch/x86/include/asm/uv/uv_hub.h -@@ -492,7 +492,7 @@ struct uv_blade_info { - unsigned short nr_online_cpus; - unsigned short pnode; - short memory_nid; -- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ -+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ - unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ - }; - extern struct uv_blade_info *uv_blade_info; ---- a/arch/x86/kernel/apic/x2apic_uv_x.c -+++ b/arch/x86/kernel/apic/x2apic_uv_x.c -@@ -947,7 +947,7 @@ void __init uv_system_init(void) - uv_blade_info[blade].pnode = pnode; - uv_blade_info[blade].nr_possible_cpus = 0; - uv_blade_info[blade].nr_online_cpus = 0; -- spin_lock_init(&uv_blade_info[blade].nmi_lock); -+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock); - min_pnode = min(pnode, min_pnode); - max_pnode = max(pnode, max_pnode); - blade++; ---- a/arch/x86/platform/uv/tlb_uv.c -+++ b/arch/x86/platform/uv/tlb_uv.c -@@ -714,9 +714,9 @@ static void destination_plugged(struct b - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -736,9 +736,9 @@ static void destination_timeout(struct b - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -759,7 +759,7 @@ static void disable_for_period(struct ba - cycles_t tm1; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { - stat->s_bau_disabled++; - tm1 = get_cycles(); -@@ -772,7 +772,7 @@ static void disable_for_period(struct ba - } - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - } - - static void count_max_concurr(int stat, struct bau_control *bcp, -@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t t - */ - static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) - { -- spinlock_t *lock = &hmaster->uvhub_lock; -+ raw_spinlock_t *lock = &hmaster->uvhub_lock; - atomic_t *v; - - v = &hmaster->active_descriptor_count; -@@ -968,7 +968,7 @@ static int check_enable(struct bau_contr - struct bau_control *hmaster; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { -@@ -980,10 +980,10 @@ static int check_enable(struct bau_contr - tbcp->period_giveups = 0; - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return 0; - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return -1; - } - -@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables - bcp->cong_reps = congested_reps; - 
bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; -- spin_lock_init(&bcp->queue_lock); -- spin_lock_init(&bcp->uvhub_lock); -- spin_lock_init(&bcp->disable_lock); -+ raw_spin_lock_init(&bcp->queue_lock); -+ raw_spin_lock_init(&bcp->uvhub_lock); -+ raw_spin_lock_init(&bcp->disable_lock); - } - } - ---- a/arch/x86/platform/uv/uv_time.c -+++ b/arch/x86/platform/uv/uv_time.c -@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event - - /* There is one of these allocated per node */ - struct uv_rtc_timer_head { -- spinlock_t lock; -+ raw_spinlock_t lock; - /* next cpu waiting for timer, local node relative: */ - int next_cpu; - /* number of cpus on this node: */ -@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers - uv_rtc_deallocate_timers(); - return -ENOMEM; - } -- spin_lock_init(&head->lock); -+ raw_spin_lock_init(&head->lock); - head->ncpus = uv_blade_nr_possible_cpus(bid); - head->next_cpu = -1; - blade_info[bid] = head; -@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 - unsigned long flags; - int next_cpu; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - next_cpu = head->next_cpu; - *t = expires; -@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 - if (uv_setup_intr(cpu, expires)) { - *t = ULLONG_MAX; - uv_rtc_find_next_timer(head, pnode); -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return -ETIME; - } - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return 0; - } - -@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, i - unsigned long flags; - int rc = 0; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) - rc = 1; -@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, i - uv_rtc_find_next_timer(head, pnode); - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - - return rc; - } -@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, i - static cycle_t uv_read_rtc(struct clocksource *cs) - { - unsigned long offset; -+ cycle_t cycles; - -+ preempt_disable(); - if (uv_get_min_hub_revision_id() == 1) - offset = 0; - else - offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; - -- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ preempt_enable(); -+ -+ return cycles; - } - - /* diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch deleted file mode 100644 index 3668ff270..000000000 --- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch +++ /dev/null @@ -1,113 +0,0 @@ -Subject: x86: crypto: Reduce preempt disabled regions -From: Peter Zijlstra -Date: Mon, 14 Nov 2011 18:19:27 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Restrict the preempt disabled regions to the actual floating point -operations and enable preemption for the administrative actions. - -This is necessary on RT to avoid that kfree and other operations are -called with preemption disabled. 
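The transformation applied to each cipher mode is the same; condensed from the ecb_encrypt() hunk below (argument lists elided):

	/* before: FPU, and therefore preemption, disabled across the walk */
	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(...);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	/* after: only the actual AES-NI call runs with the FPU held */
	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_enc(...);
		kernel_fpu_end();
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

so that blkcipher_walk_done(), which may allocate or free memory, no longer runs with preemption disabled.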
- -Reported-and-tested-by: Carsten Emde -Signed-off-by: Peter Zijlstra - -Signed-off-by: Thomas Gleixner ---- - arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++----------- - 1 file changed, 13 insertions(+), 11 deletions(-) - ---- a/arch/x86/crypto/aesni-intel_glue.c -+++ b/arch/x86/crypto/aesni-intel_glue.c -@@ -383,14 +383,14 @@ static int ecb_encrypt(struct blkcipher_ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes & AES_BLOCK_MASK); -+ nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -407,14 +407,14 @@ static int ecb_decrypt(struct blkcipher_ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -431,14 +431,14 @@ static int cbc_encrypt(struct blkcipher_ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -455,14 +455,14 @@ static int cbc_decrypt(struct blkcipher_ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -514,18 +514,20 @@ static int ctr_crypt(struct blkcipher_de - err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { -+ kernel_fpu_begin(); - aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - if (walk.nbytes) { -+ kernel_fpu_begin(); - ctr_crypt_final(ctx, &walk); -+ kernel_fpu_end(); - err = blkcipher_walk_done(desc, &walk, 0); - } -- kernel_fpu_end(); - - return err; - } diff --git a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch deleted file mode 100644 index 66afc7f50..000000000 --- a/debian/patches/features/all/rt/x86-highmem-add-a-already-used-pte-check.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 11 Mar 2013 17:09:55 +0100 -Subject: x86/highmem: Add a "already used pte" check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -This is a copy from kmap_atomic_prot(). 
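For context, a short sketch of the pairing that the added WARN_ON() polices (illustrative only; page, src and len are assumed caller state, and the generic kmap_atomic() interface stands in for the pfn-based variant patched here). Each CPU keeps a small stack of fixmap slots, and kmap_atomic_idx_push() hands out the next one, which must still be vacant:

    void *p = kmap_atomic(page);    /* push a slot, install its PTE */
    memcpy(p, src, len);            /* use the temporary mapping    */
    kunmap_atomic(p);               /* clear the PTE, pop the slot  */

If a caller skips the unmap, the next push reuses a slot whose PTE is still live; WARN_ON(!pte_none(*(kmap_pte - idx))) turns that leak into a loud warning instead of a silent remap over an active mapping.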
- -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/mm/iomap_32.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/x86/mm/iomap_32.c -+++ b/arch/x86/mm/iomap_32.c -@@ -66,6 +66,8 @@ void *kmap_atomic_prot_pfn(unsigned long - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ WARN_ON(!pte_none(*(kmap_pte - idx))); -+ - #ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; - #endif diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch deleted file mode 100644 index 260e51444..000000000 --- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:27 -0500 -Subject: x86/ioapic: Do not unmask io_apic when interrupt is in progress -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -With threaded interrupts we might see an interrupt in progress on -migration. Do not unmask it when this is the case. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- -xXx - arch/x86/kernel/apic/io_apic.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st - static inline bool ioapic_irqd_mask(struct irq_data *data) - { - /* If we are moving the irq we need to mask it */ -- if (unlikely(irqd_is_setaffinity_pending(data))) { -+ if (unlikely(irqd_is_setaffinity_pending(data) && -+ !irqd_irq_inprogress(data))) { - mask_ioapic_irq(data); - return true; - } diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch deleted file mode 100644 index 8f92f2ff8..000000000 --- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch +++ /dev/null @@ -1,31 +0,0 @@ -Subject: x86: kvm Require const tsc for RT -From: Thomas Gleixner -Date: Sun, 06 Nov 2011 12:26:18 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Non constant TSC is a nightmare on bare metal already, but with -virtualization it becomes a complete disaster because the workarounds -are horrible latency wise. That's also a preliminary for running RT in -a guest on top of a RT host. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/kvm/x86.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -5788,6 +5788,13 @@ int kvm_arch_init(void *opaque) - goto out; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { -+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); -+ return -EOPNOTSUPP; -+ } -+#endif -+ - r = kvm_mmu_module_init(); - if (r) - goto out_free_percpu; diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch deleted file mode 100644 index 4b20e54b2..000000000 --- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch +++ /dev/null @@ -1,180 +0,0 @@ -From: Thomas Gleixner -Date: Mon, 13 Dec 2010 16:33:39 +0100 -Subject: x86: Convert mce timer to hrtimer -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -mce_timer is started in atomic contexts of cpu bringup. This results -in might_sleep() warnings on RT. 
Convert mce_timer to a hrtimer to -avoid this. - -Signed-off-by: Thomas Gleixner -fold in: -|From: Mike Galbraith -|Date: Wed, 29 May 2013 13:52:13 +0200 -|Subject: [PATCH] x86/mce: fix mce timer interval -| -|Seems mce timer fire at the wrong frequency in -rt kernels since roughly -|forever due to 32 bit overflow. 3.8-rt is also missing a multiplier. -| -|Add missing us -> ns conversion and 32 bit overflow prevention. -| -|Signed-off-by: Mike Galbraith -|[bigeasy: use ULL instead of u64 cast] -|Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kernel/cpu/mcheck/mce.c | 52 +++++++++++++++------------------------ - 1 file changed, 20 insertions(+), 32 deletions(-) - ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -1236,7 +1237,7 @@ void mce_log_therm_throt_event(__u64 sta - static unsigned long check_interval = INITIAL_CHECK_INTERVAL; - - static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ --static DEFINE_PER_CPU(struct timer_list, mce_timer); -+static DEFINE_PER_CPU(struct hrtimer, mce_timer); - - static unsigned long mce_adjust_timer_default(unsigned long interval) - { -@@ -1245,32 +1246,18 @@ static unsigned long mce_adjust_timer_de - - static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; - --static void __restart_timer(struct timer_list *t, unsigned long interval) -+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval) - { -- unsigned long when = jiffies + interval; -- unsigned long flags; -- -- local_irq_save(flags); -- -- if (timer_pending(t)) { -- if (time_before(when, t->expires)) -- mod_timer_pinned(t, when); -- } else { -- t->expires = round_jiffies(when); -- add_timer_on(t, smp_processor_id()); -- } -- -- local_irq_restore(flags); -+ if (!interval) -+ return HRTIMER_NORESTART; -+ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval))); -+ return HRTIMER_RESTART; - } - --static void mce_timer_fn(unsigned long data) -+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -- int cpu = smp_processor_id(); - unsigned long iv; - -- WARN_ON(cpu != data); -- - iv = __this_cpu_read(mce_next_interval); - - if (mce_available(this_cpu_ptr(&cpu_info))) { -@@ -1293,7 +1280,7 @@ static void mce_timer_fn(unsigned long d - - done: - __this_cpu_write(mce_next_interval, iv); -- __restart_timer(t, iv); -+ return __restart_timer(timer, iv); - } - - /* -@@ -1301,7 +1288,7 @@ static void mce_timer_fn(unsigned long d - */ - void mce_timer_kick(unsigned long interval) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned long iv = __this_cpu_read(mce_next_interval); - - __restart_timer(t, interval); -@@ -1316,7 +1303,7 @@ static void mce_timer_delete_all(void) - int cpu; - - for_each_online_cpu(cpu) -- del_timer_sync(&per_cpu(mce_timer, cpu)); -+ hrtimer_cancel(&per_cpu(mce_timer, cpu)); - } - - static void mce_do_trigger(struct work_struct *work) -@@ -1639,7 +1626,7 @@ static void __mcheck_cpu_clear_vendor(st - } - } - --static void mce_start_timer(unsigned int cpu, struct timer_list *t) -+static void mce_start_timer(unsigned int cpu, struct hrtimer *t) - { - unsigned long iv = check_interval * HZ; - -@@ -1648,16 +1635,17 @@ static void mce_start_timer(unsigned int - - per_cpu(mce_next_interval, cpu) = iv; - -- t->expires = 
round_jiffies(jiffies + iv); -- add_timer_on(t, cpu); -+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), -+ 0, HRTIMER_MODE_REL_PINNED); - } - - static void __mcheck_cpu_init_timer(void) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned int cpu = smp_processor_id(); - -- setup_timer(t, mce_timer_fn, cpu); -+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ t->function = mce_timer_fn; - mce_start_timer(cpu, t); - } - -@@ -2376,6 +2364,8 @@ static void mce_disable_cpu(void *h) - if (!mce_available(raw_cpu_ptr(&cpu_info))) - return; - -+ hrtimer_cancel(this_cpu_ptr(&mce_timer)); -+ - if (!(action & CPU_TASKS_FROZEN)) - cmci_clear(); - -@@ -2398,6 +2388,7 @@ static void mce_reenable_cpu(void *h) - if (b->init) - wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); - } -+ __mcheck_cpu_init_timer(); - } - - /* Get notified when a cpu comes on/off. Be hotplug friendly. */ -@@ -2405,7 +2396,6 @@ static int - mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) - { - unsigned int cpu = (unsigned long)hcpu; -- struct timer_list *t = &per_cpu(mce_timer, cpu); - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: -@@ -2425,11 +2415,9 @@ mce_cpu_callback(struct notifier_block * - break; - case CPU_DOWN_PREPARE: - smp_call_function_single(cpu, mce_disable_cpu, &action, 1); -- del_timer_sync(t); - break; - case CPU_DOWN_FAILED: - smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); -- mce_start_timer(cpu, t); - break; - } - diff --git a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch b/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch deleted file mode 100644 index 334cf9a2e..000000000 --- a/debian/patches/features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch +++ /dev/null @@ -1,160 +0,0 @@ -Subject: x86/mce: use swait queue for mce wakeups -From: Steven Rostedt -Date: Fri, 27 Feb 2015 15:20:37 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -We had a customer report a lockup on a 3.0-rt kernel that had the -following backtrace: - -[ffff88107fca3e80] rt_spin_lock_slowlock at ffffffff81499113 -[ffff88107fca3f40] rt_spin_lock at ffffffff81499a56 -[ffff88107fca3f50] __wake_up at ffffffff81043379 -[ffff88107fca3f80] mce_notify_irq at ffffffff81017328 -[ffff88107fca3f90] intel_threshold_interrupt at ffffffff81019508 -[ffff88107fca3fa0] smp_threshold_interrupt at ffffffff81019fc1 -[ffff88107fca3fb0] threshold_interrupt at ffffffff814a1853 - -It actually bugged because the lock was taken by the same owner that -already had that lock. What happened was the thread that was setting -itself on a wait queue had the lock when an MCE triggered. The MCE -interrupt does a wake up on its wait list and grabs the same lock. - -NOTE: THIS IS NOT A BUG ON MAINLINE - -Sorry for yelling, but as I Cc'd mainline maintainers I want them to -know that this is an PREEMPT_RT bug only. I only Cc'd them for advice. - -On PREEMPT_RT the wait queue locks are converted from normal -"spin_locks" into an rt_mutex (see the rt_spin_lock_slowlock above). -These are not to be taken by hard interrupt context. This usually isn't -a problem as most all interrupts in PREEMPT_RT are converted into -schedulable threads. Unfortunately that's not the case with the MCE irq. - -As wait queue locks are notorious for long hold times, we can not -convert them to raw_spin_locks without causing issues with -rt. 
But -Thomas has created a "simple-wait" structure that uses raw spin locks -which may have been a good fit. - -Unfortunately, wait queues are not the only issue, as the mce_notify_irq -also does a schedule_work(), which grabs the workqueue spin locks that -have the exact same issue. - -Thus, this patch I'm proposing is to move the actual work of the MCE -interrupt into a helper thread that gets woken up on the MCE interrupt -and does the work in a schedulable context. - -NOTE: THIS PATCH ONLY CHANGES THE BEHAVIOR WHEN PREEMPT_RT IS SET - -Oops, sorry for yelling again, but I want to stress that I keep the same -behavior of mainline when PREEMPT_RT is not set. Thus, this only changes -the MCE behavior when PREEMPT_RT is configured. - -Signed-off-by: Steven Rostedt -[bigeasy@linutronix: make mce_notify_work() a proper prototype, use - kthread_run()] -Signed-off-by: Sebastian Andrzej Siewior -[wagi: use work-simple framework to defer work to a kthread] -Signed-off-by: Daniel Wagner ---- - arch/x86/kernel/cpu/mcheck/mce.c | 68 ++++++++++++++++++++++++++++++++------- - 1 file changed, 56 insertions(+), 12 deletions(-) - ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -1313,6 +1314,56 @@ static void mce_do_trigger(struct work_s - - static DECLARE_WORK(mce_trigger_work, mce_do_trigger); - -+static void __mce_notify_work(struct swork_event *event) -+{ -+ /* Not more than two messages every minute */ -+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -+ -+ /* wake processes polling /dev/mcelog */ -+ wake_up_interruptible(&mce_chrdev_wait); -+ -+ /* -+ * There is no risk of missing notifications because -+ * work_pending is always cleared before the function is -+ * executed. -+ */ -+ if (mce_helper[0] && !work_pending(&mce_trigger_work)) -+ schedule_work(&mce_trigger_work); -+ -+ if (__ratelimit(&ratelimit)) -+ pr_info(HW_ERR "Machine check events logged\n"); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static bool notify_work_ready __read_mostly; -+static struct swork_event notify_work; -+ -+static int mce_notify_work_init(void) -+{ -+ int err; -+ -+ err = swork_get(); -+ if (err) -+ return err; -+ -+ INIT_SWORK(¬ify_work, __mce_notify_work); -+ notify_work_ready = true; -+ return 0; -+} -+ -+static void mce_notify_work(void) -+{ -+ if (notify_work_ready) -+ swork_queue(¬ify_work); -+} -+#else -+static void mce_notify_work(void) -+{ -+ __mce_notify_work(NULL); -+} -+static inline int mce_notify_work_init(void) { return 0; } -+#endif -+ - /* - * Notify the user(s) about new machine check events. 
- * Can be called from interrupt context, but not from machine check/NMI -@@ -1320,19 +1371,8 @@ static DECLARE_WORK(mce_trigger_work, mc - */ - int mce_notify_irq(void) - { -- /* Not more than two messages every minute */ -- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -- - if (test_and_clear_bit(0, &mce_need_notify)) { -- /* wake processes polling /dev/mcelog */ -- wake_up_interruptible(&mce_chrdev_wait); -- -- if (mce_helper[0]) -- schedule_work(&mce_trigger_work); -- -- if (__ratelimit(&ratelimit)) -- pr_info(HW_ERR "Machine check events logged\n"); -- -+ mce_notify_work(); - return 1; - } - return 0; -@@ -2456,6 +2496,10 @@ static __init int mcheck_init_device(voi - goto err_out; - } - -+ err = mce_notify_work_init(); -+ if (err) -+ goto err_out; -+ - if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { - err = -ENOMEM; - goto err_out; diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch deleted file mode 100644 index e1774d3dd..000000000 --- a/debian/patches/features/all/rt/x86-preempt-lazy.patch +++ /dev/null @@ -1,152 +0,0 @@ -Subject: x86: Support for lazy preemption -From: Thomas Gleixner -Date: Thu, 01 Nov 2012 11:03:47 +0100 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Implement the x86 pieces for lazy preempt. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/Kconfig | 1 + - arch/x86/entry/common.c | 4 ++-- - arch/x86/entry/entry_32.S | 16 ++++++++++++++++ - arch/x86/entry/entry_64.S | 16 ++++++++++++++++ - arch/x86/include/asm/thread_info.h | 6 ++++++ - arch/x86/kernel/asm-offsets.c | 2 ++ - 6 files changed, 43 insertions(+), 2 deletions(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -17,6 +17,7 @@ config X86_64 - ### Arch settings - config X86 - def_bool y -+ select HAVE_PREEMPT_LAZY - select ACPI_LEGACY_TABLES_LOOKUP if ACPI - select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI - select ANON_INODES ---- a/arch/x86/entry/common.c -+++ b/arch/x86/entry/common.c -@@ -220,7 +220,7 @@ long syscall_trace_enter(struct pt_regs - - #define EXIT_TO_USERMODE_LOOP_FLAGS \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) -+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY) - - static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) - { -@@ -236,7 +236,7 @@ static void exit_to_usermode_loop(struct - /* We have work to do. */ - local_irq_enable(); - -- if (cached_flags & _TIF_NEED_RESCHED) -+ if (cached_flags & _TIF_NEED_RESCHED_MASK) - schedule(); - - #ifdef ARCH_RT_DELAYS_SIGNAL_SEND ---- a/arch/x86/entry/entry_32.S -+++ b/arch/x86/entry/entry_32.S -@@ -278,8 +278,24 @@ END(ret_from_exception) - ENTRY(resume_kernel) - DISABLE_INTERRUPTS(CLBR_ANY) - need_resched: -+ # preempt count == 0 + NEED_RS set? - cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz restore_all -+#else -+ jz test_int_off -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jne restore_all -+ -+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? -+ jnz restore_all -+ -+ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp) -+ jz restore_all -+test_int_off: -+#endif - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? 
- jz restore_all - call preempt_schedule_irq ---- a/arch/x86/entry/entry_64.S -+++ b/arch/x86/entry/entry_64.S -@@ -579,7 +579,23 @@ GLOBAL(retint_user) - bt $9, EFLAGS(%rsp) /* were interrupts off? */ - jnc 1f - 0: cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz 1f -+#else -+ jz do_preempt_schedule_irq -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jnz 1f -+ -+ GET_THREAD_INFO(%rcx) -+ cmpl $0, TI_preempt_lazy_count(%rcx) -+ jnz 1f -+ -+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) -+ jnc 1f -+do_preempt_schedule_irq: -+#endif - call preempt_schedule_irq - jmp 0b - 1: ---- a/arch/x86/include/asm/thread_info.h -+++ b/arch/x86/include/asm/thread_info.h -@@ -58,6 +58,8 @@ struct thread_info { - __u32 status; /* thread synchronous flags */ - __u32 cpu; /* current CPU */ - mm_segment_t addr_limit; -+ int preempt_lazy_count; /* 0 => lazy preemptable -+ <0 => BUG */ - unsigned int sig_on_uaccess_error:1; - unsigned int uaccess_err:1; /* uaccess failed */ - }; -@@ -95,6 +97,7 @@ struct thread_info { - #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ - #define TIF_SECCOMP 8 /* secure computing */ -+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ - #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ - #define TIF_UPROBE 12 /* breakpointed or singlestepping */ - #define TIF_NOTSC 16 /* TSC is not accessible in userland */ -@@ -119,6 +122,7 @@ struct thread_info { - #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_SECCOMP (1 << TIF_SECCOMP) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) - #define _TIF_UPROBE (1 << TIF_UPROBE) - #define _TIF_NOTSC (1 << TIF_NOTSC) -@@ -152,6 +156,8 @@ struct thread_info { - #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) - #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) - -+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) -+ - #define STACK_WARN (THREAD_SIZE/8) - - /* ---- a/arch/x86/kernel/asm-offsets.c -+++ b/arch/x86/kernel/asm-offsets.c -@@ -32,6 +32,7 @@ void common(void) { - OFFSET(TI_flags, thread_info, flags); - OFFSET(TI_status, thread_info, status); - OFFSET(TI_addr_limit, thread_info, addr_limit); -+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); - - BLANK(); - OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); -@@ -89,4 +90,5 @@ void common(void) { - - BLANK(); - DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); -+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); - } diff --git a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch deleted file mode 100644 index 5f24b4b2d..000000000 --- a/debian/patches/features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Yang Shi -Date: Thu, 10 Dec 2015 10:58:51 -0800 -Subject: x86/signal: delay calling signals on 32bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -When running some ptrace single step tests on x86-32 machine, the below problem -is triggered: - -BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917 -in_atomic(): 1, irqs_disabled(): 0, pid: 1041, name: dummy2 -Preemption disabled at:[] 
do_debug+0x1f/0x1a0 - -CPU: 10 PID: 1041 Comm: dummy2 Tainted: G W 4.1.13-rt13 #1 -Call Trace: - [] dump_stack+0x46/0x5c - [] ___might_sleep+0x137/0x220 - [] rt_spin_lock+0x1f/0x80 - [] do_force_sig_info+0x2a/0xc0 - [] force_sig_info+0xd/0x10 - [] send_sigtrap+0x6f/0x80 - [] do_debug+0x161/0x1a0 - [] debug_stack_correct+0x2e/0x35 - -This happens since 959274753857 ("x86, traps: Track entry into and exit -from IST context") which was merged in v4.1-rc1. - -Signed-off-by: Yang Shi -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/include/asm/signal.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/x86/include/asm/signal.h -+++ b/arch/x86/include/asm/signal.h -@@ -32,7 +32,7 @@ typedef struct { - * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the - * trap. - */ --#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -+#if defined(CONFIG_PREEMPT_RT_FULL) - #define ARCH_RT_DELAYS_SIGNAL_SEND - #endif - diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch deleted file mode 100644 index 38c03841c..000000000 --- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Thomas Gleixner -Date: Thu, 16 Dec 2010 14:25:18 +0100 -Subject: x86: stackprotector: Avoid random pool on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -CPU bringup calls into the random pool to initialize the stack -canary. During boot that works nicely even on RT as the might sleep -checks are disabled. During CPU hotplug the might sleep checks -trigger. Making the locks in random raw is a major PITA, so avoid the -call on RT is the only sensible solution. This is basically the same -randomness which we get during boot where the random pool has no -entropy and we rely on the TSC randomnness. - -Reported-by: Carsten Emde -Signed-off-by: Thomas Gleixner - ---- - arch/x86/include/asm/stackprotector.h | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - ---- a/arch/x86/include/asm/stackprotector.h -+++ b/arch/x86/include/asm/stackprotector.h -@@ -59,7 +59,7 @@ - */ - static __always_inline void boot_init_stack_canary(void) - { -- u64 canary; -+ u64 uninitialized_var(canary); - u64 tsc; - - #ifdef CONFIG_X86_64 -@@ -70,8 +70,15 @@ static __always_inline void boot_init_st - * of randomness. The TSC only matters for very early init, - * there it already has some randomness on most systems. Later - * on during the bootup the random pool has true entropy too. -+ * -+ * For preempt-rt we need to weaken the randomness a bit, as -+ * we can't call into the random generator from atomic context -+ * due to locking constraints. We just leave canary -+ * uninitialized and use the TSC based randomness on top of it. 
- */ -+#ifndef CONFIG_PREEMPT_RT_FULL - get_random_bytes(&canary, sizeof(canary)); -+#endif - tsc = rdtsc(); - canary += tsc + (tsc << 32UL); - diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch deleted file mode 100644 index db15c84fe..000000000 --- a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Thomas Gleixner -Date: Sun, 26 Jul 2009 02:21:32 +0200 -Subject: x86: Use generic rwsem_spinlocks on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz - -Simplifies the separation of anon_rw_semaphores and rw_semaphores for --rt. - -Signed-off-by: Thomas Gleixner - ---- - arch/x86/Kconfig | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -212,8 +212,11 @@ config ARCH_MAY_HAVE_PC_FDC - def_bool y - depends on ISA_DMA_API - -+config RWSEM_GENERIC_SPINLOCK -+ def_bool PREEMPT_RT_FULL -+ - config RWSEM_XCHGADD_ALGORITHM -- def_bool y -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_CALIBRATE_DELAY - def_bool y diff --git a/debian/patches/features/arm/arm-orion-always-use-multi_irq_handler.patch b/debian/patches/features/arm/arm-orion-always-use-multi_irq_handler.patch deleted file mode 100644 index e06be35ec..000000000 --- a/debian/patches/features/arm/arm-orion-always-use-multi_irq_handler.patch +++ /dev/null @@ -1,315 +0,0 @@ -From: Arnd Bergmann -Date: Wed, 2 Dec 2015 22:27:04 +0100 -Subject: ARM: orion: always use MULTI_IRQ_HANDLER -Origin: https://git.kernel.org/linus/b8cd337c8e0330f4a29b3d1f69b7c73b324b1f8d - -As a preparation for multiplatform support, this enables -the MULTI_IRQ_HANDLER code unconditionally on dove and -orion5x, and introduces the respective code on mv78xx0, -which did not have it so far. The classic entry-macro.S -files are removed as they are now obsolete. 
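For context, the C decoders that replace the entry-macro.S files all follow one shape; a minimal sketch, where irq_base, CAUSE_OFF and MASK_OFF are placeholder names for the per-SoC register block (the real offsets differ between dove, orion5x and mv78xx0, and chips with several interrupt banks repeat the read per bank):

    static asmlinkage void __exception_irq_entry
    legacy_handle_irq(struct pt_regs *regs)
    {
            /* pending sources: cause register filtered by the mask register */
            u32 stat = readl_relaxed(irq_base + CAUSE_OFF) &
                       readl_relaxed(irq_base + MASK_OFF);

            if (stat)
                    handle_IRQ(__fls(stat), regs); /* dispatch highest pending bit */
    }

The handler is registered with set_handle_irq(), the hook that MULTI_IRQ_HANDLER provides; once every machine goes through it, the assembler macros have no remaining user and can be deleted.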
- -Signed-off-by: Arnd Bergmann -Acked-by: Andrew Lunn -Signed-off-by: Gregory CLEMENT -[bwh: Adjust to apply on top of 4.4] ---- - arch/arm/Kconfig | 3 ++ - arch/arm/mach-dove/include/mach/entry-macro.S | 33 ------------------- - arch/arm/mach-dove/irq.c | 12 +------ - arch/arm/mach-mv78xx0/include/mach/entry-macro.S | 41 ------------------------ - arch/arm/mach-mv78xx0/irq.c | 33 +++++++++++++++++++ - arch/arm/mach-orion5x/include/mach/entry-macro.S | 25 --------------- - arch/arm/mach-orion5x/irq.c | 11 ------- - arch/arm/plat-orion/irq.c | 1 - - arch/arm/plat-orion/mpp.c | 1 - - 9 files changed, 37 insertions(+), 123 deletions(-) - delete mode 100644 arch/arm/mach-dove/include/mach/entry-macro.S - delete mode 100644 arch/arm/mach-mv78xx0/include/mach/entry-macro.S - delete mode 100644 arch/arm/mach-orion5x/include/mach/entry-macro.S - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -512,6 +512,7 @@ config ARCH_DOVE - select CPU_PJ4 - select GENERIC_CLOCKEVENTS - select MIGHT_HAVE_PCI -+ select MULTI_IRQ_HANDLER - select MVEBU_MBUS - select PINCTRL - select PINCTRL_DOVE -@@ -525,6 +526,7 @@ config ARCH_MV78XX0 - select CPU_FEROCEON - select GENERIC_CLOCKEVENTS - select MVEBU_MBUS -+ select MULTI_IRQ_HANDLER - select PCI - select PLAT_ORION_LEGACY - help -@@ -538,6 +540,7 @@ config ARCH_ORION5X - select CPU_FEROCEON - select GENERIC_CLOCKEVENTS - select MVEBU_MBUS -+ select MULTI_IRQ_HANDLER - select PCI - select PLAT_ORION_LEGACY - select MULTI_IRQ_HANDLER ---- a/arch/arm/mach-dove/include/mach/entry-macro.S -+++ /dev/null -@@ -1,33 +0,0 @@ --/* -- * arch/arm/mach-dove/include/mach/entry-macro.S -- * -- * Low-level IRQ helper macros for Marvell Dove platforms -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. -- */ -- --#include -- -- .macro get_irqnr_preamble, base, tmp -- ldr \base, =IRQ_VIRT_BASE -- .endm -- -- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -- @ check low interrupts -- ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] -- ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] -- mov \irqnr, #32 -- ands \irqstat, \irqstat, \tmp -- -- @ if no low interrupts set, check high interrupts -- ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] -- ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] -- moveq \irqnr, #64 -- andeqs \irqstat, \irqstat, \tmp -- -- @ find first active interrupt source -- clzne \irqstat, \irqstat -- subne \irqnr, \irqnr, \irqstat -- .endm ---- a/arch/arm/mach-dove/irq.c -+++ b/arch/arm/mach-dove/irq.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -109,14 +110,6 @@ static int __initdata gpio2_irqs[4] = { - 0, - }; - --#ifdef CONFIG_MULTI_IRQ_HANDLER --/* -- * Compiling with both non-DT and DT support enabled, will -- * break asm irq handler used by non-DT boards. Therefore, -- * we provide a C-style irq handler even for non-DT boards, -- * if MULTI_IRQ_HANDLER is set. -- */ -- - static void __iomem *dove_irq_base = IRQ_VIRT_BASE; - - static asmlinkage void -@@ -139,7 +132,6 @@ __exception_irq_entry dove_legacy_handle - return; - } - } --#endif - - void __init dove_init_irq(void) - { -@@ -148,9 +140,7 @@ void __init dove_init_irq(void) - orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); - orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); - --#ifdef CONFIG_MULTI_IRQ_HANDLER - set_handle_irq(dove_legacy_handle_irq); --#endif - - /* - * Initialize gpiolib for GPIOs 0-71. 
---- a/arch/arm/mach-mv78xx0/include/mach/entry-macro.S -+++ /dev/null -@@ -1,41 +0,0 @@ --/* -- * arch/arm/mach-mv78xx0/include/mach/entry-macro.S -- * -- * Low-level IRQ helper macros for Marvell MV78xx0 platforms -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. -- */ -- --#include -- -- .macro get_irqnr_preamble, base, tmp -- ldr \base, =IRQ_VIRT_BASE -- .endm -- -- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -- @ check low interrupts -- ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] -- ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] -- mov \irqnr, #31 -- ands \irqstat, \irqstat, \tmp -- bne 1001f -- -- @ if no low interrupts set, check high interrupts -- ldr \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] -- ldr \tmp, [\base, #IRQ_MASK_HIGH_OFF] -- mov \irqnr, #63 -- ands \irqstat, \irqstat, \tmp -- bne 1001f -- -- @ if no high interrupts set, check error interrupts -- ldr \irqstat, [\base, #IRQ_CAUSE_ERR_OFF] -- ldr \tmp, [\base, #IRQ_MASK_ERR_OFF] -- mov \irqnr, #95 -- ands \irqstat, \irqstat, \tmp -- -- @ find first active interrupt source --1001: clzne \irqstat, \irqstat -- subne \irqnr, \irqnr, \irqstat -- .endm ---- a/arch/arm/mach-mv78xx0/irq.c -+++ b/arch/arm/mach-mv78xx0/irq.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -23,12 +24,44 @@ static int __initdata gpio0_irqs[4] = { - IRQ_MV78XX0_GPIO_24_31, - }; - -+static void __iomem *mv78xx0_irq_base = IRQ_VIRT_BASE; -+ -+static asmlinkage void -+__exception_irq_entry mv78xx0_legacy_handle_irq(struct pt_regs *regs) -+{ -+ u32 stat; -+ -+ stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_LOW_OFF); -+ stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_LOW_OFF); -+ if (stat) { -+ unsigned int hwirq = __fls(stat); -+ handle_IRQ(hwirq, regs); -+ return; -+ } -+ stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_HIGH_OFF); -+ stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_HIGH_OFF); -+ if (stat) { -+ unsigned int hwirq = 32 + __fls(stat); -+ handle_IRQ(hwirq, regs); -+ return; -+ } -+ stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_ERR_OFF); -+ stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_ERR_OFF); -+ if (stat) { -+ unsigned int hwirq = 64 + __fls(stat); -+ handle_IRQ(hwirq, regs); -+ return; -+ } -+} -+ - void __init mv78xx0_init_irq(void) - { - orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); - orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); - orion_irq_init(64, IRQ_VIRT_BASE + IRQ_MASK_ERR_OFF); - -+ set_handle_irq(mv78xx0_legacy_handle_irq); -+ - /* - * Initialize gpiolib for GPIOs 0-31. (The GPIO interrupt mask - * registers for core #1 are at an offset of 0x18 from those of ---- a/arch/arm/mach-orion5x/include/mach/entry-macro.S -+++ /dev/null -@@ -1,25 +0,0 @@ --/* -- * arch/arm/mach-orion5x/include/mach/entry-macro.S -- * -- * Low-level IRQ helper macros for Orion platforms -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. 
-- */ -- --#include -- -- .macro get_irqnr_preamble, base, tmp -- ldr \base, =MAIN_IRQ_CAUSE -- .endm -- -- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -- ldr \irqstat, [\base, #0] @ main cause -- ldr \tmp, [\base, #(MAIN_IRQ_MASK - MAIN_IRQ_CAUSE)] @ main mask -- mov \irqnr, #0 @ default irqnr -- @ find cause bits that are unmasked -- ands \irqstat, \irqstat, \tmp @ clear Z flag if any -- clzne \irqnr, \irqstat @ calc irqnr -- rsbne \irqnr, \irqnr, #32 -- .endm ---- a/arch/arm/mach-orion5x/irq.c -+++ b/arch/arm/mach-orion5x/irq.c -@@ -26,14 +26,6 @@ static int __initdata gpio0_irqs[4] = { - IRQ_ORION5X_GPIO_24_31, - }; - --#ifdef CONFIG_MULTI_IRQ_HANDLER --/* -- * Compiling with both non-DT and DT support enabled, will -- * break asm irq handler used by non-DT boards. Therefore, -- * we provide a C-style irq handler even for non-DT boards, -- * if MULTI_IRQ_HANDLER is set. -- */ -- - asmlinkage void - __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) - { -@@ -47,15 +39,12 @@ __exception_irq_entry orion5x_legacy_han - return; - } - } --#endif - - void __init orion5x_init_irq(void) - { - orion_irq_init(1, MAIN_IRQ_MASK); - --#ifdef CONFIG_MULTI_IRQ_HANDLER - set_handle_irq(orion5x_legacy_handle_irq); --#endif - - /* - * Initialize gpiolib for GPIOs 0-31. ---- a/arch/arm/plat-orion/irq.c -+++ b/arch/arm/plat-orion/irq.c -@@ -18,7 +18,6 @@ - #include - #include - #include --#include - - void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) - { ---- a/arch/arm/plat-orion/mpp.c -+++ b/arch/arm/plat-orion/mpp.c -@@ -13,7 +13,6 @@ - #include - #include - #include --#include - #include - #include - diff --git a/debian/patches/features/arm/arm-orion-move-watchdog-setup-to-mach-orion5x.patch b/debian/patches/features/arm/arm-orion-move-watchdog-setup-to-mach-orion5x.patch deleted file mode 100644 index ffd61af9f..000000000 --- a/debian/patches/features/arm/arm-orion-move-watchdog-setup-to-mach-orion5x.patch +++ /dev/null @@ -1,132 +0,0 @@ -From: Arnd Bergmann -Date: Wed, 2 Dec 2015 22:27:03 +0100 -Subject: ARM: orion: move watchdog setup to mach-orion5x -Origin: https://git.kernel.org/linus/06f3008a6a7454389a82495eb1fc132c2b0710f6 - -The watchdog device node is created in plat-orion/common.c -but depends on the bridge address that is platform specific, -so as a preparation for orion multiplatform support, we -move it out of the common code into orion5x and dove. - -At the moment, dove does not use the watchdog, so I'm marking -the function as __maybe_unused for the moment. The compiler -will be able to compile out the device definition this way, -and we can easily add it later. 
- -Signed-off-by: Arnd Bergmann -Acked-by: Andrew Lunn -Signed-off-by: Gregory CLEMENT ---- - arch/arm/mach-dove/common.c | 17 +++++++++++++++++ - arch/arm/mach-orion5x/common.c | 14 +++++++++++++- - arch/arm/plat-orion/common.c | 21 --------------------- - arch/arm/plat-orion/include/plat/common.h | 2 -- - 4 files changed, 30 insertions(+), 24 deletions(-) - -diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c -index 0d1a892..25a682f 100644 ---- a/arch/arm/mach-dove/common.c -+++ b/arch/arm/mach-dove/common.c -@@ -375,6 +375,23 @@ void __init dove_setup_cpu_wins(void) - DOVE_SCRATCHPAD_SIZE); - } - -+static struct resource orion_wdt_resource[] = { -+ DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x04), -+ DEFINE_RES_MEM(RSTOUTn_MASK_PHYS, 0x04), -+}; -+ -+static struct platform_device orion_wdt_device = { -+ .name = "orion_wdt", -+ .id = -1, -+ .num_resources = ARRAY_SIZE(orion_wdt_resource), -+ .resource = orion_wdt_resource, -+}; -+ -+static void __init __maybe_unused orion_wdt_init(void) -+{ -+ platform_device_register(&orion_wdt_device); -+} -+ - void __init dove_init(void) - { - pr_info("Dove 88AP510 SoC, TCLK = %d MHz.\n", -diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c -index 6bbb7b5..2b7889e 100644 ---- a/arch/arm/mach-orion5x/common.c -+++ b/arch/arm/mach-orion5x/common.c -@@ -184,9 +184,21 @@ static void __init orion5x_crypto_init(void) - /***************************************************************************** - * Watchdog - ****************************************************************************/ -+static struct resource orion_wdt_resource[] = { -+ DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x04), -+ DEFINE_RES_MEM(RSTOUTn_MASK_PHYS, 0x04), -+}; -+ -+static struct platform_device orion_wdt_device = { -+ .name = "orion_wdt", -+ .id = -1, -+ .num_resources = ARRAY_SIZE(orion_wdt_resource), -+ .resource = orion_wdt_resource, -+}; -+ - static void __init orion5x_wdt_init(void) - { -- orion_wdt_init(); -+ platform_device_register(&orion_wdt_device); - } - - -diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c -index 8861c36..78c8bf4 100644 ---- a/arch/arm/plat-orion/common.c -+++ b/arch/arm/plat-orion/common.c -@@ -21,7 +21,6 @@ - #include - #include - #include --#include - #include - - /* Create a clkdev entry for a given device/clk */ -@@ -589,26 +588,6 @@ void __init orion_spi_1_init(unsigned long mapbase) - } - - /***************************************************************************** -- * Watchdog -- ****************************************************************************/ --static struct resource orion_wdt_resource[] = { -- DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x04), -- DEFINE_RES_MEM(RSTOUTn_MASK_PHYS, 0x04), --}; -- --static struct platform_device orion_wdt_device = { -- .name = "orion_wdt", -- .id = -1, -- .num_resources = ARRAY_SIZE(orion_wdt_resource), -- .resource = orion_wdt_resource, --}; -- --void __init orion_wdt_init(void) --{ -- platform_device_register(&orion_wdt_device); --} -- --/***************************************************************************** - * XOR - ****************************************************************************/ - static u64 orion_xor_dmamask = DMA_BIT_MASK(32); -diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h -index d9a24f6..9e6d76a 100644 ---- a/arch/arm/plat-orion/include/plat/common.h -+++ b/arch/arm/plat-orion/include/plat/common.h -@@ -75,8 +75,6 @@ void __init orion_spi_init(unsigned long mapbase); - - void __init 
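For context on why this block cannot stay in plat-orion: both resource addresses hang off the bridge register window, whose base differs per SoC. A minimal sketch of the dependency, modelled on orion5x's layout (the offsets shown follow its bridge-regs.h and are illustrative, not normative):

    /* the bridge window is SoC-specific ... */
    #define ORION5X_BRIDGE_PHYS_BASE   (ORION5X_REGS_PHYS_BASE + 0x20000)
    /* ... and the watchdog resources are derived from it */
    #define TIMER_PHYS_BASE            (ORION5X_BRIDGE_PHYS_BASE + 0x300)
    #define RSTOUTn_MASK_PHYS          (ORION5X_BRIDGE_PHYS_BASE + 0x108)

Once the mach/ headers stop being globally visible under multiplatform, common code can no longer name these constants, so each mach- directory registers its own copy of the platform device instead.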
orion_spi_1_init(unsigned long mapbase); - --void __init orion_wdt_init(void); -- - void __init orion_xor0_init(unsigned long mapbase_low, - unsigned long mapbase_high, - unsigned long irq_0, diff --git a/debian/patches/features/arm/arm-orion-use-sparse_irq-everywhere.patch b/debian/patches/features/arm/arm-orion-use-sparse_irq-everywhere.patch deleted file mode 100644 index b175e41b8..000000000 --- a/debian/patches/features/arm/arm-orion-use-sparse_irq-everywhere.patch +++ /dev/null @@ -1,392 +0,0 @@ -From: Arnd Bergmann -Date: Wed, 2 Dec 2015 22:27:05 +0100 -Subject: ARM: orion: use SPARSE_IRQ everywhere -Origin: https://git.kernel.org/linus/5cdbe5d23a8a0d7274d628bb9d5ff018d25075ca - -As a preparation for multiplatform support, this moves all the -code using plat-orion over to use sparse irq support, which is -enabled implicitly for multiplatform. - -In particular, the hardcoded NR_IRQS macro gets replaced with -a machine specific one that is set in the machine descriptor -in order to set up a static mapping for all legacy interrupts. - -Signed-off-by: Arnd Bergmann -Acked-by: Andrew Lunn -Signed-off-by: Gregory CLEMENT ---- - arch/arm/Kconfig | 3 +++ - arch/arm/mach-dove/cm-a510.c | 1 + - arch/arm/mach-dove/dove-db-setup.c | 1 + - arch/arm/mach-dove/include/mach/dove.h | 2 ++ - arch/arm/mach-dove/include/mach/irqs.h | 2 +- - arch/arm/mach-dove/include/mach/pm.h | 2 +- - arch/arm/mach-dove/irq.c | 2 +- - arch/arm/mach-mv78xx0/buffalo-wxl-setup.c | 1 + - arch/arm/mach-mv78xx0/db78x00-bp-setup.c | 1 + - arch/arm/mach-mv78xx0/include/mach/irqs.h | 2 +- - arch/arm/mach-mv78xx0/include/mach/mv78xx0.h | 2 ++ - arch/arm/mach-mv78xx0/rd78x00-masa-setup.c | 1 + - arch/arm/mach-orion5x/db88f5281-setup.c | 1 + - arch/arm/mach-orion5x/dns323-setup.c | 1 + - arch/arm/mach-orion5x/include/mach/irqs.h | 2 +- - arch/arm/mach-orion5x/include/mach/orion5x.h | 2 ++ - arch/arm/mach-orion5x/kurobox_pro-setup.c | 2 ++ - arch/arm/mach-orion5x/ls-chl-setup.c | 1 + - arch/arm/mach-orion5x/ls_hgl-setup.c | 1 + - arch/arm/mach-orion5x/mv2120-setup.c | 1 + - arch/arm/mach-orion5x/net2big-setup.c | 1 + - arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c | 1 + - arch/arm/mach-orion5x/rd88f5181l-ge-setup.c | 1 + - arch/arm/mach-orion5x/rd88f5182-setup.c | 1 + - arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c | 1 + - arch/arm/mach-orion5x/terastation_pro2-setup.c | 1 + - arch/arm/mach-orion5x/ts209-setup.c | 1 + - arch/arm/mach-orion5x/ts409-setup.c | 1 + - arch/arm/mach-orion5x/ts78xx-setup.c | 1 + - arch/arm/mach-orion5x/wnr854t-setup.c | 1 + - arch/arm/mach-orion5x/wrt350n-v2-setup.c | 1 + - 31 files changed, 37 insertions(+), 5 deletions(-) - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -517,6 +517,7 @@ config ARCH_DOVE - select PINCTRL - select PINCTRL_DOVE - select PLAT_ORION_LEGACY -+ select SPARSE_IRQ - help - Support for the Marvell Dove SoC 88AP510 - -@@ -529,6 +530,7 @@ config ARCH_MV78XX0 - select MULTI_IRQ_HANDLER - select PCI - select PLAT_ORION_LEGACY -+ select SPARSE_IRQ - help - Support for the following Marvell MV78xx0 series SoCs: - MV781x0, MV782x0. 
-@@ -544,6 +546,7 @@ config ARCH_ORION5X - select PCI - select PLAT_ORION_LEGACY - select MULTI_IRQ_HANDLER -+ select SPARSE_IRQ - help - Support for the following Marvell Orion 5x series SoCs: - Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182), ---- a/arch/arm/mach-dove/cm-a510.c -+++ b/arch/arm/mach-dove/cm-a510.c -@@ -88,6 +88,7 @@ static void __init cm_a510_init(void) - - MACHINE_START(CM_A510, "Compulab CM-A510 Board") - .atag_offset = 0x100, -+ .nr_irqs = DOVE_NR_IRQS, - .init_machine = cm_a510_init, - .map_io = dove_map_io, - .init_early = dove_init_early, ---- a/arch/arm/mach-dove/dove-db-setup.c -+++ b/arch/arm/mach-dove/dove-db-setup.c -@@ -94,6 +94,7 @@ static void __init dove_db_init(void) - - MACHINE_START(DOVE_DB, "Marvell DB-MV88AP510-BP Development Board") - .atag_offset = 0x100, -+ .nr_irqs = DOVE_NR_IRQS, - .init_machine = dove_db_init, - .map_io = dove_map_io, - .init_early = dove_init_early, ---- a/arch/arm/mach-dove/include/mach/dove.h -+++ b/arch/arm/mach-dove/include/mach/dove.h -@@ -11,6 +11,8 @@ - #ifndef __ASM_ARCH_DOVE_H - #define __ASM_ARCH_DOVE_H - -+#include -+ - /* - * Marvell Dove address maps. - * ---- a/arch/arm/mach-dove/include/mach/irqs.h -+++ b/arch/arm/mach-dove/include/mach/irqs.h -@@ -90,7 +90,7 @@ - #define NR_PMU_IRQS 7 - #define IRQ_DOVE_RTC (IRQ_DOVE_PMU_START + 5) - --#define NR_IRQS (IRQ_DOVE_PMU_START + NR_PMU_IRQS) -+#define DOVE_NR_IRQS (IRQ_DOVE_PMU_START + NR_PMU_IRQS) - - - #endif ---- a/arch/arm/mach-dove/include/mach/pm.h -+++ b/arch/arm/mach-dove/include/mach/pm.h -@@ -63,7 +63,7 @@ static inline int pmu_to_irq(int pin) - - static inline int irq_to_pmu(int irq) - { -- if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) -+ if (IRQ_DOVE_PMU_START <= irq && irq < DOVE_NR_IRQS) - return irq - IRQ_DOVE_PMU_START; - - return -EINVAL; ---- a/arch/arm/mach-dove/irq.c -+++ b/arch/arm/mach-dove/irq.c -@@ -160,7 +160,7 @@ void __init dove_init_irq(void) - writel(0, PMU_INTERRUPT_MASK); - writel(0, PMU_INTERRUPT_CAUSE); - -- for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) { -+ for (i = IRQ_DOVE_PMU_START; i < DOVE_NR_IRQS; i++) { - irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq); - irq_set_status_flags(i, IRQ_LEVEL); - irq_clear_status_flags(i, IRQ_NOREQUEST); ---- a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c -+++ b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c -@@ -146,6 +146,7 @@ subsys_initcall(wxl_pci_init); - MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL") - /* Maintainer: Sebastien Requiem */ - .atag_offset = 0x100, -+ .nr_irqs = MV78XX0_NR_IRQS, - .init_machine = wxl_init, - .map_io = mv78xx0_map_io, - .init_early = mv78xx0_init_early, ---- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c -+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c -@@ -94,6 +94,7 @@ subsys_initcall(db78x00_pci_init); - MACHINE_START(DB78X00_BP, "Marvell DB-78x00-BP Development Board") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, -+ .nr_irqs = MV78XX0_NR_IRQS, - .init_machine = db78x00_init, - .map_io = mv78xx0_map_io, - .init_early = mv78xx0_init_early, ---- a/arch/arm/mach-mv78xx0/include/mach/irqs.h -+++ b/arch/arm/mach-mv78xx0/include/mach/irqs.h -@@ -88,7 +88,7 @@ - #define IRQ_MV78XX0_GPIO_START 96 - #define NR_GPIO_IRQS 32 - --#define NR_IRQS (IRQ_MV78XX0_GPIO_START + NR_GPIO_IRQS) -+#define MV78XX0_NR_IRQS (IRQ_MV78XX0_GPIO_START + NR_GPIO_IRQS) - - - #endif ---- a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h -+++ b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h -@@ -12,6 +12,8 @@ - #ifndef __ASM_ARCH_MV78XX0_H - #define __ASM_ARCH_MV78XX0_H - 
-+#include "irqs.h" -+ - /* - * Marvell MV78xx0 address maps. - * ---- a/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c -+++ b/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c -@@ -79,6 +79,7 @@ subsys_initcall(rd78x00_pci_init); - MACHINE_START(RD78X00_MASA, "Marvell RD-78x00-MASA Development Board") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, -+ .nr_irqs = MV78XX0_NR_IRQS, - .init_machine = rd78x00_masa_init, - .map_io = mv78xx0_map_io, - .init_early = mv78xx0_init_early, ---- a/arch/arm/mach-orion5x/db88f5281-setup.c -+++ b/arch/arm/mach-orion5x/db88f5281-setup.c -@@ -369,6 +369,7 @@ static void __init db88f5281_init(void) - MACHINE_START(DB88F5281, "Marvell Orion-2 Development Board") - /* Maintainer: Tzachi Perelstein */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = db88f5281_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/dns323-setup.c -+++ b/arch/arm/mach-orion5x/dns323-setup.c -@@ -666,6 +666,7 @@ static void __init dns323_init(void) - MACHINE_START(DNS323, "D-Link DNS-323") - /* Maintainer: Herbert Valerio Riedel */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = dns323_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/include/mach/irqs.h -+++ b/arch/arm/mach-orion5x/include/mach/irqs.h -@@ -54,7 +54,7 @@ - #define IRQ_ORION5X_GPIO_START 33 - #define NR_GPIO_IRQS 32 - --#define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) -+#define ORION5X_NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) - - - #endif ---- a/arch/arm/mach-orion5x/include/mach/orion5x.h -+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h -@@ -14,6 +14,8 @@ - #ifndef __ASM_ARCH_ORION5X_H - #define __ASM_ARCH_ORION5X_H - -+#include -+ - /***************************************************************************** - * Orion Address Maps - * ---- a/arch/arm/mach-orion5x/kurobox_pro-setup.c -+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c -@@ -383,6 +383,7 @@ static void __init kurobox_pro_init(void - MACHINE_START(KUROBOX_PRO, "Buffalo/Revogear Kurobox Pro") - /* Maintainer: Ronen Shitrit */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = kurobox_pro_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, -@@ -397,6 +398,7 @@ MACHINE_END - MACHINE_START(LINKSTATION_PRO, "Buffalo Linkstation Pro/Live") - /* Maintainer: Byron Bradley */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = kurobox_pro_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/ls-chl-setup.c -+++ b/arch/arm/mach-orion5x/ls-chl-setup.c -@@ -320,6 +320,7 @@ static void __init lschl_init(void) - MACHINE_START(LINKSTATION_LSCHL, "Buffalo Linkstation LiveV3 (LS-CHL)") - /* Maintainer: Ash Hughes */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = lschl_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/ls_hgl-setup.c -+++ b/arch/arm/mach-orion5x/ls_hgl-setup.c -@@ -267,6 +267,7 @@ static void __init ls_hgl_init(void) - MACHINE_START(LINKSTATION_LS_HGL, "Buffalo Linkstation LS-HGL") - /* Maintainer: Zhu Qingsen */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = ls_hgl_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/mv2120-setup.c -+++ b/arch/arm/mach-orion5x/mv2120-setup.c -@@ -232,6 +232,7 @@ static void __init mv2120_init(void) - 
MACHINE_START(MV2120, "HP Media Vault mv2120") - /* Maintainer: Martin Michlmayr */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = mv2120_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/net2big-setup.c -+++ b/arch/arm/mach-orion5x/net2big-setup.c -@@ -423,6 +423,7 @@ static void __init net2big_init(void) - /* Warning: LaCie use a wrong mach-type (0x20e=526) in their bootloader. */ - MACHINE_START(NET2BIG, "LaCie 2Big Network") - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = net2big_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c -@@ -169,6 +169,7 @@ subsys_initcall(rd88f5181l_fxo_pci_init) - MACHINE_START(RD88F5181L_FXO, "Marvell Orion-VoIP FXO Reference Design") - /* Maintainer: Nicolas Pitre */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = rd88f5181l_fxo_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c -@@ -181,6 +181,7 @@ subsys_initcall(rd88f5181l_ge_pci_init); - MACHINE_START(RD88F5181L_GE, "Marvell Orion-VoIP GE Reference Design") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = rd88f5181l_ge_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/rd88f5182-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c -@@ -281,6 +281,7 @@ static void __init rd88f5182_init(void) - MACHINE_START(RD88F5182, "Marvell Orion-NAS Reference Design") - /* Maintainer: Ronen Shitrit */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = rd88f5182_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c -+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c -@@ -119,6 +119,7 @@ subsys_initcall(rd88f6183ap_ge_pci_init) - MACHINE_START(RD88F6183AP_GE, "Marvell Orion-1-90 AP GE Reference Design") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = rd88f6183ap_ge_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/terastation_pro2-setup.c -+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c -@@ -359,6 +359,7 @@ static void __init tsp2_init(void) - MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live") - /* Maintainer: Sylver Bruneau */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = tsp2_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/ts209-setup.c -+++ b/arch/arm/mach-orion5x/ts209-setup.c -@@ -324,6 +324,7 @@ static void __init qnap_ts209_init(void) - MACHINE_START(TS209, "QNAP TS-109/TS-209") - /* Maintainer: Byron Bradley */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = qnap_ts209_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/ts409-setup.c -+++ b/arch/arm/mach-orion5x/ts409-setup.c -@@ -313,6 +313,7 @@ static void __init qnap_ts409_init(void) - MACHINE_START(TS409, "QNAP TS-409") - /* Maintainer: Sylver Bruneau */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = qnap_ts409_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- 
a/arch/arm/mach-orion5x/ts78xx-setup.c -+++ b/arch/arm/mach-orion5x/ts78xx-setup.c -@@ -615,6 +615,7 @@ static void __init ts78xx_init(void) - MACHINE_START(TS78XX, "Technologic Systems TS-78xx SBC") - /* Maintainer: Alexander Clouter */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = ts78xx_init, - .map_io = ts78xx_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/wnr854t-setup.c -+++ b/arch/arm/mach-orion5x/wnr854t-setup.c -@@ -174,6 +174,7 @@ subsys_initcall(wnr854t_pci_init); - MACHINE_START(WNR854T, "Netgear WNR854T") - /* Maintainer: Imre Kaloz */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = wnr854t_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, ---- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c -+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c -@@ -262,6 +262,7 @@ subsys_initcall(wrt350n_v2_pci_init); - MACHINE_START(WRT350N_V2, "Linksys WRT350N v2") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, -+ .nr_irqs = ORION5X_NR_IRQS, - .init_machine = wrt350n_v2_init, - .map_io = orion5x_map_io, - .init_early = orion5x_init_early, diff --git a/debian/patches/features/arm/arm-orion5x-clean-up-mach-.h-headers.patch b/debian/patches/features/arm/arm-orion5x-clean-up-mach-.h-headers.patch deleted file mode 100644 index 649578155..000000000 --- a/debian/patches/features/arm/arm-orion5x-clean-up-mach-.h-headers.patch +++ /dev/null @@ -1,1020 +0,0 @@ -From: Arnd Bergmann -Date: Wed, 2 Dec 2015 22:27:08 +0100 -Subject: ARM: orion5x: clean up mach/*.h headers -Origin: https://git.kernel.org/linus/c22c2c6008d69ff2632f8a69c62782468c2bb5a0 - -This is a simple move of all header files that are no longer -included by anything else from the include/mach directory -to the platform directory itself as preparation for -multiplatform support. - -The mach/uncompress.h headers are left in place for now, -and are mildly modified to be independent of the other -headers. They will be removed entirely when ARCH_MULTIPLATFORM -gets enabled and they become obsolete. - -Rather than updating the path names inside of the comments -of each header, I delete those comments to avoid having to -update them again, should they get moved or copied another -time. 
-
-Signed-off-by: Arnd Bergmann
-Acked-by: Andrew Lunn
-Signed-off-by: Gregory CLEMENT
----
- arch/arm/mach-orion5x/board-d2net.c              |   2 +-
- arch/arm/mach-orion5x/board-dt.c                 |   4 +-
- arch/arm/mach-orion5x/board-mss2.c               |   4 +-
- arch/arm/mach-orion5x/board-rd88f5182.c          |   2 +-
- arch/arm/mach-orion5x/bridge-regs.h              |  35 ++++++
- arch/arm/mach-orion5x/common.c                   |   6 +-
- arch/arm/mach-orion5x/db88f5281-setup.c          |   2 +-
- arch/arm/mach-orion5x/dns323-setup.c             |   2 +-
- arch/arm/mach-orion5x/include/mach/bridge-regs.h |  37 ------
- arch/arm/mach-orion5x/include/mach/hardware.h    |  14 ---
- arch/arm/mach-orion5x/include/mach/irqs.h        |  60 ---------
- arch/arm/mach-orion5x/include/mach/orion5x.h     | 148 -----------------------
- arch/arm/mach-orion5x/include/mach/uncompress.h  |   4 +-
- arch/arm/mach-orion5x/irq.c                      |   2 +-
- arch/arm/mach-orion5x/irqs.h                     |  58 +++++++++
- arch/arm/mach-orion5x/kurobox_pro-setup.c        |   2 +-
- arch/arm/mach-orion5x/ls-chl-setup.c             |   2 +-
- arch/arm/mach-orion5x/ls_hgl-setup.c             |   2 +-
- arch/arm/mach-orion5x/mpp.c                      |   2 +-
- arch/arm/mach-orion5x/mv2120-setup.c             |   2 +-
- arch/arm/mach-orion5x/net2big-setup.c            |   2 +-
- arch/arm/mach-orion5x/orion5x.h                  | 146 ++++++++++++++++
- arch/arm/mach-orion5x/pci.c                      |   2 +-
- arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c     |   2 +-
- arch/arm/mach-orion5x/rd88f5181l-ge-setup.c      |   2 +-
- arch/arm/mach-orion5x/rd88f5182-setup.c          |   2 +-
- arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c     |   2 +-
- arch/arm/mach-orion5x/terastation_pro2-setup.c   |   2 +-
- arch/arm/mach-orion5x/ts209-setup.c              |   2 +-
- arch/arm/mach-orion5x/ts409-setup.c              |   2 +-
- arch/arm/mach-orion5x/ts78xx-setup.c             |   2 +-
- arch/arm/mach-orion5x/tsx09-common.c             |   2 +-
- arch/arm/mach-orion5x/wnr854t-setup.c            |   2 +-
- arch/arm/mach-orion5x/wrt350n-v2-setup.c         |   2 +-
- 34 files changed, 270 insertions(+), 292 deletions(-)
- create mode 100644 arch/arm/mach-orion5x/bridge-regs.h
- delete mode 100644 arch/arm/mach-orion5x/include/mach/bridge-regs.h
- delete mode 100644 arch/arm/mach-orion5x/include/mach/hardware.h
- delete mode 100644 arch/arm/mach-orion5x/include/mach/irqs.h
- delete mode 100644 arch/arm/mach-orion5x/include/mach/orion5x.h
- create mode 100644 arch/arm/mach-orion5x/irqs.h
- create mode 100644 arch/arm/mach-orion5x/orion5x.h
-
-diff --git a/arch/arm/mach-orion5x/board-d2net.c b/arch/arm/mach-orion5x/board-d2net.c
-index 8a72841..a89376a 100644
---- a/arch/arm/mach-orion5x/board-d2net.c
-+++ b/arch/arm/mach-orion5x/board-d2net.c
-@@ -20,9 +20,9 @@
- #include
- #include
- #include
--#include
- #include
- #include "common.h"
-+#include "orion5x.h"
-
- /*****************************************************************************
- * LaCie d2 Network Info
-diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
-index d087178..6f4c2c4 100644
---- a/arch/arm/mach-orion5x/board-dt.c
-+++ b/arch/arm/mach-orion5x/board-dt.c
-@@ -20,10 +20,10 @@
- #include
- #include
- #include
--#include
--#include
- #include
- #include
-+#include "orion5x.h"
-+#include "bridge-regs.h"
- #include "common.h"
-
- static struct of_dev_auxdata orion5x_auxdata_lookup[] __initdata = {
-diff --git a/arch/arm/mach-orion5x/board-mss2.c b/arch/arm/mach-orion5x/board-mss2.c
-index 66f9c3b..79202fd 100644
---- a/arch/arm/mach-orion5x/board-mss2.c
-+++ b/arch/arm/mach-orion5x/board-mss2.c
-@@ -17,8 +17,8 @@
- #include
- #include
- #include
--#include
--#include
-+#include "orion5x.h"
-+#include "bridge-regs.h"
- #include "common.h"
-
- /*****************************************************************************
-diff --git a/arch/arm/mach-orion5x/board-rd88f5182.c b/arch/arm/mach-orion5x/board-rd88f5182.c -index 270824b..b7b0f52 100644 ---- a/arch/arm/mach-orion5x/board-rd88f5182.c -+++ b/arch/arm/mach-orion5x/board-rd88f5182.c -@@ -18,8 +18,8 @@ - #include - #include - #include --#include - #include "common.h" -+#include "orion5x.h" - - /***************************************************************************** - * RD-88F5182 Info -diff --git a/arch/arm/mach-orion5x/bridge-regs.h b/arch/arm/mach-orion5x/bridge-regs.h -new file mode 100644 -index 0000000..305598e ---- /dev/null -+++ b/arch/arm/mach-orion5x/bridge-regs.h -@@ -0,0 +1,35 @@ -+/* -+ * Orion CPU Bridge Registers -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#ifndef __ASM_ARCH_BRIDGE_REGS_H -+#define __ASM_ARCH_BRIDGE_REGS_H -+ -+#include "orion5x.h" -+ -+#define CPU_CONF (ORION5X_BRIDGE_VIRT_BASE + 0x100) -+ -+#define CPU_CTRL (ORION5X_BRIDGE_VIRT_BASE + 0x104) -+ -+#define RSTOUTn_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x108) -+#define RSTOUTn_MASK_PHYS (ORION5X_BRIDGE_PHYS_BASE + 0x108) -+ -+#define CPU_SOFT_RESET (ORION5X_BRIDGE_VIRT_BASE + 0x10c) -+ -+#define BRIDGE_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x110) -+ -+#define POWER_MNG_CTRL_REG (ORION5X_BRIDGE_VIRT_BASE + 0x11C) -+ -+#define BRIDGE_INT_TIMER1_CLR (~0x0004) -+ -+#define MAIN_IRQ_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x200) -+ -+#define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x204) -+ -+#define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE + 0x300) -+#define TIMER_PHYS_BASE (ORION5X_BRIDGE_PHYS_BASE + 0x300) -+#endif -diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c -index 2b7889e..70c3366 100644 ---- a/arch/arm/mach-orion5x/common.c -+++ b/arch/arm/mach-orion5x/common.c -@@ -27,14 +27,14 @@ - #include - #include - #include --#include --#include --#include - #include - #include - #include - #include -+ -+#include "bridge-regs.h" - #include "common.h" -+#include "orion5x.h" - - /***************************************************************************** - * I/O Address Mapping -diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c -index 698528f..12f74b4 100644 ---- a/arch/arm/mach-orion5x/db88f5281-setup.c -+++ b/arch/arm/mach-orion5x/db88f5281-setup.c -@@ -23,10 +23,10 @@ - #include - #include - #include --#include - #include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * DB-88F5281 on board devices -diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c -index 96a8c50..cd483bf 100644 ---- a/arch/arm/mach-orion5x/dns323-setup.c -+++ b/arch/arm/mach-orion5x/dns323-setup.c -@@ -33,8 +33,8 @@ - #include - #include - #include --#include - #include -+#include "orion5x.h" - #include "common.h" - #include "mpp.h" - -diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h -deleted file mode 100644 -index 5766e3f..0000000 ---- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h -+++ /dev/null -@@ -1,37 +0,0 @@ --/* -- * arch/arm/mach-orion5x/include/mach/bridge-regs.h -- * -- * Orion CPU Bridge Registers -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. 
This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. -- */ -- --#ifndef __ASM_ARCH_BRIDGE_REGS_H --#define __ASM_ARCH_BRIDGE_REGS_H -- --#include -- --#define CPU_CONF (ORION5X_BRIDGE_VIRT_BASE + 0x100) -- --#define CPU_CTRL (ORION5X_BRIDGE_VIRT_BASE + 0x104) -- --#define RSTOUTn_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x108) --#define RSTOUTn_MASK_PHYS (ORION5X_BRIDGE_PHYS_BASE + 0x108) -- --#define CPU_SOFT_RESET (ORION5X_BRIDGE_VIRT_BASE + 0x10c) -- --#define BRIDGE_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x110) -- --#define POWER_MNG_CTRL_REG (ORION5X_BRIDGE_VIRT_BASE + 0x11C) -- --#define BRIDGE_INT_TIMER1_CLR (~0x0004) -- --#define MAIN_IRQ_CAUSE (ORION5X_BRIDGE_VIRT_BASE + 0x200) -- --#define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE + 0x204) -- --#define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE + 0x300) --#define TIMER_PHYS_BASE (ORION5X_BRIDGE_PHYS_BASE + 0x300) --#endif -diff --git a/arch/arm/mach-orion5x/include/mach/hardware.h b/arch/arm/mach-orion5x/include/mach/hardware.h -deleted file mode 100644 -index 3957354..0000000 ---- a/arch/arm/mach-orion5x/include/mach/hardware.h -+++ /dev/null -@@ -1,14 +0,0 @@ --/* -- * arch/arm/mach-orion5x/include/mach/hardware.h -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License version 2 as -- * published by the Free Software Foundation. -- */ -- --#ifndef __ASM_ARCH_HARDWARE_H --#define __ASM_ARCH_HARDWARE_H -- --#include "orion5x.h" -- --#endif -diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h -deleted file mode 100644 -index 4b8703c..0000000 ---- a/arch/arm/mach-orion5x/include/mach/irqs.h -+++ /dev/null -@@ -1,60 +0,0 @@ --/* -- * arch/arm/mach-orion5x/include/mach/irqs.h -- * -- * IRQ definitions for Orion SoC -- * -- * Maintainer: Tzachi Perelstein -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. 
-- */ -- --#ifndef __ASM_ARCH_IRQS_H --#define __ASM_ARCH_IRQS_H -- --/* -- * Orion Main Interrupt Controller -- */ --#define IRQ_ORION5X_BRIDGE (1 + 0) --#define IRQ_ORION5X_DOORBELL_H2C (1 + 1) --#define IRQ_ORION5X_DOORBELL_C2H (1 + 2) --#define IRQ_ORION5X_UART0 (1 + 3) --#define IRQ_ORION5X_UART1 (1 + 4) --#define IRQ_ORION5X_I2C (1 + 5) --#define IRQ_ORION5X_GPIO_0_7 (1 + 6) --#define IRQ_ORION5X_GPIO_8_15 (1 + 7) --#define IRQ_ORION5X_GPIO_16_23 (1 + 8) --#define IRQ_ORION5X_GPIO_24_31 (1 + 9) --#define IRQ_ORION5X_PCIE0_ERR (1 + 10) --#define IRQ_ORION5X_PCIE0_INT (1 + 11) --#define IRQ_ORION5X_USB1_CTRL (1 + 12) --#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14) --#define IRQ_ORION5X_PCI_ERR (1 + 15) --#define IRQ_ORION5X_USB_BR_ERR (1 + 16) --#define IRQ_ORION5X_USB0_CTRL (1 + 17) --#define IRQ_ORION5X_ETH_RX (1 + 18) --#define IRQ_ORION5X_ETH_TX (1 + 19) --#define IRQ_ORION5X_ETH_MISC (1 + 20) --#define IRQ_ORION5X_ETH_SUM (1 + 21) --#define IRQ_ORION5X_ETH_ERR (1 + 22) --#define IRQ_ORION5X_IDMA_ERR (1 + 23) --#define IRQ_ORION5X_IDMA_0 (1 + 24) --#define IRQ_ORION5X_IDMA_1 (1 + 25) --#define IRQ_ORION5X_IDMA_2 (1 + 26) --#define IRQ_ORION5X_IDMA_3 (1 + 27) --#define IRQ_ORION5X_CESA (1 + 28) --#define IRQ_ORION5X_SATA (1 + 29) --#define IRQ_ORION5X_XOR0 (1 + 30) --#define IRQ_ORION5X_XOR1 (1 + 31) -- --/* -- * Orion General Purpose Pins -- */ --#define IRQ_ORION5X_GPIO_START 33 --#define NR_GPIO_IRQS 32 -- --#define ORION5X_NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) -- -- --#endif -diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h -deleted file mode 100644 -index 7be7c2e..0000000 ---- a/arch/arm/mach-orion5x/include/mach/orion5x.h -+++ /dev/null -@@ -1,148 +0,0 @@ --/* -- * arch/arm/mach-orion5x/include/mach/orion5x.h -- * -- * Generic definitions of Orion SoC flavors: -- * Orion-1, Orion-VoIP, Orion-NAS, Orion-2, and Orion-1-90. -- * -- * Maintainer: Tzachi Perelstein -- * -- * This file is licensed under the terms of the GNU General Public -- * License version 2. This program is licensed "as is" without any -- * warranty of any kind, whether express or implied. 
-- */ -- --#ifndef __ASM_ARCH_ORION5X_H --#define __ASM_ARCH_ORION5X_H -- --#include -- --/***************************************************************************** -- * Orion Address Maps -- * -- * phys -- * e0000000 PCIe MEM space -- * e8000000 PCI MEM space -- * f0000000 PCIe WA space (Orion-1/Orion-NAS only) -- * f1000000 on-chip peripheral registers -- * f2000000 PCIe I/O space -- * f2100000 PCI I/O space -- * f2200000 SRAM dedicated for the crypto unit -- * f4000000 device bus mappings (boot) -- * fa000000 device bus mappings (cs0) -- * fa800000 device bus mappings (cs2) -- * fc000000 device bus mappings (cs0/cs1) -- * -- * virt phys size -- * fe000000 f1000000 1M on-chip peripheral registers -- * fee00000 f2000000 64K PCIe I/O space -- * fee10000 f2100000 64K PCI I/O space -- * fd000000 f0000000 16M PCIe WA space (Orion-1/Orion-NAS only) -- ****************************************************************************/ --#define ORION5X_REGS_PHYS_BASE 0xf1000000 --#define ORION5X_REGS_VIRT_BASE IOMEM(0xfe000000) --#define ORION5X_REGS_SIZE SZ_1M -- --#define ORION5X_PCIE_IO_PHYS_BASE 0xf2000000 --#define ORION5X_PCIE_IO_BUS_BASE 0x00000000 --#define ORION5X_PCIE_IO_SIZE SZ_64K -- --#define ORION5X_PCI_IO_PHYS_BASE 0xf2100000 --#define ORION5X_PCI_IO_BUS_BASE 0x00010000 --#define ORION5X_PCI_IO_SIZE SZ_64K -- --#define ORION5X_SRAM_PHYS_BASE (0xf2200000) --#define ORION5X_SRAM_SIZE SZ_8K -- --/* Relevant only for Orion-1/Orion-NAS */ --#define ORION5X_PCIE_WA_PHYS_BASE 0xf0000000 --#define ORION5X_PCIE_WA_VIRT_BASE IOMEM(0xfd000000) --#define ORION5X_PCIE_WA_SIZE SZ_16M -- --#define ORION5X_PCIE_MEM_PHYS_BASE 0xe0000000 --#define ORION5X_PCIE_MEM_SIZE SZ_128M -- --#define ORION5X_PCI_MEM_PHYS_BASE 0xe8000000 --#define ORION5X_PCI_MEM_SIZE SZ_128M -- --/******************************************************************************* -- * Orion Registers Map -- ******************************************************************************/ -- --#define ORION5X_DDR_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x00000) --#define ORION5X_DDR_WINS_BASE (ORION5X_DDR_PHYS_BASE + 0x1500) --#define ORION5X_DDR_WINS_SZ (0x10) --#define ORION5X_DDR_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x00000) --#define ORION5X_DEV_BUS_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x10000) --#define ORION5X_DEV_BUS_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x10000) --#define ORION5X_DEV_BUS_REG(x) (ORION5X_DEV_BUS_VIRT_BASE + (x)) --#define GPIO_VIRT_BASE ORION5X_DEV_BUS_REG(0x0100) --#define SPI_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x0600) --#define I2C_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x1000) --#define UART0_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x2000) --#define UART0_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE + 0x2000) --#define UART1_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x2100) --#define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE + 0x2100) -- --#define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x20000) --#define ORION5X_BRIDGE_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x20000) --#define ORION5X_BRIDGE_WINS_BASE (ORION5X_BRIDGE_PHYS_BASE) --#define ORION5X_BRIDGE_WINS_SZ (0x80) -- --#define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x30000) -- --#define ORION5X_PCIE_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x40000) -- --#define ORION5X_USB0_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x50000) --#define ORION5X_USB0_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x50000) -- --#define ORION5X_XOR_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x60900) --#define ORION5X_XOR_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x60900) -- --#define ORION5X_ETH_PHYS_BASE 
(ORION5X_REGS_PHYS_BASE + 0x70000) --#define ORION5X_ETH_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x70000) -- --#define ORION5X_SATA_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x80000) --#define ORION5X_SATA_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x80000) -- --#define ORION5X_CRYPTO_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x90000) -- --#define ORION5X_USB1_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0xa0000) --#define ORION5X_USB1_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0xa0000) -- --/******************************************************************************* -- * Device Bus Registers -- ******************************************************************************/ --#define MPP_0_7_CTRL ORION5X_DEV_BUS_REG(0x000) --#define MPP_8_15_CTRL ORION5X_DEV_BUS_REG(0x004) --#define MPP_16_19_CTRL ORION5X_DEV_BUS_REG(0x050) --#define MPP_DEV_CTRL ORION5X_DEV_BUS_REG(0x008) --#define MPP_RESET_SAMPLE ORION5X_DEV_BUS_REG(0x010) --#define DEV_BANK_0_PARAM ORION5X_DEV_BUS_REG(0x45c) --#define DEV_BANK_1_PARAM ORION5X_DEV_BUS_REG(0x460) --#define DEV_BANK_2_PARAM ORION5X_DEV_BUS_REG(0x464) --#define DEV_BANK_BOOT_PARAM ORION5X_DEV_BUS_REG(0x46c) --#define DEV_BUS_CTRL ORION5X_DEV_BUS_REG(0x4c0) --#define DEV_BUS_INT_CAUSE ORION5X_DEV_BUS_REG(0x4d0) --#define DEV_BUS_INT_MASK ORION5X_DEV_BUS_REG(0x4d4) -- --/******************************************************************************* -- * Supported Devices & Revisions -- ******************************************************************************/ --/* Orion-1 (88F5181) and Orion-VoIP (88F5181L) */ --#define MV88F5181_DEV_ID 0x5181 --#define MV88F5181_REV_B1 3 --#define MV88F5181L_REV_A0 8 --#define MV88F5181L_REV_A1 9 --/* Orion-NAS (88F5182) */ --#define MV88F5182_DEV_ID 0x5182 --#define MV88F5182_REV_A2 2 --/* Orion-2 (88F5281) */ --#define MV88F5281_DEV_ID 0x5281 --#define MV88F5281_REV_D0 4 --#define MV88F5281_REV_D1 5 --#define MV88F5281_REV_D2 6 --/* Orion-1-90 (88F6183) */ --#define MV88F6183_DEV_ID 0x6183 --#define MV88F6183_REV_B0 3 -- --#endif -diff --git a/arch/arm/mach-orion5x/include/mach/uncompress.h b/arch/arm/mach-orion5x/include/mach/uncompress.h -index abd26b5..25e5cb9 100644 ---- a/arch/arm/mach-orion5x/include/mach/uncompress.h -+++ b/arch/arm/mach-orion5x/include/mach/uncompress.h -@@ -1,6 +1,4 @@ - /* -- * arch/arm/mach-orion5x/include/mach/uncompress.h -- * - * Tzachi Perelstein - * - * This file is licensed under the terms of the GNU General Public -@@ -9,8 +7,8 @@ - */ - - #include --#include - -+#define UART0_PHYS_BASE (0xf1000000 + 0x12000) - #define SERIAL_BASE ((unsigned char *)UART0_PHYS_BASE) - - static void putc(const char c) -diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c -index 8678db1..de980ef 100644 ---- a/arch/arm/mach-orion5x/irq.c -+++ b/arch/arm/mach-orion5x/irq.c -@@ -13,10 +13,10 @@ - #include - #include - #include --#include - #include - #include - #include -+#include "bridge-regs.h" - #include "common.h" - - static int __initdata gpio0_irqs[4] = { -diff --git a/arch/arm/mach-orion5x/irqs.h b/arch/arm/mach-orion5x/irqs.h -new file mode 100644 -index 0000000..506c8e0 ---- /dev/null -+++ b/arch/arm/mach-orion5x/irqs.h -@@ -0,0 +1,58 @@ -+/* -+ * IRQ definitions for Orion SoC -+ * -+ * Maintainer: Tzachi Perelstein -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#ifndef __ASM_ARCH_IRQS_H -+#define __ASM_ARCH_IRQS_H -+ -+/* -+ * Orion Main Interrupt Controller -+ */ -+#define IRQ_ORION5X_BRIDGE (1 + 0) -+#define IRQ_ORION5X_DOORBELL_H2C (1 + 1) -+#define IRQ_ORION5X_DOORBELL_C2H (1 + 2) -+#define IRQ_ORION5X_UART0 (1 + 3) -+#define IRQ_ORION5X_UART1 (1 + 4) -+#define IRQ_ORION5X_I2C (1 + 5) -+#define IRQ_ORION5X_GPIO_0_7 (1 + 6) -+#define IRQ_ORION5X_GPIO_8_15 (1 + 7) -+#define IRQ_ORION5X_GPIO_16_23 (1 + 8) -+#define IRQ_ORION5X_GPIO_24_31 (1 + 9) -+#define IRQ_ORION5X_PCIE0_ERR (1 + 10) -+#define IRQ_ORION5X_PCIE0_INT (1 + 11) -+#define IRQ_ORION5X_USB1_CTRL (1 + 12) -+#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14) -+#define IRQ_ORION5X_PCI_ERR (1 + 15) -+#define IRQ_ORION5X_USB_BR_ERR (1 + 16) -+#define IRQ_ORION5X_USB0_CTRL (1 + 17) -+#define IRQ_ORION5X_ETH_RX (1 + 18) -+#define IRQ_ORION5X_ETH_TX (1 + 19) -+#define IRQ_ORION5X_ETH_MISC (1 + 20) -+#define IRQ_ORION5X_ETH_SUM (1 + 21) -+#define IRQ_ORION5X_ETH_ERR (1 + 22) -+#define IRQ_ORION5X_IDMA_ERR (1 + 23) -+#define IRQ_ORION5X_IDMA_0 (1 + 24) -+#define IRQ_ORION5X_IDMA_1 (1 + 25) -+#define IRQ_ORION5X_IDMA_2 (1 + 26) -+#define IRQ_ORION5X_IDMA_3 (1 + 27) -+#define IRQ_ORION5X_CESA (1 + 28) -+#define IRQ_ORION5X_SATA (1 + 29) -+#define IRQ_ORION5X_XOR0 (1 + 30) -+#define IRQ_ORION5X_XOR1 (1 + 31) -+ -+/* -+ * Orion General Purpose Pins -+ */ -+#define IRQ_ORION5X_GPIO_START 33 -+#define NR_GPIO_IRQS 32 -+ -+#define ORION5X_NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) -+ -+ -+#endif -diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c -index b1ebb37..9dc3f59 100644 ---- a/arch/arm/mach-orion5x/kurobox_pro-setup.c -+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c -@@ -23,10 +23,10 @@ - #include - #include - #include --#include - #include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * KUROBOX-PRO Info -diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c -index cb8720e..dfdaa8a 100644 ---- a/arch/arm/mach-orion5x/ls-chl-setup.c -+++ b/arch/arm/mach-orion5x/ls-chl-setup.c -@@ -22,9 +22,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * Linkstation LS-CHL Info -diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c -index 0ddfa23..47ba6e0 100644 ---- a/arch/arm/mach-orion5x/ls_hgl-setup.c -+++ b/arch/arm/mach-orion5x/ls_hgl-setup.c -@@ -21,9 +21,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * Linkstation LS-HGL Info -diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c -index 5b70026..19ef185 100644 ---- a/arch/arm/mach-orion5x/mpp.c -+++ b/arch/arm/mach-orion5x/mpp.c -@@ -11,8 +11,8 @@ - #include - #include - #include --#include - #include -+#include "orion5x.h" - #include "mpp.h" - #include "common.h" - -diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c -index 11985dc..2bf8ec7 100644 ---- a/arch/arm/mach-orion5x/mv2120-setup.c -+++ b/arch/arm/mach-orion5x/mv2120-setup.c -@@ -21,9 +21,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - #define 
MV2120_NOR_BOOT_BASE 0xf4000000 - #define MV2120_NOR_BOOT_SIZE SZ_512K -diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c -index e91abcc..bf6be4c 100644 ---- a/arch/arm/mach-orion5x/net2big-setup.c -+++ b/arch/arm/mach-orion5x/net2big-setup.c -@@ -24,10 +24,10 @@ - #include - #include - #include --#include - #include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * LaCie 2Big Network Info -diff --git a/arch/arm/mach-orion5x/orion5x.h b/arch/arm/mach-orion5x/orion5x.h -new file mode 100644 -index 0000000..3364df3 ---- /dev/null -+++ b/arch/arm/mach-orion5x/orion5x.h -@@ -0,0 +1,146 @@ -+/* -+ * Generic definitions of Orion SoC flavors: -+ * Orion-1, Orion-VoIP, Orion-NAS, Orion-2, and Orion-1-90. -+ * -+ * Maintainer: Tzachi Perelstein -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#ifndef __ASM_ARCH_ORION5X_H -+#define __ASM_ARCH_ORION5X_H -+ -+#include "irqs.h" -+ -+/***************************************************************************** -+ * Orion Address Maps -+ * -+ * phys -+ * e0000000 PCIe MEM space -+ * e8000000 PCI MEM space -+ * f0000000 PCIe WA space (Orion-1/Orion-NAS only) -+ * f1000000 on-chip peripheral registers -+ * f2000000 PCIe I/O space -+ * f2100000 PCI I/O space -+ * f2200000 SRAM dedicated for the crypto unit -+ * f4000000 device bus mappings (boot) -+ * fa000000 device bus mappings (cs0) -+ * fa800000 device bus mappings (cs2) -+ * fc000000 device bus mappings (cs0/cs1) -+ * -+ * virt phys size -+ * fe000000 f1000000 1M on-chip peripheral registers -+ * fee00000 f2000000 64K PCIe I/O space -+ * fee10000 f2100000 64K PCI I/O space -+ * fd000000 f0000000 16M PCIe WA space (Orion-1/Orion-NAS only) -+ ****************************************************************************/ -+#define ORION5X_REGS_PHYS_BASE 0xf1000000 -+#define ORION5X_REGS_VIRT_BASE IOMEM(0xfe000000) -+#define ORION5X_REGS_SIZE SZ_1M -+ -+#define ORION5X_PCIE_IO_PHYS_BASE 0xf2000000 -+#define ORION5X_PCIE_IO_BUS_BASE 0x00000000 -+#define ORION5X_PCIE_IO_SIZE SZ_64K -+ -+#define ORION5X_PCI_IO_PHYS_BASE 0xf2100000 -+#define ORION5X_PCI_IO_BUS_BASE 0x00010000 -+#define ORION5X_PCI_IO_SIZE SZ_64K -+ -+#define ORION5X_SRAM_PHYS_BASE (0xf2200000) -+#define ORION5X_SRAM_SIZE SZ_8K -+ -+/* Relevant only for Orion-1/Orion-NAS */ -+#define ORION5X_PCIE_WA_PHYS_BASE 0xf0000000 -+#define ORION5X_PCIE_WA_VIRT_BASE IOMEM(0xfd000000) -+#define ORION5X_PCIE_WA_SIZE SZ_16M -+ -+#define ORION5X_PCIE_MEM_PHYS_BASE 0xe0000000 -+#define ORION5X_PCIE_MEM_SIZE SZ_128M -+ -+#define ORION5X_PCI_MEM_PHYS_BASE 0xe8000000 -+#define ORION5X_PCI_MEM_SIZE SZ_128M -+ -+/******************************************************************************* -+ * Orion Registers Map -+ ******************************************************************************/ -+ -+#define ORION5X_DDR_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x00000) -+#define ORION5X_DDR_WINS_BASE (ORION5X_DDR_PHYS_BASE + 0x1500) -+#define ORION5X_DDR_WINS_SZ (0x10) -+#define ORION5X_DDR_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x00000) -+#define ORION5X_DEV_BUS_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x10000) -+#define ORION5X_DEV_BUS_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x10000) -+#define ORION5X_DEV_BUS_REG(x) (ORION5X_DEV_BUS_VIRT_BASE + (x)) -+#define GPIO_VIRT_BASE 
ORION5X_DEV_BUS_REG(0x0100) -+#define SPI_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x0600) -+#define I2C_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x1000) -+#define UART0_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x2000) -+#define UART0_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE + 0x2000) -+#define UART1_PHYS_BASE (ORION5X_DEV_BUS_PHYS_BASE + 0x2100) -+#define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE + 0x2100) -+ -+#define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x20000) -+#define ORION5X_BRIDGE_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x20000) -+#define ORION5X_BRIDGE_WINS_BASE (ORION5X_BRIDGE_PHYS_BASE) -+#define ORION5X_BRIDGE_WINS_SZ (0x80) -+ -+#define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x30000) -+ -+#define ORION5X_PCIE_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x40000) -+ -+#define ORION5X_USB0_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x50000) -+#define ORION5X_USB0_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x50000) -+ -+#define ORION5X_XOR_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x60900) -+#define ORION5X_XOR_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x60900) -+ -+#define ORION5X_ETH_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x70000) -+#define ORION5X_ETH_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x70000) -+ -+#define ORION5X_SATA_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x80000) -+#define ORION5X_SATA_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0x80000) -+ -+#define ORION5X_CRYPTO_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0x90000) -+ -+#define ORION5X_USB1_PHYS_BASE (ORION5X_REGS_PHYS_BASE + 0xa0000) -+#define ORION5X_USB1_VIRT_BASE (ORION5X_REGS_VIRT_BASE + 0xa0000) -+ -+/******************************************************************************* -+ * Device Bus Registers -+ ******************************************************************************/ -+#define MPP_0_7_CTRL ORION5X_DEV_BUS_REG(0x000) -+#define MPP_8_15_CTRL ORION5X_DEV_BUS_REG(0x004) -+#define MPP_16_19_CTRL ORION5X_DEV_BUS_REG(0x050) -+#define MPP_DEV_CTRL ORION5X_DEV_BUS_REG(0x008) -+#define MPP_RESET_SAMPLE ORION5X_DEV_BUS_REG(0x010) -+#define DEV_BANK_0_PARAM ORION5X_DEV_BUS_REG(0x45c) -+#define DEV_BANK_1_PARAM ORION5X_DEV_BUS_REG(0x460) -+#define DEV_BANK_2_PARAM ORION5X_DEV_BUS_REG(0x464) -+#define DEV_BANK_BOOT_PARAM ORION5X_DEV_BUS_REG(0x46c) -+#define DEV_BUS_CTRL ORION5X_DEV_BUS_REG(0x4c0) -+#define DEV_BUS_INT_CAUSE ORION5X_DEV_BUS_REG(0x4d0) -+#define DEV_BUS_INT_MASK ORION5X_DEV_BUS_REG(0x4d4) -+ -+/******************************************************************************* -+ * Supported Devices & Revisions -+ ******************************************************************************/ -+/* Orion-1 (88F5181) and Orion-VoIP (88F5181L) */ -+#define MV88F5181_DEV_ID 0x5181 -+#define MV88F5181_REV_B1 3 -+#define MV88F5181L_REV_A0 8 -+#define MV88F5181L_REV_A1 9 -+/* Orion-NAS (88F5182) */ -+#define MV88F5182_DEV_ID 0x5182 -+#define MV88F5182_REV_A2 2 -+/* Orion-2 (88F5281) */ -+#define MV88F5281_DEV_ID 0x5281 -+#define MV88F5281_REV_D0 4 -+#define MV88F5281_REV_D1 5 -+#define MV88F5281_REV_D2 6 -+/* Orion-1-90 (88F6183) */ -+#define MV88F6183_DEV_ID 0x6183 -+#define MV88F6183_REV_B0 3 -+ -+#endif -diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c -index b02f394..ecb998e 100644 ---- a/arch/arm/mach-orion5x/pci.c -+++ b/arch/arm/mach-orion5x/pci.c -@@ -19,8 +19,8 @@ - #include - #include - #include --#include - #include "common.h" -+#include "orion5x.h" - - /***************************************************************************** - * Orion has one PCIe controller and one PCI controller. 
-diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c -index 69a6e5b..c742e7b 100644 ---- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c -@@ -20,9 +20,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * RD-88F5181L FXO Info -diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c -index e19f8b7..7e977b7 100644 ---- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c -@@ -21,9 +21,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * RD-88F5181L GE Info -diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c -index 180a4f9..fe3e67c 100644 ---- a/arch/arm/mach-orion5x/rd88f5182-setup.c -+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c -@@ -23,9 +23,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * RD-88F5182 Info -diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c -index cc5bdbe..4bf80dd 100644 ---- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c -+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c -@@ -22,8 +22,8 @@ - #include - #include - #include --#include - #include "common.h" -+#include "orion5x.h" - - static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = { - .phy_addr = -1, -diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c -index ad20575..deb5e29 100644 ---- a/arch/arm/mach-orion5x/terastation_pro2-setup.c -+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c -@@ -22,9 +22,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - - /***************************************************************************** - * Terastation Pro 2/Live Info -diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c -index 1cfc364..7bd671b 100644 ---- a/arch/arm/mach-orion5x/ts209-setup.c -+++ b/arch/arm/mach-orion5x/ts209-setup.c -@@ -25,9 +25,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - #include "tsx09-common.h" - - #define QNAP_TS209_NOR_BOOT_BASE 0xf4000000 -diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c -index bc985cf..a77613b 100644 ---- a/arch/arm/mach-orion5x/ts409-setup.c -+++ b/arch/arm/mach-orion5x/ts409-setup.c -@@ -27,9 +27,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - #include "tsx09-common.h" - - /***************************************************************************** -diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c -index 5a61a66..b67ace4 100644 ---- a/arch/arm/mach-orion5x/ts78xx-setup.c -+++ b/arch/arm/mach-orion5x/ts78xx-setup.c -@@ -23,9 +23,9 @@ - #include - #include - #include --#include - #include "common.h" - #include "mpp.h" -+#include "orion5x.h" - #include "ts78xx-fpga.h" - - 
/***************************************************************************** -diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c -index d42e006..8977498 100644 ---- a/arch/arm/mach-orion5x/tsx09-common.c -+++ b/arch/arm/mach-orion5x/tsx09-common.c -@@ -15,7 +15,7 @@ - #include - #include - #include --#include -+#include "orion5x.h" - #include "tsx09-common.h" - #include "common.h" - -diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c -index bcc6e12..4e1e5c8 100644 ---- a/arch/arm/mach-orion5x/wnr854t-setup.c -+++ b/arch/arm/mach-orion5x/wnr854t-setup.c -@@ -19,7 +19,7 @@ - #include - #include - #include --#include -+#include "orion5x.h" - #include "common.h" - #include "mpp.h" - -diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c -index 4068d7a..61e9027 100644 ---- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c -+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c -@@ -22,7 +22,7 @@ - #include - #include - #include --#include -+#include "orion5x.h" - #include "common.h" - #include "mpp.h" - diff --git a/debian/patches/features/arm/arm-orion5x-multiplatform-support.patch b/debian/patches/features/arm/arm-orion5x-multiplatform-support.patch deleted file mode 100644 index a0bde91c9..000000000 --- a/debian/patches/features/arm/arm-orion5x-multiplatform-support.patch +++ /dev/null @@ -1,147 +0,0 @@ -From: Arnd Bergmann -Date: Wed, 2 Dec 2015 22:27:09 +0100 -Subject: ARM: orion5x: multiplatform support -Origin: https://git.kernel.org/linus/63cddd25fa02dbba294fb09f78ea24d7a9f1c7d9 - -The orion5x platform is now ready to be enabled for multiplatform -support, this patch does the switch over by modifying the Kconfig file, -the defconfig and removing the last mach/*.h header that becomes obsolete -with this. - -Signed-off-by: Arnd Bergmann -Acked-by: Andrew Lunn -Signed-off-by: Gregory CLEMENT ---- - arch/arm/Kconfig | 17 --------- - arch/arm/configs/orion5x_defconfig | 3 ++ - arch/arm/mach-orion5x/Kconfig | 18 +++++++--- - arch/arm/mach-orion5x/Makefile | 2 ++ - arch/arm/mach-orion5x/include/mach/uncompress.h | 46 ------------------------- - 5 files changed, 19 insertions(+), 67 deletions(-) - delete mode 100644 arch/arm/mach-orion5x/include/mach/uncompress.h - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -535,23 +535,6 @@ config ARCH_MV78XX0 - Support for the following Marvell MV78xx0 series SoCs: - MV781x0, MV782x0. - --config ARCH_ORION5X -- bool "Marvell Orion" -- depends on MMU -- select ARCH_REQUIRE_GPIOLIB -- select CPU_FEROCEON -- select GENERIC_CLOCKEVENTS -- select MVEBU_MBUS -- select MULTI_IRQ_HANDLER -- select PCI -- select PLAT_ORION_LEGACY -- select MULTI_IRQ_HANDLER -- select SPARSE_IRQ -- help -- Support for the following Marvell Orion 5x series SoCs: -- Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182), -- Orion-2 (5281), Orion-1-90 (6183). 
--
- config ARCH_MMP
- bool "Marvell PXA168/910/MMP2"
- depends on MMU
---- a/arch/arm/configs/orion5x_defconfig
-+++ b/arch/arm/configs/orion5x_defconfig
-@@ -13,6 +13,9 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- CONFIG_PARTITION_ADVANCED=y
- CONFIG_BSD_DISKLABEL=y
-+CONFIG_ARCH_MULTI_V5=y
-+# CONFIG_ARCH_MULTI_V6 is not set
-+# CONFIG_ARCH_MULTI_V7 is not set
- CONFIG_ARCH_ORION5X=y
- CONFIG_ARCH_ORION5X_DT=y
- CONFIG_MACH_DB88F5281=y
---- a/arch/arm/mach-orion5x/Kconfig
-+++ b/arch/arm/mach-orion5x/Kconfig
-@@ -1,6 +1,18 @@
--if ARCH_ORION5X
-+menuconfig ARCH_ORION5X
-+ bool "Marvell Orion"
-+ depends on MMU && ARCH_MULTI_V5
-+ select ARCH_REQUIRE_GPIOLIB
-+ select CPU_FEROCEON
-+ select GENERIC_CLOCKEVENTS
-+ select MVEBU_MBUS
-+ select PCI
-+ select PLAT_ORION_LEGACY
-+ help
-+ Support for the following Marvell Orion 5x series SoCs:
-+ Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
-+ Orion-2 (5281), Orion-1-90 (6183).
-
--menu "Orion Implementations"
-+if ARCH_ORION5X
-
- config ARCH_ORION5X_DT
- bool "Marvell Orion5x Flattened Device Tree"
-@@ -163,6 +175,4 @@ config MACH_RD88F6183AP_GE
- Say 'Y' here if you want your kernel to support the
- Marvell Orion-1-90 (88F6183) AP GE RD.
-
--endmenu
--
- endif
---- a/arch/arm/mach-orion5x/Makefile
-+++ b/arch/arm/mach-orion5x/Makefile
-@@ -1,3 +1,5 @@
-+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
-+
- obj-y += common.o pci.o irq.o mpp.o
- obj-$(CONFIG_MACH_DB88F5281) += db88f5281-setup.o
- obj-$(CONFIG_MACH_RD88F5182) += rd88f5182-setup.o
---- a/arch/arm/mach-orion5x/include/mach/uncompress.h
-+++ /dev/null
-@@ -1,46 +0,0 @@
--/*
-- * Tzachi Perelstein
-- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
-- */
--
--#include
--
--#define UART0_PHYS_BASE (0xf1000000 + 0x12000)
--#define SERIAL_BASE ((unsigned char *)UART0_PHYS_BASE)
--
--static void putc(const char c)
--{
-- unsigned char *base = SERIAL_BASE;
-- int i;
--
-- for (i = 0; i < 0x1000; i++) {
-- if (base[UART_LSR << 2] & UART_LSR_THRE)
-- break;
-- barrier();
-- }
--
-- base[UART_TX << 2] = c;
--}
--
--static void flush(void)
--{
-- unsigned char *base = SERIAL_BASE;
-- unsigned char mask;
-- int i;
--
-- mask = UART_LSR_TEMT | UART_LSR_THRE;
--
-- for (i = 0; i < 0x1000; i++) {
-- if ((base[UART_LSR << 2] & mask) == mask)
-- break;
-- barrier();
-- }
--}
--
--/*
-- * nothing to do
-- */
--#define arch_decomp_setup()
diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-add-a-compat-string-for-bcm2836-machine-.patch b/debian/patches/features/arm/rpi/arm-bcm2835-add-a-compat-string-for-bcm2836-machine-.patch
deleted file mode 100644
index c64b0c283..000000000
--- a/debian/patches/features/arm/rpi/arm-bcm2835-add-a-compat-string-for-bcm2836-machine-.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Eric Anholt
-Date: Wed, 16 Dec 2015 15:55:14 -0800
-Subject: [2/3] ARM: bcm2835: Add a compat string for bcm2836 machine probe
-Origin: https://github.com/anholt/linux/commit/c1be3c1fc6178ca48750b4e66f1acb7c22b64997
-
-Supporting the 2836 requires using the new interrupt controller, which
-we have support for.
-
-Signed-off-by: Eric Anholt
----
- arch/arm/mach-bcm/board_bcm2835.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/arch/arm/mach-bcm/board_bcm2835.c b/arch/arm/mach-bcm/board_bcm2835.c
-index 0f7b9ea..834d676 100644
---- a/arch/arm/mach-bcm/board_bcm2835.c
-+++ b/arch/arm/mach-bcm/board_bcm2835.c
-@@ -36,7 +36,12 @@ static void __init bcm2835_init(void)
- }
-
- static const char * const bcm2835_compat[] = {
-+#ifdef CONFIG_ARCH_MULTI_V6
- "brcm,bcm2835",
-+#endif
-+#ifdef CONFIG_ARCH_MULTI_V7
-+ "brcm,bcm2836",
-+#endif
- NULL
- };
-
diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-add-devicetree-for-bcm2836-and-raspberry.patch b/debian/patches/features/arm/rpi/arm-bcm2835-add-devicetree-for-bcm2836-and-raspberry.patch
deleted file mode 100644
index 380343049..000000000
--- a/debian/patches/features/arm/rpi/arm-bcm2835-add-devicetree-for-bcm2836-and-raspberry.patch
+++ /dev/null
@@ -1,156 +0,0 @@
-From: Eric Anholt
-Date: Thu, 16 Apr 2015 15:26:45 -0700
-Subject: [3/4] ARM: bcm2835: Add devicetree for bcm2836 and Raspberry Pi 2 B
-Origin: https://github.com/anholt/linux/commit/c33319cd945001741d1b381655c8b7310d756163
-
-The Pi 2 B ends up like a Pi 1 B+, with the same peripherals and
-pinout, but the CPU and memory layout changed to use the 2836.
-
-Signed-off-by: Eric Anholt
----
- arch/arm/boot/dts/Makefile            |  3 +-
- arch/arm/boot/dts/bcm2836-rpi-2-b.dts | 35 ++++++++++++++++
- arch/arm/boot/dts/bcm2836.dtsi        | 78 +++++++++++++++++++++++++++++++++++
- 3 files changed, 115 insertions(+), 1 deletion(-)
- create mode 100644 arch/arm/boot/dts/bcm2836-rpi-2-b.dts
- create mode 100644 arch/arm/boot/dts/bcm2836.dtsi
-
-diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
-index 30bbc37..54e8f6b 100644
---- a/arch/arm/boot/dts/Makefile
-+++ b/arch/arm/boot/dts/Makefile
-@@ -60,7 +60,8 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
- bcm2835-rpi-b.dtb \
- bcm2835-rpi-b-rev2.dtb \
- bcm2835-rpi-b-plus.dtb \
-- bcm2835-rpi-a-plus.dtb
-+ bcm2835-rpi-a-plus.dtb \
-+ bcm2836-rpi-2-b.dtb
- dtb-$(CONFIG_ARCH_BCM_5301X) += \
- bcm4708-asus-rt-ac56u.dtb \
- bcm4708-asus-rt-ac68u.dtb \
-diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
-new file mode 100644
-index 0000000..ff94666
---- /dev/null
-+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
-@@ -0,0 +1,35 @@
-+/dts-v1/;
-+#include "bcm2836.dtsi"
-+#include "bcm2835-rpi.dtsi"
-+
-+/ {
-+ compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
-+ model = "Raspberry Pi 2 Model B";
-+
-+ memory {
-+ reg = <0 0x40000000>;
-+ };
-+
-+ leds {
-+ act {
-+ gpios = <&gpio 47 0>;
-+ };
-+
-+ pwr {
-+ label = "PWR";
-+ gpios = <&gpio 35 0>;
-+ default-state = "keep";
-+ linux,default-trigger = "default-on";
-+ };
-+ };
-+};
-+
-+&gpio {
-+ pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
-+
-+ /* I2S interface */
-+ i2s_alt0: i2s_alt0 {
-+ brcm,pins = <18 19 20 21>;
-+ brcm,function = ;
-+ };
-+};
-diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
-new file mode 100644
-index 0000000..9d0651d
---- /dev/null
-+++ b/arch/arm/boot/dts/bcm2836.dtsi
-@@ -0,0 +1,78 @@
-+#include "bcm283x.dtsi"
-+
-+/ {
-+ compatible = "brcm,bcm2836";
-+
-+ soc {
-+ ranges = <0x7e000000 0x3f000000 0x1000000>,
-+ <0x40000000 0x40000000 0x00001000>;
-+ dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
-+
-+ local_intc: local_intc {
-+ compatible = "brcm,bcm2836-l1-intc";
-+ reg = <0x40000000 0x100>;
-+ interrupt-controller;
-+ #interrupt-cells = <1>;
-+ interrupt-parent = <&local_intc>;
-+ };
-+
-+ arm-pmu {
-+ compatible = "arm,cortex-a7-pmu"; -+ interrupt-parent = <&local_intc>; -+ interrupts = <9>; -+ }; -+ }; -+ -+ timer { -+ compatible = "arm,armv7-timer"; -+ interrupt-parent = <&local_intc>; -+ interrupts = <0>, // PHYS_SECURE_PPI -+ <1>, // PHYS_NONSECURE_PPI -+ <3>, // VIRT_PPI -+ <2>; // HYP_PPI -+ always-on; -+ }; -+ -+ cpus: cpus { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ v7_cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a7"; -+ reg = <0xf00>; -+ clock-frequency = <800000000>; -+ }; -+ -+ v7_cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a7"; -+ reg = <0xf01>; -+ clock-frequency = <800000000>; -+ }; -+ -+ v7_cpu2: cpu@2 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a7"; -+ reg = <0xf02>; -+ clock-frequency = <800000000>; -+ }; -+ -+ v7_cpu3: cpu@3 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a7"; -+ reg = <0xf03>; -+ clock-frequency = <800000000>; -+ }; -+ }; -+}; -+ -+/* Make the BCM2835-style global interrupt controller be a child of the -+ * CPU-local interrupt controller. -+ */ -+&intc { -+ compatible = "brcm,bcm2836-armctrl-ic"; -+ reg = <0x7e00b200 0x200>; -+ interrupt-parent = <&local_intc>; -+ interrupts = <8>; -+}; diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-add-kconfig-support-for-bcm2836.patch b/debian/patches/features/arm/rpi/arm-bcm2835-add-kconfig-support-for-bcm2836.patch deleted file mode 100644 index 54171678c..000000000 --- a/debian/patches/features/arm/rpi/arm-bcm2835-add-kconfig-support-for-bcm2836.patch +++ /dev/null @@ -1,76 +0,0 @@ -From: Eric Anholt -Date: Tue, 24 Feb 2015 15:07:55 +0000 -Subject: [3/3] ARM: bcm2835: Add Kconfig support for bcm2836 -Origin: https://github.com/anholt/linux/commit/5234c34e4cd7695647ccc1cabb50c3e7720dd3fb - -This should be a complete port of bcm2835 functionality to bcm2836 -(Raspberry Pi 2). 
-
-Signed-off-by: Eric Anholt
----
- arch/arm/Kconfig.debug    | 10 ++++++++--
- arch/arm/mach-bcm/Kconfig |  9 +++++----
- 2 files changed, 13 insertions(+), 6 deletions(-)
-
-diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
-index 259c0ca..957b876 100644
---- a/arch/arm/Kconfig.debug
-+++ b/arch/arm/Kconfig.debug
-@@ -143,7 +143,12 @@ choice
-
- config DEBUG_BCM2835
- bool "Kernel low-level debugging on BCM2835 PL011 UART"
-- depends on ARCH_BCM2835
-+ depends on ARCH_BCM2835 && ARCH_MULTI_V6
-+ select DEBUG_UART_PL01X
-+
-+ config DEBUG_BCM2836
-+ bool "Kernel low-level debugging on BCM2836 PL011 UART"
-+ depends on ARCH_BCM2835 && ARCH_MULTI_V7
- select DEBUG_UART_PL01X
-
- config DEBUG_BCM_5301X
-@@ -1402,6 +1407,7 @@ config DEBUG_UART_PHYS
- default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
- default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
- default 0x20201000 if DEBUG_BCM2835
-+ default 0x3f201000 if DEBUG_BCM2836
- default 0x3e000000 if DEBUG_BCM_KONA_UART
- default 0x4000e400 if DEBUG_LL_UART_EFM32
- default 0x40081000 if DEBUG_LPC18XX_UART0
-@@ -1485,7 +1491,7 @@ config DEBUG_UART_VIRT
- default 0xf0000be0 if ARCH_EBSA110
- default 0xf0010000 if DEBUG_ASM9260_UART
- default 0xf01fb000 if DEBUG_NOMADIK_UART
-- default 0xf0201000 if DEBUG_BCM2835
-+ default 0xf0201000 if DEBUG_BCM2835 || DEBUG_BCM2836
- default 0xf1000300 if DEBUG_BCM_5301X
- default 0xf1002000 if DEBUG_MT8127_UART0
- default 0xf1006000 if DEBUG_MT6589_UART0
-diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
-index 8c53c55..3b2acf4 100644
---- a/arch/arm/mach-bcm/Kconfig
-+++ b/arch/arm/mach-bcm/Kconfig
-@@ -122,17 +122,18 @@ config ARCH_BCM_MOBILE_SMP
- comment "Other Architectures"
-
- config ARCH_BCM2835
-- bool "Broadcom BCM2835 family" if ARCH_MULTI_V6
-+ bool "Broadcom BCM2835 family" if ARCH_MULTI_V6 || ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
- select ARM_AMBA
-- select ARM_ERRATA_411920
-+ select ARM_ERRATA_411920 if ARCH_MULTI_V6
- select ARM_TIMER_SP804
-+ select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
- select CLKSRC_OF
- select PINCTRL
- select PINCTRL_BCM2835
- help
-- This enables support for the Broadcom BCM2835 SoC. This SoC is
-- used in the Raspberry Pi and Roku 2 devices.
-+ This enables support for the Broadcom BCM2835 and BCM2836 SoCs.
-+ This SoC is used in the Raspberry Pi and Roku 2 devices.
-
- config ARCH_BCM_63XX
- bool "Broadcom BCM63xx DSL SoC" if ARCH_MULTI_V7
diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-add-rpi-power-domain-driver.patch b/debian/patches/features/arm/rpi/arm-bcm2835-add-rpi-power-domain-driver.patch
deleted file mode 100644
index 8e187e2b2..000000000
--- a/debian/patches/features/arm/rpi/arm-bcm2835-add-rpi-power-domain-driver.patch
+++ /dev/null
@@ -1,376 +0,0 @@
-From: Alexander Aring
-Date: Wed, 16 Dec 2015 16:26:47 -0800
-Subject: [3/3] ARM: bcm2835: add rpi power domain driver
-Origin: https://github.com/anholt/linux/commit/a09cd356586d33f64cbe64ee4f5c1a7c4a6abee5
-
-This patch adds support for several power domains on Raspberry Pi,
-including USB (so it can be enabled even if the bootloader didn't do
-it), and graphics.
-
-This patch is the combined work of Eric Anholt (who wrote USB support
-inside of the Raspberry Pi firmware driver, and wrote the non-USB
-domain support) and Alexander Aring (who separated the original USB
-work out from the firmware driver).
-
-Signed-off-by: Alexander Aring
-Signed-off-by: Eric Anholt
-Reviewed-by: Ulf Hansson
-Reviewed-by: Kevin Hilman
----
- drivers/soc/Kconfig                           |   1 +
- drivers/soc/Makefile                          |   1 +
- drivers/soc/bcm/Kconfig                       |   9 +
- drivers/soc/bcm/Makefile                      |   1 +
- drivers/soc/bcm/raspberrypi-power.c           | 247 ++++++++++++++++++++++++++
- include/dt-bindings/power/raspberrypi-power.h |  41 +++++
- 6 files changed, 300 insertions(+)
- create mode 100644 drivers/soc/bcm/Kconfig
- create mode 100644 drivers/soc/bcm/Makefile
- create mode 100644 drivers/soc/bcm/raspberrypi-power.c
- create mode 100644 include/dt-bindings/power/raspberrypi-power.h
-
-diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
-index 4e853ed..8441426 100644
---- a/drivers/soc/Kconfig
-+++ b/drivers/soc/Kconfig
-@@ -1,5 +1,6 @@
- menu "SOC (System On Chip) specific Drivers"
-
-+source "drivers/soc/bcm/Kconfig"
- source "drivers/soc/brcmstb/Kconfig"
- source "drivers/soc/mediatek/Kconfig"
- source "drivers/soc/qcom/Kconfig"
-diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
-index f2ba2e9..f3f955c 100644
---- a/drivers/soc/Makefile
-+++ b/drivers/soc/Makefile
-@@ -2,6 +2,7 @@
- # Makefile for the Linux Kernel SOC specific device drivers.
- #
-
-+obj-y += bcm/
- obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
- obj-$(CONFIG_MACH_DOVE) += dove/
- obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
-diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
-new file mode 100644
-index 0000000..5ba1827
---- /dev/null
-+++ b/drivers/soc/bcm/Kconfig
-@@ -0,0 +1,9 @@
-+config RASPBERRYPI_POWER
-+ bool "Raspberry Pi power domain driver"
-+ depends on ARCH_BCM2835 || COMPILE_TEST
-+ depends on RASPBERRYPI_FIRMWARE
-+ select PM_GENERIC_DOMAINS if PM
-+ select PM_GENERIC_DOMAINS_OF if PM
-+ help
-+ This enables support for the RPi power domains which can be enabled
-+ or disabled via the RPi firmware.
-diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
-new file mode 100644
-index 0000000..63aa3eb
---- /dev/null
-+++ b/drivers/soc/bcm/Makefile
-@@ -0,0 +1 @@
-+obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
-diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
-new file mode 100644
-index 0000000..fe96a8b
---- /dev/null
-+++ b/drivers/soc/bcm/raspberrypi-power.c
-@@ -0,0 +1,247 @@
-+/* (C) 2015 Pengutronix, Alexander Aring
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * Authors:
-+ * Alexander Aring
-+ * Eric Anholt
-+ */
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+/*
-+ * Firmware indices for the old power domains interface. Only a few
-+ * of them were actually implemented.
-+ */
-+#define RPI_OLD_POWER_DOMAIN_USB 3
-+#define RPI_OLD_POWER_DOMAIN_V3D 10
-+
-+struct rpi_power_domain {
-+ u32 domain;
-+ bool enabled;
-+ bool old_interface;
-+ struct generic_pm_domain base;
-+ struct rpi_firmware *fw;
-+};
-+
-+struct rpi_power_domains {
-+ bool has_new_interface;
-+ struct genpd_onecell_data xlate;
-+ struct rpi_firmware *fw;
-+ struct rpi_power_domain domains[RPI_POWER_DOMAIN_COUNT];
-+};
-+
-+/*
-+ * Packet definition used by RPI_FIRMWARE_SET_POWER_STATE and
-+ * RPI_FIRMWARE_SET_DOMAIN_STATE
-+ */
-+struct rpi_power_domain_packet {
-+ u32 domain;
-+ u32 on;
-+} __packet;
-+
-+/*
-+ * Asks the firmware to enable or disable power on a specific power
-+ * domain.
-+ */
-+static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on)
-+{
-+ struct rpi_power_domain_packet packet;
-+
-+ packet.domain = rpi_domain->domain;
-+ packet.on = on;
-+ return rpi_firmware_property(rpi_domain->fw,
-+ rpi_domain->old_interface ?
-+ RPI_FIRMWARE_SET_POWER_STATE :
-+ RPI_FIRMWARE_SET_DOMAIN_STATE,
-+ &packet, sizeof(packet));
-+}
-+
-+static int rpi_domain_off(struct generic_pm_domain *domain)
-+{
-+ struct rpi_power_domain *rpi_domain =
-+ container_of(domain, struct rpi_power_domain, base);
-+
-+ return rpi_firmware_set_power(rpi_domain, false);
-+}
-+
-+static int rpi_domain_on(struct generic_pm_domain *domain)
-+{
-+ struct rpi_power_domain *rpi_domain =
-+ container_of(domain, struct rpi_power_domain, base);
-+
-+ return rpi_firmware_set_power(rpi_domain, true);
-+}
-+
-+static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
-+ int xlate_index, const char *name)
-+{
-+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
-+
-+ dom->fw = rpi_domains->fw;
-+
-+ dom->base.name = name;
-+ dom->base.power_on = rpi_domain_on;
-+ dom->base.power_off = rpi_domain_off;
-+
-+ /*
-+ * Treat all power domains as off at boot.
-+ *
-+ * The firmware itself may be keeping some domains on, but
-+ * from Linux's perspective all we control is the refcounts
-+ * that we give to the firmware, and we can't ask the firmware
-+ * to turn off something that we haven't ourselves turned on.
-+ */
-+ pm_genpd_init(&dom->base, NULL, true);
-+
-+ rpi_domains->xlate.domains[xlate_index] = &dom->base;
-+}
-+
-+static void rpi_init_power_domain(struct rpi_power_domains *rpi_domains,
-+ int xlate_index, const char *name)
-+{
-+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
-+
-+ if (!rpi_domains->has_new_interface)
-+ return;
-+
-+ /* The DT binding index is the firmware's domain index minus one. */
-+ dom->domain = xlate_index + 1;
-+
-+ rpi_common_init_power_domain(rpi_domains, xlate_index, name);
-+}
-+
-+static void rpi_init_old_power_domain(struct rpi_power_domains *rpi_domains,
-+ int xlate_index, int domain,
-+ const char *name)
-+{
-+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
-+
-+ dom->old_interface = true;
-+ dom->domain = domain;
-+
-+ rpi_common_init_power_domain(rpi_domains, xlate_index, name);
-+}
-+
-+/*
-+ * Detects whether the firmware supports the new power domains interface.
-+ *
-+ * The firmware doesn't actually return an error on an unknown tag,
-+ * and just skips over it, so we do the detection by putting an
-+ * unexpected value in the return field and checking if it was
-+ * unchanged.
-+ */
-+static bool
-+rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains)
-+{
-+ struct rpi_power_domain_packet packet;
-+ int ret;
-+
-+ packet.domain = RPI_POWER_DOMAIN_ARM;
-+ packet.on = ~0;
-+
-+ ret = rpi_firmware_property(rpi_domains->fw,
-+ RPI_FIRMWARE_GET_DOMAIN_STATE,
-+ &packet, sizeof(packet));
-+
-+ return ret == 0 && packet.on != ~0;
-+}
-+
-+static int rpi_power_probe(struct platform_device *pdev)
-+{
-+ struct device_node *fw_np;
-+ struct device *dev = &pdev->dev;
-+ struct rpi_power_domains *rpi_domains;
-+
-+ rpi_domains = devm_kzalloc(dev, sizeof(*rpi_domains), GFP_KERNEL);
-+ if (!rpi_domains)
-+ return -ENOMEM;
-+
-+ rpi_domains->xlate.domains =
-+ devm_kzalloc(dev, sizeof(*rpi_domains->xlate.domains) *
-+ RPI_POWER_DOMAIN_COUNT, GFP_KERNEL);
-+ if (!rpi_domains->xlate.domains)
-+ return -ENOMEM;
-+
-+ rpi_domains->xlate.num_domains = RPI_POWER_DOMAIN_COUNT;
-+
-+ fw_np = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
-+ if (!fw_np) {
-+ dev_err(&pdev->dev, "no firmware node\n");
-+ return -ENODEV;
-+ }
-+
-+ rpi_domains->fw = rpi_firmware_get(fw_np);
-+ of_node_put(fw_np);
-+ if (!rpi_domains->fw)
-+ return -EPROBE_DEFER;
-+
-+ rpi_domains->has_new_interface =
-+ rpi_has_new_domain_support(rpi_domains);
-+
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C0, "I2C0");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C1, "I2C1");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C2, "I2C2");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VIDEO_SCALER,
-+ "VIDEO_SCALER");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VPU1, "VPU1");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_HDMI, "HDMI");
-+
-+ /*
-+ * Use the old firmware interface for USB power, so that we
-+ * can turn it on even if the firmware hasn't been updated.
-+ */
-+ rpi_init_old_power_domain(rpi_domains, RPI_POWER_DOMAIN_USB,
-+ RPI_OLD_POWER_DOMAIN_USB, "USB");
-+
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VEC, "VEC");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_JPEG, "JPEG");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_H264, "H264");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_V3D, "V3D");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ISP, "ISP");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM0, "UNICAM0");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM1, "UNICAM1");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2RX, "CCP2RX");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CSI2, "CSI2");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CPI, "CPI");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI0, "DSI0");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI1, "DSI1");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_TRANSPOSER,
-+ "TRANSPOSER");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2TX, "CCP2TX");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CDP, "CDP");
-+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ARM, "ARM");
-+
-+ of_genpd_add_provider_onecell(dev->of_node, &rpi_domains->xlate);
-+
-+ platform_set_drvdata(pdev, rpi_domains);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id rpi_power_of_match[] = {
-+ { .compatible = "raspberrypi,bcm2835-power", },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, rpi_power_of_match);
-+
-+static struct platform_driver rpi_power_driver = {
-+ .driver = {
-+ .name = "raspberrypi-power",
-+ .of_match_table = rpi_power_of_match,
-+ },
-+ .probe = rpi_power_probe,
-+};
-+builtin_platform_driver(rpi_power_driver);
-+
-+MODULE_AUTHOR("Alexander Aring ");
-+MODULE_AUTHOR("Eric Anholt ");
-+MODULE_DESCRIPTION("Raspberry Pi power domain driver");
-+MODULE_LICENSE("GPL v2");
-diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h
-new file mode 100644
-index 0000000..b3ff8e0
---- /dev/null
-+++ b/include/dt-bindings/power/raspberrypi-power.h
-@@ -0,0 +1,41 @@
-+/*
-+ * Copyright © 2015 Broadcom
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H
-+#define _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H
-+
-+/* These power domain indices are the firmware interface's indices
-+ * minus one.
-+ */ -+#define RPI_POWER_DOMAIN_I2C0 0 -+#define RPI_POWER_DOMAIN_I2C1 1 -+#define RPI_POWER_DOMAIN_I2C2 2 -+#define RPI_POWER_DOMAIN_VIDEO_SCALER 3 -+#define RPI_POWER_DOMAIN_VPU1 4 -+#define RPI_POWER_DOMAIN_HDMI 5 -+#define RPI_POWER_DOMAIN_USB 6 -+#define RPI_POWER_DOMAIN_VEC 7 -+#define RPI_POWER_DOMAIN_JPEG 8 -+#define RPI_POWER_DOMAIN_H264 9 -+#define RPI_POWER_DOMAIN_V3D 10 -+#define RPI_POWER_DOMAIN_ISP 11 -+#define RPI_POWER_DOMAIN_UNICAM0 12 -+#define RPI_POWER_DOMAIN_UNICAM1 13 -+#define RPI_POWER_DOMAIN_CCP2RX 14 -+#define RPI_POWER_DOMAIN_CSI2 15 -+#define RPI_POWER_DOMAIN_CPI 16 -+#define RPI_POWER_DOMAIN_DSI0 17 -+#define RPI_POWER_DOMAIN_DSI1 18 -+#define RPI_POWER_DOMAIN_TRANSPOSER 19 -+#define RPI_POWER_DOMAIN_CCP2TX 20 -+#define RPI_POWER_DOMAIN_CDP 21 -+#define RPI_POWER_DOMAIN_ARM 22 -+ -+#define RPI_POWER_DOMAIN_COUNT 23 -+ -+#endif /* _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H */ diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-add-the-auxiliary-clocks-to-the-device-t.patch b/debian/patches/features/arm/rpi/arm-bcm2835-add-the-auxiliary-clocks-to-the-device-t.patch deleted file mode 100644 index a886b0852..000000000 --- a/debian/patches/features/arm/rpi/arm-bcm2835-add-the-auxiliary-clocks-to-the-device-t.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Eric Anholt -Date: Tue, 15 Dec 2015 15:35:59 -0800 -Subject: [4/4] ARM: bcm2835: Add the auxiliary clocks to the device tree. -Origin: https://github.com/anholt/linux/commit/53b6084357a44d7c34044504e1bf149d9156934f - -These will be used for enabling UART1, SPI1, and SPI2. - -Signed-off-by: Eric Anholt ---- - arch/arm/boot/dts/bcm283x.dtsi | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi -index 8a7e727..971e741 100644 ---- a/arch/arm/boot/dts/bcm283x.dtsi -+++ b/arch/arm/boot/dts/bcm283x.dtsi -@@ -152,6 +152,13 @@ - status = "disabled"; - }; - -+ aux: aux@0x7e215000 { -+ compatible = "brcm,bcm2835-aux"; -+ #clock-cells = <1>; -+ reg = <0x7e215000 0x8>; -+ clocks = <&clocks BCM2835_CLOCK_VPU>; -+ }; -+ - sdhci: sdhci@7e300000 { - compatible = "brcm,bcm2835-sdhci"; - reg = <0x7e300000 0x100>; diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-define-two-new-packets-from-the-latest-f.patch b/debian/patches/features/arm/rpi/arm-bcm2835-define-two-new-packets-from-the-latest-f.patch deleted file mode 100644 index 24f394b16..000000000 --- a/debian/patches/features/arm/rpi/arm-bcm2835-define-two-new-packets-from-the-latest-f.patch +++ /dev/null @@ -1,32 +0,0 @@ -From: Eric Anholt -Date: Tue, 1 Dec 2015 16:49:12 -0800 -Subject: [1/3] ARM: bcm2835: Define two new packets from the latest firmware. -Origin: https://github.com/anholt/linux/commit/60d56333e869be6ad6926cdba3ba974512b2183b - -These packets give us direct access to the firmware's power management -code, as opposed to GET/SET_POWER_STATE packets that only had a couple -of domains implemented. 
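As a usage sketch, not part of the quoted patch: the probe code above already issues RPI_FIRMWARE_GET_DOMAIN_STATE, and the matching SET side would look roughly as follows, assuming the same rpi_power_domain_packet layout. rpi_set_domain_state() is a hypothetical helper name, not something this patch defines.

	/* Hedged sketch: turn a firmware-managed power domain on or off
	 * through the new property tag. Mirrors the GET call made in
	 * rpi_power_probe(); the helper name is an assumption.
	 */
	static int rpi_set_domain_state(struct rpi_firmware *fw,
					u32 fw_domain, bool on)
	{
		struct rpi_power_domain_packet packet;

		packet.domain = fw_domain;
		packet.on = on;

		return rpi_firmware_property(fw, RPI_FIRMWARE_SET_DOMAIN_STATE,
					     &packet, sizeof(packet));
	}
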
- -Signed-off-by: Eric Anholt -Reviewed-by: Kevin Hilman ---- - include/soc/bcm2835/raspberrypi-firmware.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h -index c07d74a..3fb3571 100644 ---- a/include/soc/bcm2835/raspberrypi-firmware.h -+++ b/include/soc/bcm2835/raspberrypi-firmware.h -@@ -72,10 +72,12 @@ enum rpi_firmware_property_tag { - RPI_FIRMWARE_SET_ENABLE_QPU = 0x00030012, - RPI_FIRMWARE_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014, - RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020, -+ RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030, - RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001, - RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002, - RPI_FIRMWARE_SET_VOLTAGE = 0x00038003, - RPI_FIRMWARE_SET_TURBO = 0x00038009, -+ RPI_FIRMWARE_SET_DOMAIN_STATE = 0x00038030, - - /* Dispmanx TAGS */ - RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001, diff --git a/debian/patches/features/arm/rpi/arm-bcm2835-move-the-cpu-peripheral-include-out-of-c.patch b/debian/patches/features/arm/rpi/arm-bcm2835-move-the-cpu-peripheral-include-out-of-c.patch deleted file mode 100644 index 0155dd0e6..000000000 --- a/debian/patches/features/arm/rpi/arm-bcm2835-move-the-cpu-peripheral-include-out-of-c.patch +++ /dev/null @@ -1,68 +0,0 @@ -From: Eric Anholt -Date: Wed, 16 Dec 2015 15:55:12 -0800 -Subject: [2/4] ARM: bcm2835: Move the CPU/peripheral include out of common RPi - DT. -Origin: https://github.com/anholt/linux/commit/bafa68c08c33ddde3bc10d2d7e5d3b77b4a6c8ed - -For Raspberry Pi 2, we want to use the same general pin assignment -bits, but need to use bcm2836.dtsi for the CPU instead. - -Signed-off-by: Eric Anholt ---- - arch/arm/boot/dts/bcm2835-rpi-a-plus.dts | 1 + - arch/arm/boot/dts/bcm2835-rpi-b-plus.dts | 1 + - arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts | 1 + - arch/arm/boot/dts/bcm2835-rpi-b.dts | 1 + - arch/arm/boot/dts/bcm2835-rpi.dtsi | 2 -- - 5 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts -index b2bff43..228614f 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts -@@ -1,4 +1,5 @@ - /dts-v1/; -+#include "bcm2835.dtsi" - #include "bcm2835-rpi.dtsi" - - / { -diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts -index 668442b..ef54050 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts -@@ -1,4 +1,5 @@ - /dts-v1/; -+#include "bcm2835.dtsi" - #include "bcm2835-rpi.dtsi" - - / { -diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts -index eab8b591..86f1f2f 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts -@@ -1,4 +1,5 @@ - /dts-v1/; -+#include "bcm2835.dtsi" - #include "bcm2835-rpi.dtsi" - - / { -diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts -index ff6b2d1..4859e9d 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-b.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts -@@ -1,4 +1,5 @@ - /dts-v1/; -+#include "bcm2835.dtsi" - #include "bcm2835-rpi.dtsi" - - / { -diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi -index 3572f03..3afb9fe 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi.dtsi -+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi -@@ -1,5 +1,3 @@ --#include "bcm2835.dtsi" -- - / { - memory { - reg = <0 0x10000000>; diff --git 
a/debian/patches/features/arm/rpi/arm-bcm2835-split-the-dt-for-peripherals-from-the-dt.patch b/debian/patches/features/arm/rpi/arm-bcm2835-split-the-dt-for-peripherals-from-the-dt.patch deleted file mode 100644 index 1fa7268ab..000000000 --- a/debian/patches/features/arm/rpi/arm-bcm2835-split-the-dt-for-peripherals-from-the-dt.patch +++ /dev/null @@ -1,440 +0,0 @@ -From: Eric Anholt -Date: Wed, 16 Dec 2015 13:24:40 -0800 -Subject: [1/4] ARM: bcm2835: Split the DT for peripherals from the DT for the - CPU -Origin: https://github.com/anholt/linux/commit/482626063d446eac1809e025a79ad0a7d45bc22d - -The set of peripherals remained constant across bcm2835 (Raspberry Pi -1) and bcm2836 (Raspberry Pi 2), but the CPU was swapped out. Split -the files so that we can include just peripheral setup in 2836. - -Signed-off-by: Eric Anholt ---- - arch/arm/boot/dts/bcm2835.dtsi | 194 +------------------------------------- - arch/arm/boot/dts/bcm283x.dtsi | 205 +++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 206 insertions(+), 193 deletions(-) - create mode 100644 arch/arm/boot/dts/bcm283x.dtsi - -diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi -index aef64de..b83b326 100644 ---- a/arch/arm/boot/dts/bcm2835.dtsi -+++ b/arch/arm/boot/dts/bcm2835.dtsi -@@ -1,206 +1,14 @@ --#include --#include --#include "skeleton.dtsi" -+#include "bcm283x.dtsi" - - / { - compatible = "brcm,bcm2835"; -- model = "BCM2835"; -- interrupt-parent = <&intc>; -- -- chosen { -- bootargs = "earlyprintk console=ttyAMA0"; -- }; - - soc { -- compatible = "simple-bus"; -- #address-cells = <1>; -- #size-cells = <1>; - ranges = <0x7e000000 0x20000000 0x02000000>; - dma-ranges = <0x40000000 0x00000000 0x20000000>; - -- timer@7e003000 { -- compatible = "brcm,bcm2835-system-timer"; -- reg = <0x7e003000 0x1000>; -- interrupts = <1 0>, <1 1>, <1 2>, <1 3>; -- /* This could be a reference to BCM2835_CLOCK_TIMER, -- * but we don't have the driver using the common clock -- * support yet. -- */ -- clock-frequency = <1000000>; -- }; -- -- dma: dma@7e007000 { -- compatible = "brcm,bcm2835-dma"; -- reg = <0x7e007000 0xf00>; -- interrupts = <1 16>, -- <1 17>, -- <1 18>, -- <1 19>, -- <1 20>, -- <1 21>, -- <1 22>, -- <1 23>, -- <1 24>, -- <1 25>, -- <1 26>, -- <1 27>, -- <1 28>; -- -- #dma-cells = <1>; -- brcm,dma-channel-mask = <0x7f35>; -- }; -- -- intc: interrupt-controller@7e00b200 { -- compatible = "brcm,bcm2835-armctrl-ic"; -- reg = <0x7e00b200 0x200>; -- interrupt-controller; -- #interrupt-cells = <2>; -- }; -- -- watchdog@7e100000 { -- compatible = "brcm,bcm2835-pm-wdt"; -- reg = <0x7e100000 0x28>; -- }; -- -- clocks: cprman@7e101000 { -- compatible = "brcm,bcm2835-cprman"; -- #clock-cells = <1>; -- reg = <0x7e101000 0x2000>; -- -- /* CPRMAN derives everything from the platform's -- * oscillator. -- */ -- clocks = <&clk_osc>; -- }; -- -- rng@7e104000 { -- compatible = "brcm,bcm2835-rng"; -- reg = <0x7e104000 0x10>; -- }; -- -- mailbox: mailbox@7e00b800 { -- compatible = "brcm,bcm2835-mbox"; -- reg = <0x7e00b880 0x40>; -- interrupts = <0 1>; -- #mbox-cells = <0>; -- }; -- -- gpio: gpio@7e200000 { -- compatible = "brcm,bcm2835-gpio"; -- reg = <0x7e200000 0xb4>; -- /* -- * The GPIO IP block is designed for 3 banks of GPIOs. -- * Each bank has a GPIO interrupt for itself. -- * There is an overall "any bank" interrupt. -- * In order, these are GIC interrupts 17, 18, 19, 20. 
-- * Since the BCM2835 only has 2 banks, the 2nd bank -- * interrupt output appears to be mirrored onto the -- * 3rd bank's interrupt signal. -- * So, a bank0 interrupt shows up on 17, 20, and -- * a bank1 interrupt shows up on 18, 19, 20! -- */ -- interrupts = <2 17>, <2 18>, <2 19>, <2 20>; -- -- gpio-controller; -- #gpio-cells = <2>; -- -- interrupt-controller; -- #interrupt-cells = <2>; -- }; -- -- uart0: uart@7e201000 { -- compatible = "brcm,bcm2835-pl011", "arm,pl011", "arm,primecell"; -- reg = <0x7e201000 0x1000>; -- interrupts = <2 25>; -- clocks = <&clocks BCM2835_CLOCK_UART>, -- <&clocks BCM2835_CLOCK_VPU>; -- clock-names = "uartclk", "apb_pclk"; -- arm,primecell-periphid = <0x00241011>; -- }; -- -- i2s: i2s@7e203000 { -- compatible = "brcm,bcm2835-i2s"; -- reg = <0x7e203000 0x20>, -- <0x7e101098 0x02>; -- -- dmas = <&dma 2>, -- <&dma 3>; -- dma-names = "tx", "rx"; -- status = "disabled"; -- }; -- -- spi: spi@7e204000 { -- compatible = "brcm,bcm2835-spi"; -- reg = <0x7e204000 0x1000>; -- interrupts = <2 22>; -- clocks = <&clocks BCM2835_CLOCK_VPU>; -- #address-cells = <1>; -- #size-cells = <0>; -- status = "disabled"; -- }; -- -- i2c0: i2c@7e205000 { -- compatible = "brcm,bcm2835-i2c"; -- reg = <0x7e205000 0x1000>; -- interrupts = <2 21>; -- clocks = <&clocks BCM2835_CLOCK_VPU>; -- #address-cells = <1>; -- #size-cells = <0>; -- status = "disabled"; -- }; -- -- sdhci: sdhci@7e300000 { -- compatible = "brcm,bcm2835-sdhci"; -- reg = <0x7e300000 0x100>; -- interrupts = <2 30>; -- clocks = <&clocks BCM2835_CLOCK_EMMC>; -- status = "disabled"; -- }; -- -- i2c1: i2c@7e804000 { -- compatible = "brcm,bcm2835-i2c"; -- reg = <0x7e804000 0x1000>; -- interrupts = <2 21>; -- clocks = <&clocks BCM2835_CLOCK_VPU>; -- #address-cells = <1>; -- #size-cells = <0>; -- status = "disabled"; -- }; -- -- i2c2: i2c@7e805000 { -- compatible = "brcm,bcm2835-i2c"; -- reg = <0x7e805000 0x1000>; -- interrupts = <2 21>; -- clocks = <&clocks BCM2835_CLOCK_VPU>; -- #address-cells = <1>; -- #size-cells = <0>; -- status = "disabled"; -- }; -- -- usb@7e980000 { -- compatible = "brcm,bcm2835-usb"; -- reg = <0x7e980000 0x10000>; -- interrupts = <1 9>; -- }; -- - arm-pmu { - compatible = "arm,arm1176-pmu"; - }; - }; -- -- clocks { -- compatible = "simple-bus"; -- #address-cells = <1>; -- #size-cells = <0>; -- -- /* The oscillator is the root of the clock tree. */ -- clk_osc: clock@3 { -- compatible = "fixed-clock"; -- reg = <3>; -- #clock-cells = <0>; -- clock-output-names = "osc"; -- clock-frequency = <19200000>; -- }; -- -- }; - }; -diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi -new file mode 100644 -index 0000000..8a7e727 ---- /dev/null -+++ b/arch/arm/boot/dts/bcm283x.dtsi -@@ -0,0 +1,205 @@ -+#include -+#include -+#include "skeleton.dtsi" -+ -+/* This include file covers the common peripherals and configuration between -+ * bcm2835 and bcm2836 implementations, leaving the CPU configuration to -+ * bcm2835.dtsi and bcm2836.dtsi. -+ */ -+ -+/ { -+ compatible = "brcm,bcm2835"; -+ model = "BCM2835"; -+ interrupt-parent = <&intc>; -+ -+ chosen { -+ bootargs = "earlyprintk console=ttyAMA0"; -+ }; -+ -+ soc { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ timer@7e003000 { -+ compatible = "brcm,bcm2835-system-timer"; -+ reg = <0x7e003000 0x1000>; -+ interrupts = <1 0>, <1 1>, <1 2>, <1 3>; -+ /* This could be a reference to BCM2835_CLOCK_TIMER, -+ * but we don't have the driver using the common clock -+ * support yet. 
-+ */ -+ clock-frequency = <1000000>; -+ }; -+ -+ dma: dma@7e007000 { -+ compatible = "brcm,bcm2835-dma"; -+ reg = <0x7e007000 0xf00>; -+ interrupts = <1 16>, -+ <1 17>, -+ <1 18>, -+ <1 19>, -+ <1 20>, -+ <1 21>, -+ <1 22>, -+ <1 23>, -+ <1 24>, -+ <1 25>, -+ <1 26>, -+ <1 27>, -+ <1 28>; -+ -+ #dma-cells = <1>; -+ brcm,dma-channel-mask = <0x7f35>; -+ }; -+ -+ intc: interrupt-controller@7e00b200 { -+ compatible = "brcm,bcm2835-armctrl-ic"; -+ reg = <0x7e00b200 0x200>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ watchdog@7e100000 { -+ compatible = "brcm,bcm2835-pm-wdt"; -+ reg = <0x7e100000 0x28>; -+ }; -+ -+ clocks: cprman@7e101000 { -+ compatible = "brcm,bcm2835-cprman"; -+ #clock-cells = <1>; -+ reg = <0x7e101000 0x2000>; -+ -+ /* CPRMAN derives everything from the platform's -+ * oscillator. -+ */ -+ clocks = <&clk_osc>; -+ }; -+ -+ rng@7e104000 { -+ compatible = "brcm,bcm2835-rng"; -+ reg = <0x7e104000 0x10>; -+ }; -+ -+ mailbox: mailbox@7e00b800 { -+ compatible = "brcm,bcm2835-mbox"; -+ reg = <0x7e00b880 0x40>; -+ interrupts = <0 1>; -+ #mbox-cells = <0>; -+ }; -+ -+ gpio: gpio@7e200000 { -+ compatible = "brcm,bcm2835-gpio"; -+ reg = <0x7e200000 0xb4>; -+ /* -+ * The GPIO IP block is designed for 3 banks of GPIOs. -+ * Each bank has a GPIO interrupt for itself. -+ * There is an overall "any bank" interrupt. -+ * In order, these are GIC interrupts 17, 18, 19, 20. -+ * Since the BCM2835 only has 2 banks, the 2nd bank -+ * interrupt output appears to be mirrored onto the -+ * 3rd bank's interrupt signal. -+ * So, a bank0 interrupt shows up on 17, 20, and -+ * a bank1 interrupt shows up on 18, 19, 20! -+ */ -+ interrupts = <2 17>, <2 18>, <2 19>, <2 20>; -+ -+ gpio-controller; -+ #gpio-cells = <2>; -+ -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ uart0: uart@7e201000 { -+ compatible = "brcm,bcm2835-pl011", "arm,pl011", "arm,primecell"; -+ reg = <0x7e201000 0x1000>; -+ interrupts = <2 25>; -+ clocks = <&clocks BCM2835_CLOCK_UART>, -+ <&clocks BCM2835_CLOCK_VPU>; -+ clock-names = "uartclk", "apb_pclk"; -+ arm,primecell-periphid = <0x00241011>; -+ }; -+ -+ i2s: i2s@7e203000 { -+ compatible = "brcm,bcm2835-i2s"; -+ reg = <0x7e203000 0x20>, -+ <0x7e101098 0x02>; -+ -+ dmas = <&dma 2>, -+ <&dma 3>; -+ dma-names = "tx", "rx"; -+ status = "disabled"; -+ }; -+ -+ spi: spi@7e204000 { -+ compatible = "brcm,bcm2835-spi"; -+ reg = <0x7e204000 0x1000>; -+ interrupts = <2 22>; -+ clocks = <&clocks BCM2835_CLOCK_VPU>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "disabled"; -+ }; -+ -+ i2c0: i2c@7e205000 { -+ compatible = "brcm,bcm2835-i2c"; -+ reg = <0x7e205000 0x1000>; -+ interrupts = <2 21>; -+ clocks = <&clocks BCM2835_CLOCK_VPU>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "disabled"; -+ }; -+ -+ sdhci: sdhci@7e300000 { -+ compatible = "brcm,bcm2835-sdhci"; -+ reg = <0x7e300000 0x100>; -+ interrupts = <2 30>; -+ clocks = <&clocks BCM2835_CLOCK_EMMC>; -+ status = "disabled"; -+ }; -+ -+ i2c1: i2c@7e804000 { -+ compatible = "brcm,bcm2835-i2c"; -+ reg = <0x7e804000 0x1000>; -+ interrupts = <2 21>; -+ clocks = <&clocks BCM2835_CLOCK_VPU>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "disabled"; -+ }; -+ -+ i2c2: i2c@7e805000 { -+ compatible = "brcm,bcm2835-i2c"; -+ reg = <0x7e805000 0x1000>; -+ interrupts = <2 21>; -+ clocks = <&clocks BCM2835_CLOCK_VPU>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "disabled"; -+ }; -+ -+ usb@7e980000 { -+ compatible = "brcm,bcm2835-usb"; -+ reg = <0x7e980000 0x10000>; -+ 
interrupts = <1 9>; -+ }; -+ }; -+ -+ clocks { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ /* The oscillator is the root of the clock tree. */ -+ clk_osc: clock@3 { -+ compatible = "fixed-clock"; -+ reg = <3>; -+ #clock-cells = <0>; -+ clock-output-names = "osc"; -+ clock-frequency = <19200000>; -+ }; -+ -+ }; -+}; diff --git a/debian/patches/features/arm/rpi/drm-create-a-driver-hook-for-allocating-gem-object-s.patch b/debian/patches/features/arm/rpi/drm-create-a-driver-hook-for-allocating-gem-object-s.patch deleted file mode 100644 index 5247f3a00..000000000 --- a/debian/patches/features/arm/rpi/drm-create-a-driver-hook-for-allocating-gem-object-s.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Eric Anholt -Date: Mon, 30 Nov 2015 10:55:13 -0800 -Subject: drm: Create a driver hook for allocating GEM object structs. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=10028c5ab107d3765c7fc282b6c45324d1602155 - -The CMA helpers had no way for a driver to extend the struct with its -own fields. Since the CMA helpers are mostly "Allocate a -drm_gem_cma_object, then fill in a few fields", it's hard to write as -pure helpers without passing in a driver callback for the allocate -step. - -Signed-off-by: Eric Anholt -Reviewed-by: Daniel Vetter ---- - drivers/gpu/drm/drm_gem_cma_helper.c | 10 ++++++---- - include/drm/drmP.h | 7 +++++++ - 2 files changed, 13 insertions(+), 4 deletions(-) - -diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c -index e109b49..0f7b00ba 100644 ---- a/drivers/gpu/drm/drm_gem_cma_helper.c -+++ b/drivers/gpu/drm/drm_gem_cma_helper.c -@@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size) - struct drm_gem_object *gem_obj; - int ret; - -- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); -- if (!cma_obj) -+ if (drm->driver->gem_create_object) -+ gem_obj = drm->driver->gem_create_object(drm, size); -+ else -+ gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); -+ if (!gem_obj) - return ERR_PTR(-ENOMEM); -- -- gem_obj = &cma_obj->base; -+ cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); - - ret = drm_gem_object_init(drm, gem_obj, size); - if (ret) -diff --git a/include/drm/drmP.h b/include/drm/drmP.h -index 0b921ae..22ff162 100644 ---- a/include/drm/drmP.h -+++ b/include/drm/drmP.h -@@ -580,6 +580,13 @@ struct drm_driver { - int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); - void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); - -+ /** -+ * Hook for allocating the GEM object struct, for use by core -+ * helpers. -+ */ -+ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, -+ size_t size); -+ - /* prime: */ - /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ - int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-a-bo-cache.patch b/debian/patches/features/arm/rpi/drm-vc4-add-a-bo-cache.patch deleted file mode 100644 index 919675460..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-add-a-bo-cache.patch +++ /dev/null @@ -1,509 +0,0 @@ -From: Eric Anholt -Date: Fri, 9 Oct 2015 20:25:07 -0700 -Subject: [01/16] drm/vc4: Add a BO cache. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=c826a6e1064419f78855463cf29ce9e8b9d25bf4 - -We need to allocate new BOs in the kernel as part of each frame, but -the CMA allocator is way too slow for that. 
As an optimization, keep -track of recently-freed BOs and reuse them, with a 1 second timeout to -fully free them back to the system. - -This improves 3D performance by about 15%. - -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_bo.c | 336 +++++++++++++++++++++++++++++++++++++- - drivers/gpu/drm/vc4/vc4_debugfs.c | 1 + - drivers/gpu/drm/vc4/vc4_drv.c | 6 +- - drivers/gpu/drm/vc4/vc4_drv.h | 49 +++++- - 4 files changed, 384 insertions(+), 8 deletions(-) - -diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c -index ab9f510..18faa5b 100644 ---- a/drivers/gpu/drm/vc4/vc4_bo.c -+++ b/drivers/gpu/drm/vc4/vc4_bo.c -@@ -12,19 +12,229 @@ - * access to system memory with no MMU in between. To support it, we - * use the GEM CMA helper functions to allocate contiguous ranges of - * physical memory for our BOs. -+ * -+ * Since the CMA allocator is very slow, we keep a cache of recently -+ * freed BOs around so that the kernel's allocation of objects for 3D -+ * rendering can return quickly. - */ - - #include "vc4_drv.h" - --struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size) -+static void vc4_bo_stats_dump(struct vc4_dev *vc4) -+{ -+ DRM_INFO("num bos allocated: %d\n", -+ vc4->bo_stats.num_allocated); -+ DRM_INFO("size bos allocated: %dkb\n", -+ vc4->bo_stats.size_allocated / 1024); -+ DRM_INFO("num bos used: %d\n", -+ vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached); -+ DRM_INFO("size bos used: %dkb\n", -+ (vc4->bo_stats.size_allocated - -+ vc4->bo_stats.size_cached) / 1024); -+ DRM_INFO("num bos cached: %d\n", -+ vc4->bo_stats.num_cached); -+ DRM_INFO("size bos cached: %dkb\n", -+ vc4->bo_stats.size_cached / 1024); -+} -+ -+#ifdef CONFIG_DEBUG_FS -+int vc4_bo_stats_debugfs(struct seq_file *m, void *unused) -+{ -+ struct drm_info_node *node = (struct drm_info_node *)m->private; -+ struct drm_device *dev = node->minor->dev; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ struct vc4_bo_stats stats; -+ -+ /* Take a snapshot of the current stats with the lock held. */ -+ mutex_lock(&vc4->bo_lock); -+ stats = vc4->bo_stats; -+ mutex_unlock(&vc4->bo_lock); -+ -+ seq_printf(m, "num bos allocated: %d\n", -+ stats.num_allocated); -+ seq_printf(m, "size bos allocated: %dkb\n", -+ stats.size_allocated / 1024); -+ seq_printf(m, "num bos used: %d\n", -+ stats.num_allocated - stats.num_cached); -+ seq_printf(m, "size bos used: %dkb\n", -+ (stats.size_allocated - stats.size_cached) / 1024); -+ seq_printf(m, "num bos cached: %d\n", -+ stats.num_cached); -+ seq_printf(m, "size bos cached: %dkb\n", -+ stats.size_cached / 1024); -+ -+ return 0; -+} -+#endif -+ -+static uint32_t bo_page_index(size_t size) -+{ -+ return (size / PAGE_SIZE) - 1; -+} -+ -+/* Must be called with bo_lock held. */ -+static void vc4_bo_destroy(struct vc4_bo *bo) - { -+ struct drm_gem_object *obj = &bo->base.base; -+ struct vc4_dev *vc4 = to_vc4_dev(obj->dev); -+ -+ vc4->bo_stats.num_allocated--; -+ vc4->bo_stats.size_allocated -= obj->size; -+ drm_gem_cma_free_object(obj); -+} -+ -+/* Must be called with bo_lock held. 
*/ -+static void vc4_bo_remove_from_cache(struct vc4_bo *bo) -+{ -+ struct drm_gem_object *obj = &bo->base.base; -+ struct vc4_dev *vc4 = to_vc4_dev(obj->dev); -+ -+ vc4->bo_stats.num_cached--; -+ vc4->bo_stats.size_cached -= obj->size; -+ -+ list_del(&bo->unref_head); -+ list_del(&bo->size_head); -+} -+ -+static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev, -+ size_t size) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ uint32_t page_index = bo_page_index(size); -+ -+ if (vc4->bo_cache.size_list_size <= page_index) { -+ uint32_t new_size = max(vc4->bo_cache.size_list_size * 2, -+ page_index + 1); -+ struct list_head *new_list; -+ uint32_t i; -+ -+ new_list = kmalloc_array(new_size, sizeof(struct list_head), -+ GFP_KERNEL); -+ if (!new_list) -+ return NULL; -+ -+ /* Rebase the old cached BO lists to their new list -+ * head locations. -+ */ -+ for (i = 0; i < vc4->bo_cache.size_list_size; i++) { -+ struct list_head *old_list = -+ &vc4->bo_cache.size_list[i]; -+ -+ if (list_empty(old_list)) -+ INIT_LIST_HEAD(&new_list[i]); -+ else -+ list_replace(old_list, &new_list[i]); -+ } -+ /* And initialize the brand new BO list heads. */ -+ for (i = vc4->bo_cache.size_list_size; i < new_size; i++) -+ INIT_LIST_HEAD(&new_list[i]); -+ -+ kfree(vc4->bo_cache.size_list); -+ vc4->bo_cache.size_list = new_list; -+ vc4->bo_cache.size_list_size = new_size; -+ } -+ -+ return &vc4->bo_cache.size_list[page_index]; -+} -+ -+void vc4_bo_cache_purge(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ mutex_lock(&vc4->bo_lock); -+ while (!list_empty(&vc4->bo_cache.time_list)) { -+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list, -+ struct vc4_bo, unref_head); -+ vc4_bo_remove_from_cache(bo); -+ vc4_bo_destroy(bo); -+ } -+ mutex_unlock(&vc4->bo_lock); -+} -+ -+static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev, -+ uint32_t size) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ uint32_t page_index = bo_page_index(size); -+ struct vc4_bo *bo = NULL; -+ -+ size = roundup(size, PAGE_SIZE); -+ -+ mutex_lock(&vc4->bo_lock); -+ if (page_index >= vc4->bo_cache.size_list_size) -+ goto out; -+ -+ if (list_empty(&vc4->bo_cache.size_list[page_index])) -+ goto out; -+ -+ bo = list_first_entry(&vc4->bo_cache.size_list[page_index], -+ struct vc4_bo, size_head); -+ vc4_bo_remove_from_cache(bo); -+ kref_init(&bo->base.base.refcount); -+ -+out: -+ mutex_unlock(&vc4->bo_lock); -+ return bo; -+} -+ -+/** -+ * vc4_gem_create_object - Implementation of driver->gem_create_object. -+ * -+ * This lets the CMA helpers allocate object structs for us, and keep -+ * our BO stats correct. -+ */ -+struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ struct vc4_bo *bo; -+ -+ bo = kzalloc(sizeof(*bo), GFP_KERNEL); -+ if (!bo) -+ return ERR_PTR(-ENOMEM); -+ -+ mutex_lock(&vc4->bo_lock); -+ vc4->bo_stats.num_allocated++; -+ vc4->bo_stats.size_allocated += size; -+ mutex_unlock(&vc4->bo_lock); -+ -+ return &bo->base.base; -+} -+ -+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, -+ bool from_cache) -+{ -+ size_t size = roundup(unaligned_size, PAGE_SIZE); -+ struct vc4_dev *vc4 = to_vc4_dev(dev); - struct drm_gem_cma_object *cma_obj; - -- cma_obj = drm_gem_cma_create(dev, size); -- if (IS_ERR(cma_obj)) -+ if (size == 0) - return NULL; -- else -- return to_vc4_bo(&cma_obj->base); -+ -+ /* First, try to get a vc4_bo from the kernel BO cache. 
*/ -+ if (from_cache) { -+ struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size); -+ -+ if (bo) -+ return bo; -+ } -+ -+ cma_obj = drm_gem_cma_create(dev, size); -+ if (IS_ERR(cma_obj)) { -+ /* -+ * If we've run out of CMA memory, kill the cache of -+ * CMA allocations we've got laying around and try again. -+ */ -+ vc4_bo_cache_purge(dev); -+ -+ cma_obj = drm_gem_cma_create(dev, size); -+ if (IS_ERR(cma_obj)) { -+ DRM_ERROR("Failed to allocate from CMA:\n"); -+ vc4_bo_stats_dump(vc4); -+ return NULL; -+ } -+ } -+ -+ return to_vc4_bo(&cma_obj->base); - } - - int vc4_dumb_create(struct drm_file *file_priv, -@@ -41,7 +251,7 @@ int vc4_dumb_create(struct drm_file *file_priv, - if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; - -- bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE)); -+ bo = vc4_bo_create(dev, args->size, false); - if (!bo) - return -ENOMEM; - -@@ -50,3 +260,117 @@ int vc4_dumb_create(struct drm_file *file_priv, - - return ret; - } -+ -+/* Must be called with bo_lock held. */ -+static void vc4_bo_cache_free_old(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ unsigned long expire_time = jiffies - msecs_to_jiffies(1000); -+ -+ while (!list_empty(&vc4->bo_cache.time_list)) { -+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list, -+ struct vc4_bo, unref_head); -+ if (time_before(expire_time, bo->free_time)) { -+ mod_timer(&vc4->bo_cache.time_timer, -+ round_jiffies_up(jiffies + -+ msecs_to_jiffies(1000))); -+ return; -+ } -+ -+ vc4_bo_remove_from_cache(bo); -+ vc4_bo_destroy(bo); -+ } -+} -+ -+/* Called on the last userspace/kernel unreference of the BO. Returns -+ * it to the BO cache if possible, otherwise frees it. -+ * -+ * Note that this is called with the struct_mutex held. -+ */ -+void vc4_free_object(struct drm_gem_object *gem_bo) -+{ -+ struct drm_device *dev = gem_bo->dev; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ struct vc4_bo *bo = to_vc4_bo(gem_bo); -+ struct list_head *cache_list; -+ -+ mutex_lock(&vc4->bo_lock); -+ /* If the object references someone else's memory, we can't cache it. -+ */ -+ if (gem_bo->import_attach) { -+ vc4_bo_destroy(bo); -+ goto out; -+ } -+ -+ /* Don't cache if it was publicly named. 
*/ -+ if (gem_bo->name) { -+ vc4_bo_destroy(bo); -+ goto out; -+ } -+ -+ cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size); -+ if (!cache_list) { -+ vc4_bo_destroy(bo); -+ goto out; -+ } -+ -+ bo->free_time = jiffies; -+ list_add(&bo->size_head, cache_list); -+ list_add(&bo->unref_head, &vc4->bo_cache.time_list); -+ -+ vc4->bo_stats.num_cached++; -+ vc4->bo_stats.size_cached += gem_bo->size; -+ -+ vc4_bo_cache_free_old(dev); -+ -+out: -+ mutex_unlock(&vc4->bo_lock); -+} -+ -+static void vc4_bo_cache_time_work(struct work_struct *work) -+{ -+ struct vc4_dev *vc4 = -+ container_of(work, struct vc4_dev, bo_cache.time_work); -+ struct drm_device *dev = vc4->dev; -+ -+ mutex_lock(&vc4->bo_lock); -+ vc4_bo_cache_free_old(dev); -+ mutex_unlock(&vc4->bo_lock); -+} -+ -+static void vc4_bo_cache_time_timer(unsigned long data) -+{ -+ struct drm_device *dev = (struct drm_device *)data; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ schedule_work(&vc4->bo_cache.time_work); -+} -+ -+void vc4_bo_cache_init(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ mutex_init(&vc4->bo_lock); -+ -+ INIT_LIST_HEAD(&vc4->bo_cache.time_list); -+ -+ INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work); -+ setup_timer(&vc4->bo_cache.time_timer, -+ vc4_bo_cache_time_timer, -+ (unsigned long)dev); -+} -+ -+void vc4_bo_cache_destroy(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ del_timer(&vc4->bo_cache.time_timer); -+ cancel_work_sync(&vc4->bo_cache.time_work); -+ -+ vc4_bo_cache_purge(dev); -+ -+ if (vc4->bo_stats.num_allocated) { -+ DRM_ERROR("Destroying BO cache while BOs still allocated:\n"); -+ vc4_bo_stats_dump(vc4); -+ } -+} -diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c -index 4297b0a5..6bcf96e 100644 ---- a/drivers/gpu/drm/vc4/vc4_debugfs.c -+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c -@@ -16,6 +16,7 @@ - #include "vc4_regs.h" - - static const struct drm_info_list vc4_debugfs_list[] = { -+ {"bo_stats", vc4_bo_stats_debugfs, 0}, - {"hdmi_regs", vc4_hdmi_debugfs_regs, 0}, - {"hvs_regs", vc4_hvs_debugfs_regs, 0}, - {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index 6e73060..da041fa 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -92,7 +92,8 @@ static struct drm_driver vc4_drm_driver = { - .debugfs_cleanup = vc4_debugfs_cleanup, - #endif - -- .gem_free_object = drm_gem_cma_free_object, -+ .gem_create_object = vc4_create_object, -+ .gem_free_object = vc4_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, - - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, -@@ -170,6 +171,8 @@ static int vc4_drm_bind(struct device *dev) - - drm_dev_set_unique(drm, dev_name(dev)); - -+ vc4_bo_cache_init(drm); -+ - drm_mode_config_init(drm); - if (ret) - goto unref; -@@ -202,6 +205,7 @@ unbind_all: - component_unbind_all(dev, drm); - unref: - drm_dev_unref(drm); -+ vc4_bo_cache_destroy(drm); - return ret; - } - -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index fd8319f..39a1ff5 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -17,6 +17,37 @@ struct vc4_dev { - struct vc4_crtc *crtc[3]; - - struct drm_fbdev_cma *fbdev; -+ -+ /* The kernel-space BO cache. Tracks buffers that have been -+ * unreferenced by all other users (refcounts of 0!) but not -+ * yet freed, so we can do cheap allocations. 
-+ */ -+ struct vc4_bo_cache { -+ /* Array of list heads for entries in the BO cache, -+ * based on number of pages, so we can do O(1) lookups -+ * in the cache when allocating. -+ */ -+ struct list_head *size_list; -+ uint32_t size_list_size; -+ -+ /* List of all BOs in the cache, ordered by age, so we -+ * can do O(1) lookups when trying to free old -+ * buffers. -+ */ -+ struct list_head time_list; -+ struct work_struct time_work; -+ struct timer_list time_timer; -+ } bo_cache; -+ -+ struct vc4_bo_stats { -+ u32 num_allocated; -+ u32 size_allocated; -+ u32 num_cached; -+ u32 size_cached; -+ } bo_stats; -+ -+ /* Protects bo_cache and the BO stats. */ -+ struct mutex bo_lock; - }; - - static inline struct vc4_dev * -@@ -27,6 +58,17 @@ to_vc4_dev(struct drm_device *dev) - - struct vc4_bo { - struct drm_gem_cma_object base; -+ -+ /* List entry for the BO's position in either -+ * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list -+ */ -+ struct list_head unref_head; -+ -+ /* Time in jiffies when the BO was put in vc4->bo_cache. */ -+ unsigned long free_time; -+ -+ /* List entry for the BO's position in vc4_dev->bo_cache.size_list */ -+ struct list_head size_head; - }; - - static inline struct vc4_bo * -@@ -104,13 +146,18 @@ to_vc4_encoder(struct drm_encoder *encoder) - #define wait_for(COND, MS) _wait_for(COND, MS, 1) - - /* vc4_bo.c */ -+struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); - void vc4_free_object(struct drm_gem_object *gem_obj); --struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size); -+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, -+ bool from_cache); - int vc4_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); - struct dma_buf *vc4_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags); -+void vc4_bo_cache_init(struct drm_device *dev); -+void vc4_bo_cache_destroy(struct drm_device *dev); -+int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); - - /* vc4_crtc.c */ - extern struct platform_driver vc4_crtc_driver; diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-an-api-for-creating-gpu-shaders-in-gem-b.patch b/debian/patches/features/arm/rpi/drm-vc4-add-an-api-for-creating-gpu-shaders-in-gem-b.patch deleted file mode 100644 index 9174c4d86..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-add-an-api-for-creating-gpu-shaders-in-gem-b.patch +++ /dev/null @@ -1,1165 +0,0 @@ -From: Eric Anholt -Date: Mon, 30 Nov 2015 11:41:40 -0800 -Subject: [03/16] drm/vc4: Add an API for creating GPU shaders in GEM BOs. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=463873d5701427f2964a0b4b72c45f1f14b6df87 - -Since we have no MMU, the kernel needs to validate that the submitted -shader code won't make any accesses to memory that the user doesn't -control, which involves banning some operations (general purpose DMA -writes), and tracking where we need to write out pointers for other -operations (texture sampling). Once it's validated, we return a GEM -BO containing the shader, which doesn't allow mapping for write or -exporting to other subsystems. - -v2: Use __u32-style types. 
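For context, a minimal userspace sketch of the ioctl this patch adds, not taken from the patch itself: fd, qpu_code and code_size are assumed to exist, and drmIoctl() is the usual libdrm wrapper. The field names follow the handler code below.

	/* size must be a multiple of sizeof(__u64); flags and pad must be
	 * zero or the kernel returns -EINVAL. On success, create.handle
	 * names a validated shader BO that cannot be mmapped for write
	 * or exported to other subsystems.
	 */
	struct drm_vc4_create_shader_bo create = {
		.size = code_size,
		.data = (uintptr_t)qpu_code,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &create);
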
- -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/Makefile | 3 +- - drivers/gpu/drm/vc4/vc4_bo.c | 140 ++++++++ - drivers/gpu/drm/vc4/vc4_drv.c | 9 +- - drivers/gpu/drm/vc4/vc4_drv.h | 50 +++ - drivers/gpu/drm/vc4/vc4_qpu_defines.h | 264 +++++++++++++++ - drivers/gpu/drm/vc4/vc4_validate_shaders.c | 513 +++++++++++++++++++++++++++++ - include/uapi/drm/vc4_drm.h | 25 ++ - 7 files changed, 999 insertions(+), 5 deletions(-) - create mode 100644 drivers/gpu/drm/vc4/vc4_qpu_defines.h - create mode 100644 drivers/gpu/drm/vc4/vc4_validate_shaders.c - -diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile -index 32b4f9c..eb776a6 100644 ---- a/drivers/gpu/drm/vc4/Makefile -+++ b/drivers/gpu/drm/vc4/Makefile -@@ -10,7 +10,8 @@ vc4-y := \ - vc4_kms.o \ - vc4_hdmi.o \ - vc4_hvs.o \ -- vc4_plane.o -+ vc4_plane.o \ -+ vc4_validate_shaders.o - - vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o - -diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c -index 06cba26..18dfe3e 100644 ---- a/drivers/gpu/drm/vc4/vc4_bo.c -+++ b/drivers/gpu/drm/vc4/vc4_bo.c -@@ -79,6 +79,12 @@ static void vc4_bo_destroy(struct vc4_bo *bo) - struct drm_gem_object *obj = &bo->base.base; - struct vc4_dev *vc4 = to_vc4_dev(obj->dev); - -+ if (bo->validated_shader) { -+ kfree(bo->validated_shader->texture_samples); -+ kfree(bo->validated_shader); -+ bo->validated_shader = NULL; -+ } -+ - vc4->bo_stats.num_allocated--; - vc4->bo_stats.size_allocated -= obj->size; - drm_gem_cma_free_object(obj); -@@ -315,6 +321,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo) - goto out; - } - -+ if (bo->validated_shader) { -+ kfree(bo->validated_shader->texture_samples); -+ kfree(bo->validated_shader); -+ bo->validated_shader = NULL; -+ } -+ - bo->free_time = jiffies; - list_add(&bo->size_head, cache_list); - list_add(&bo->unref_head, &vc4->bo_cache.time_list); -@@ -347,6 +359,78 @@ static void vc4_bo_cache_time_timer(unsigned long data) - schedule_work(&vc4->bo_cache.time_work); - } - -+struct dma_buf * -+vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) -+{ -+ struct vc4_bo *bo = to_vc4_bo(obj); -+ -+ if (bo->validated_shader) { -+ DRM_ERROR("Attempting to export shader BO\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ return drm_gem_prime_export(dev, obj, flags); -+} -+ -+int vc4_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ struct drm_gem_object *gem_obj; -+ struct vc4_bo *bo; -+ int ret; -+ -+ ret = drm_gem_mmap(filp, vma); -+ if (ret) -+ return ret; -+ -+ gem_obj = vma->vm_private_data; -+ bo = to_vc4_bo(gem_obj); -+ -+ if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { -+ DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); -+ return -EINVAL; -+ } -+ -+ /* -+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the -+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map -+ * the whole buffer. 
-+ */ -+ vma->vm_flags &= ~VM_PFNMAP; -+ vma->vm_pgoff = 0; -+ -+ ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma, -+ bo->base.vaddr, bo->base.paddr, -+ vma->vm_end - vma->vm_start); -+ if (ret) -+ drm_gem_vm_close(vma); -+ -+ return ret; -+} -+ -+int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -+{ -+ struct vc4_bo *bo = to_vc4_bo(obj); -+ -+ if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { -+ DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); -+ return -EINVAL; -+ } -+ -+ return drm_gem_cma_prime_mmap(obj, vma); -+} -+ -+void *vc4_prime_vmap(struct drm_gem_object *obj) -+{ -+ struct vc4_bo *bo = to_vc4_bo(obj); -+ -+ if (bo->validated_shader) { -+ DRM_ERROR("mmaping of shader BOs not allowed.\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ return drm_gem_cma_prime_vmap(obj); -+} -+ - int vc4_create_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) - { -@@ -387,6 +471,62 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, - return 0; - } - -+int -+vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_vc4_create_shader_bo *args = data; -+ struct vc4_bo *bo = NULL; -+ int ret; -+ -+ if (args->size == 0) -+ return -EINVAL; -+ -+ if (args->size % sizeof(u64) != 0) -+ return -EINVAL; -+ -+ if (args->flags != 0) { -+ DRM_INFO("Unknown flags set: 0x%08x\n", args->flags); -+ return -EINVAL; -+ } -+ -+ if (args->pad != 0) { -+ DRM_INFO("Pad set: 0x%08x\n", args->pad); -+ return -EINVAL; -+ } -+ -+ bo = vc4_bo_create(dev, args->size, true); -+ if (!bo) -+ return -ENOMEM; -+ -+ ret = copy_from_user(bo->base.vaddr, -+ (void __user *)(uintptr_t)args->data, -+ args->size); -+ if (ret != 0) -+ goto fail; -+ /* Clear the rest of the memory from allocating from the BO -+ * cache. -+ */ -+ memset(bo->base.vaddr + args->size, 0, -+ bo->base.base.size - args->size); -+ -+ bo->validated_shader = vc4_validate_shader(&bo->base); -+ if (!bo->validated_shader) { -+ ret = -EINVAL; -+ goto fail; -+ } -+ -+ /* We have to create the handle after validation, to avoid -+ * races for users to do doing things like mmap the shader BO. 
-+ */ -+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); -+ -+ fail: -+ drm_gem_object_unreference_unlocked(&bo->base.base); -+ -+ return ret; -+} -+ - void vc4_bo_cache_init(struct drm_device *dev) - { - struct vc4_dev *vc4 = to_vc4_dev(dev); -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index 5fa4688..da4be9c8 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -64,7 +64,7 @@ static const struct file_operations vc4_drm_fops = { - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, -- .mmap = drm_gem_cma_mmap, -+ .mmap = vc4_mmap, - .poll = drm_poll, - .read = drm_read, - #ifdef CONFIG_COMPAT -@@ -76,6 +76,7 @@ static const struct file_operations vc4_drm_fops = { - static const struct drm_ioctl_desc vc4_drm_ioctls[] = { - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), -+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), - }; - - static struct drm_driver vc4_drm_driver = { -@@ -102,12 +103,12 @@ static struct drm_driver vc4_drm_driver = { - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = drm_gem_prime_import, -- .gem_prime_export = drm_gem_prime_export, -+ .gem_prime_export = vc4_prime_export, - .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, -- .gem_prime_vmap = drm_gem_cma_prime_vmap, -+ .gem_prime_vmap = vc4_prime_vmap, - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, -- .gem_prime_mmap = drm_gem_cma_prime_mmap, -+ .gem_prime_mmap = vc4_prime_mmap, - - .dumb_create = vc4_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index fddb0a0..bd77d55 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -69,6 +69,11 @@ struct vc4_bo { - - /* List entry for the BO's position in vc4_dev->bo_cache.size_list */ - struct list_head size_head; -+ -+ /* Struct for shader validation state, if created by -+ * DRM_IOCTL_VC4_CREATE_SHADER_BO. -+ */ -+ struct vc4_validated_shader_info *validated_shader; - }; - - static inline struct vc4_bo * -@@ -118,6 +123,42 @@ to_vc4_encoder(struct drm_encoder *encoder) - #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) - - /** -+ * struct vc4_texture_sample_info - saves the offsets into the UBO for texture -+ * setup parameters. -+ * -+ * This will be used at draw time to relocate the reference to the texture -+ * contents in p0, and validate that the offset combined with -+ * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO. -+ * Note that the hardware treats unprovided config parameters as 0, so not all -+ * of them need to be set up for every texure sample, and we'll store ~0 as -+ * the offset to mark the unused ones. -+ * -+ * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit -+ * Setup") for definitions of the texture parameters. -+ */ -+struct vc4_texture_sample_info { -+ bool is_direct; -+ uint32_t p_offset[4]; -+}; -+ -+/** -+ * struct vc4_validated_shader_info - information about validated shaders that -+ * needs to be used from command list validation. 
-+ * -+ * For a given shader, each time a shader state record references it, we need -+ * to verify that the shader doesn't read more uniforms than the shader state -+ * record's uniform BO pointer can provide, and we need to apply relocations -+ * and validate the shader state record's uniforms that define the texture -+ * samples. -+ */ -+struct vc4_validated_shader_info { -+ uint32_t uniforms_size; -+ uint32_t uniforms_src_size; -+ uint32_t num_texture_samples; -+ struct vc4_texture_sample_info *texture_samples; -+}; -+ -+/** - * _wait_for - magic (register) wait macro - * - * Does the right thing for modeset paths when run under kdgb or similar atomic -@@ -157,8 +198,13 @@ struct dma_buf *vc4_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags); - int vc4_create_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -+int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); - int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -+int vc4_mmap(struct file *filp, struct vm_area_struct *vma); -+int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -+void *vc4_prime_vmap(struct drm_gem_object *obj); - void vc4_bo_cache_init(struct drm_device *dev); - void vc4_bo_cache_destroy(struct drm_device *dev); - int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); -@@ -194,3 +240,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, - enum drm_plane_type type); - u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); - u32 vc4_plane_dlist_size(struct drm_plane_state *state); -+ -+/* vc4_validate_shader.c */ -+struct vc4_validated_shader_info * -+vc4_validate_shader(struct drm_gem_cma_object *shader_obj); -diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h -new file mode 100644 -index 0000000..d5c2f3c ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h -@@ -0,0 +1,264 @@ -+/* -+ * Copyright © 2014 Broadcom -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. 
-+ */ -+ -+#ifndef VC4_QPU_DEFINES_H -+#define VC4_QPU_DEFINES_H -+ -+enum qpu_op_add { -+ QPU_A_NOP, -+ QPU_A_FADD, -+ QPU_A_FSUB, -+ QPU_A_FMIN, -+ QPU_A_FMAX, -+ QPU_A_FMINABS, -+ QPU_A_FMAXABS, -+ QPU_A_FTOI, -+ QPU_A_ITOF, -+ QPU_A_ADD = 12, -+ QPU_A_SUB, -+ QPU_A_SHR, -+ QPU_A_ASR, -+ QPU_A_ROR, -+ QPU_A_SHL, -+ QPU_A_MIN, -+ QPU_A_MAX, -+ QPU_A_AND, -+ QPU_A_OR, -+ QPU_A_XOR, -+ QPU_A_NOT, -+ QPU_A_CLZ, -+ QPU_A_V8ADDS = 30, -+ QPU_A_V8SUBS = 31, -+}; -+ -+enum qpu_op_mul { -+ QPU_M_NOP, -+ QPU_M_FMUL, -+ QPU_M_MUL24, -+ QPU_M_V8MULD, -+ QPU_M_V8MIN, -+ QPU_M_V8MAX, -+ QPU_M_V8ADDS, -+ QPU_M_V8SUBS, -+}; -+ -+enum qpu_raddr { -+ QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */ -+ /* 0-31 are the plain regfile a or b fields */ -+ QPU_R_UNIF = 32, -+ QPU_R_VARY = 35, -+ QPU_R_ELEM_QPU = 38, -+ QPU_R_NOP, -+ QPU_R_XY_PIXEL_COORD = 41, -+ QPU_R_MS_REV_FLAGS = 41, -+ QPU_R_VPM = 48, -+ QPU_R_VPM_LD_BUSY, -+ QPU_R_VPM_LD_WAIT, -+ QPU_R_MUTEX_ACQUIRE, -+}; -+ -+enum qpu_waddr { -+ /* 0-31 are the plain regfile a or b fields */ -+ QPU_W_ACC0 = 32, /* aka r0 */ -+ QPU_W_ACC1, -+ QPU_W_ACC2, -+ QPU_W_ACC3, -+ QPU_W_TMU_NOSWAP, -+ QPU_W_ACC5, -+ QPU_W_HOST_INT, -+ QPU_W_NOP, -+ QPU_W_UNIFORMS_ADDRESS, -+ QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */ -+ QPU_W_MS_FLAGS = 42, -+ QPU_W_REV_FLAG = 42, -+ QPU_W_TLB_STENCIL_SETUP = 43, -+ QPU_W_TLB_Z, -+ QPU_W_TLB_COLOR_MS, -+ QPU_W_TLB_COLOR_ALL, -+ QPU_W_TLB_ALPHA_MASK, -+ QPU_W_VPM, -+ QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */ -+ QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */ -+ QPU_W_MUTEX_RELEASE, -+ QPU_W_SFU_RECIP, -+ QPU_W_SFU_RECIPSQRT, -+ QPU_W_SFU_EXP, -+ QPU_W_SFU_LOG, -+ QPU_W_TMU0_S, -+ QPU_W_TMU0_T, -+ QPU_W_TMU0_R, -+ QPU_W_TMU0_B, -+ QPU_W_TMU1_S, -+ QPU_W_TMU1_T, -+ QPU_W_TMU1_R, -+ QPU_W_TMU1_B, -+}; -+ -+enum qpu_sig_bits { -+ QPU_SIG_SW_BREAKPOINT, -+ QPU_SIG_NONE, -+ QPU_SIG_THREAD_SWITCH, -+ QPU_SIG_PROG_END, -+ QPU_SIG_WAIT_FOR_SCOREBOARD, -+ QPU_SIG_SCOREBOARD_UNLOCK, -+ QPU_SIG_LAST_THREAD_SWITCH, -+ QPU_SIG_COVERAGE_LOAD, -+ QPU_SIG_COLOR_LOAD, -+ QPU_SIG_COLOR_LOAD_END, -+ QPU_SIG_LOAD_TMU0, -+ QPU_SIG_LOAD_TMU1, -+ QPU_SIG_ALPHA_MASK_LOAD, -+ QPU_SIG_SMALL_IMM, -+ QPU_SIG_LOAD_IMM, -+ QPU_SIG_BRANCH -+}; -+ -+enum qpu_mux { -+ /* hardware mux values */ -+ QPU_MUX_R0, -+ QPU_MUX_R1, -+ QPU_MUX_R2, -+ QPU_MUX_R3, -+ QPU_MUX_R4, -+ QPU_MUX_R5, -+ QPU_MUX_A, -+ QPU_MUX_B, -+ -+ /* non-hardware mux values */ -+ QPU_MUX_IMM, -+}; -+ -+enum qpu_cond { -+ QPU_COND_NEVER, -+ QPU_COND_ALWAYS, -+ QPU_COND_ZS, -+ QPU_COND_ZC, -+ QPU_COND_NS, -+ QPU_COND_NC, -+ QPU_COND_CS, -+ QPU_COND_CC, -+}; -+ -+enum qpu_pack_mul { -+ QPU_PACK_MUL_NOP, -+ /* replicated to each 8 bits of the 32-bit dst. */ -+ QPU_PACK_MUL_8888 = 3, -+ QPU_PACK_MUL_8A, -+ QPU_PACK_MUL_8B, -+ QPU_PACK_MUL_8C, -+ QPU_PACK_MUL_8D, -+}; -+ -+enum qpu_pack_a { -+ QPU_PACK_A_NOP, -+ /* convert to 16 bit float if float input, or to int16. */ -+ QPU_PACK_A_16A, -+ QPU_PACK_A_16B, -+ /* replicated to each 8 bits of the 32-bit dst. */ -+ QPU_PACK_A_8888, -+ /* Convert to 8-bit unsigned int. */ -+ QPU_PACK_A_8A, -+ QPU_PACK_A_8B, -+ QPU_PACK_A_8C, -+ QPU_PACK_A_8D, -+ -+ /* Saturating variants of the previous instructions. 
*/ -+ QPU_PACK_A_32_SAT, /* int-only */ -+ QPU_PACK_A_16A_SAT, /* int or float */ -+ QPU_PACK_A_16B_SAT, -+ QPU_PACK_A_8888_SAT, -+ QPU_PACK_A_8A_SAT, -+ QPU_PACK_A_8B_SAT, -+ QPU_PACK_A_8C_SAT, -+ QPU_PACK_A_8D_SAT, -+}; -+ -+enum qpu_unpack_r4 { -+ QPU_UNPACK_R4_NOP, -+ QPU_UNPACK_R4_F16A_TO_F32, -+ QPU_UNPACK_R4_F16B_TO_F32, -+ QPU_UNPACK_R4_8D_REP, -+ QPU_UNPACK_R4_8A, -+ QPU_UNPACK_R4_8B, -+ QPU_UNPACK_R4_8C, -+ QPU_UNPACK_R4_8D, -+}; -+ -+#define QPU_MASK(high, low) \ -+ ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low)) -+ -+#define QPU_GET_FIELD(word, field) \ -+ ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT)) -+ -+#define QPU_SIG_SHIFT 60 -+#define QPU_SIG_MASK QPU_MASK(63, 60) -+ -+#define QPU_UNPACK_SHIFT 57 -+#define QPU_UNPACK_MASK QPU_MASK(59, 57) -+ -+/** -+ * If set, the pack field means PACK_MUL or R4 packing, instead of normal -+ * regfile a packing. -+ */ -+#define QPU_PM ((uint64_t)1 << 56) -+ -+#define QPU_PACK_SHIFT 52 -+#define QPU_PACK_MASK QPU_MASK(55, 52) -+ -+#define QPU_COND_ADD_SHIFT 49 -+#define QPU_COND_ADD_MASK QPU_MASK(51, 49) -+#define QPU_COND_MUL_SHIFT 46 -+#define QPU_COND_MUL_MASK QPU_MASK(48, 46) -+ -+#define QPU_SF ((uint64_t)1 << 45) -+ -+#define QPU_WADDR_ADD_SHIFT 38 -+#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38) -+#define QPU_WADDR_MUL_SHIFT 32 -+#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32) -+ -+#define QPU_OP_MUL_SHIFT 29 -+#define QPU_OP_MUL_MASK QPU_MASK(31, 29) -+ -+#define QPU_RADDR_A_SHIFT 18 -+#define QPU_RADDR_A_MASK QPU_MASK(23, 18) -+#define QPU_RADDR_B_SHIFT 12 -+#define QPU_RADDR_B_MASK QPU_MASK(17, 12) -+#define QPU_SMALL_IMM_SHIFT 12 -+#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12) -+ -+#define QPU_ADD_A_SHIFT 9 -+#define QPU_ADD_A_MASK QPU_MASK(11, 9) -+#define QPU_ADD_B_SHIFT 6 -+#define QPU_ADD_B_MASK QPU_MASK(8, 6) -+#define QPU_MUL_A_SHIFT 3 -+#define QPU_MUL_A_MASK QPU_MASK(5, 3) -+#define QPU_MUL_B_SHIFT 0 -+#define QPU_MUL_B_MASK QPU_MASK(2, 0) -+ -+#define QPU_WS ((uint64_t)1 << 44) -+ -+#define QPU_OP_ADD_SHIFT 24 -+#define QPU_OP_ADD_MASK QPU_MASK(28, 24) -+ -+#endif /* VC4_QPU_DEFINES_H */ -diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c -new file mode 100644 -index 0000000..f67124b ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c -@@ -0,0 +1,513 @@ -+/* -+ * Copyright © 2014 Broadcom -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. 
-+ */ -+ -+/** -+ * DOC: Shader validator for VC4. -+ * -+ * The VC4 has no IOMMU between it and system memory, so a user with -+ * access to execute shaders could escalate privilege by overwriting -+ * system memory (using the VPM write address register in the -+ * general-purpose DMA mode) or reading system memory it shouldn't -+ * (reading it as a texture, or uniform data, or vertex data). -+ * -+ * This walks over a shader BO, ensuring that its accesses are -+ * appropriately bounded, and recording how many texture accesses are -+ * made and where so that we can do relocations for them in the -+ * uniform stream. -+ */ -+ -+#include "vc4_drv.h" -+#include "vc4_qpu_defines.h" -+ -+struct vc4_shader_validation_state { -+ struct vc4_texture_sample_info tmu_setup[2]; -+ int tmu_write_count[2]; -+ -+ /* For registers that were last written to by a MIN instruction with -+ * one argument being a uniform, the address of the uniform. -+ * Otherwise, ~0. -+ * -+ * This is used for the validation of direct address memory reads. -+ */ -+ uint32_t live_min_clamp_offsets[32 + 32 + 4]; -+ bool live_max_clamp_regs[32 + 32 + 4]; -+}; -+ -+static uint32_t -+waddr_to_live_reg_index(uint32_t waddr, bool is_b) -+{ -+ if (waddr < 32) { -+ if (is_b) -+ return 32 + waddr; -+ else -+ return waddr; -+ } else if (waddr <= QPU_W_ACC3) { -+ return 64 + waddr - QPU_W_ACC0; -+ } else { -+ return ~0; -+ } -+} -+ -+static uint32_t -+raddr_add_a_to_live_reg_index(uint64_t inst) -+{ -+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); -+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A); -+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); -+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); -+ -+ if (add_a == QPU_MUX_A) -+ return raddr_a; -+ else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) -+ return 32 + raddr_b; -+ else if (add_a <= QPU_MUX_R3) -+ return 64 + add_a; -+ else -+ return ~0; -+} -+ -+static bool -+is_tmu_submit(uint32_t waddr) -+{ -+ return (waddr == QPU_W_TMU0_S || -+ waddr == QPU_W_TMU1_S); -+} -+ -+static bool -+is_tmu_write(uint32_t waddr) -+{ -+ return (waddr >= QPU_W_TMU0_S && -+ waddr <= QPU_W_TMU1_B); -+} -+ -+static bool -+record_texture_sample(struct vc4_validated_shader_info *validated_shader, -+ struct vc4_shader_validation_state *validation_state, -+ int tmu) -+{ -+ uint32_t s = validated_shader->num_texture_samples; -+ int i; -+ struct vc4_texture_sample_info *temp_samples; -+ -+ temp_samples = krealloc(validated_shader->texture_samples, -+ (s + 1) * sizeof(*temp_samples), -+ GFP_KERNEL); -+ if (!temp_samples) -+ return false; -+ -+ memcpy(&temp_samples[s], -+ &validation_state->tmu_setup[tmu], -+ sizeof(*temp_samples)); -+ -+ validated_shader->num_texture_samples = s + 1; -+ validated_shader->texture_samples = temp_samples; -+ -+ for (i = 0; i < 4; i++) -+ validation_state->tmu_setup[tmu].p_offset[i] = ~0; -+ -+ return true; -+} -+ -+static bool -+check_tmu_write(uint64_t inst, -+ struct vc4_validated_shader_info *validated_shader, -+ struct vc4_shader_validation_state *validation_state, -+ bool is_mul) -+{ -+ uint32_t waddr = (is_mul ? 
-+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
-+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
-+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
-+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
-+ int tmu = waddr > QPU_W_TMU0_B;
-+ bool submit = is_tmu_submit(waddr);
-+ bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
-+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
-+
-+ if (is_direct) {
-+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
-+ uint32_t clamp_reg, clamp_offset;
-+
-+ if (sig == QPU_SIG_SMALL_IMM) {
-+ DRM_ERROR("direct TMU read used small immediate\n");
-+ return false;
-+ }
-+
-+ /* Make sure that this texture load is an add of the base
-+ * address of the UBO to a clamped offset within the UBO.
-+ */
-+ if (is_mul ||
-+ QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
-+ DRM_ERROR("direct TMU load wasn't an add\n");
-+ return false;
-+ }
-+
-+ /* We assert that the clamped address is the first
-+ * argument, and the UBO base address is the second argument.
-+ * This is arbitrary, but simpler than supporting flipping the
-+ * two either way.
-+ */
-+ clamp_reg = raddr_add_a_to_live_reg_index(inst);
-+ if (clamp_reg == ~0) {
-+ DRM_ERROR("direct TMU load wasn't clamped\n");
-+ return false;
-+ }
-+
-+ clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
-+ if (clamp_offset == ~0) {
-+ DRM_ERROR("direct TMU load wasn't clamped\n");
-+ return false;
-+ }
-+
-+ /* Store the clamp value's offset in p1 (see reloc_tex() in
-+ * vc4_validate.c).
-+ */
-+ validation_state->tmu_setup[tmu].p_offset[1] =
-+ clamp_offset;
-+
-+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
-+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
-+ DRM_ERROR("direct TMU load didn't add to a uniform\n");
-+ return false;
-+ }
-+
-+ validation_state->tmu_setup[tmu].is_direct = true;
-+ } else {
-+ if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
-+ raddr_b == QPU_R_UNIF)) {
-+ DRM_ERROR("uniform read in the same instruction as "
-+ "texture setup.\n");
-+ return false;
-+ }
-+ }
-+
-+ if (validation_state->tmu_write_count[tmu] >= 4) {
-+ DRM_ERROR("TMU%d got too many parameters before dispatch\n",
-+ tmu);
-+ return false;
-+ }
-+ validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
-+ validated_shader->uniforms_size;
-+ validation_state->tmu_write_count[tmu]++;
-+ /* Since direct uses a RADDR uniform reference, it will get counted in
-+ * check_instruction_reads()
-+ */
-+ if (!is_direct)
-+ validated_shader->uniforms_size += 4;
-+
-+ if (submit) {
-+ if (!record_texture_sample(validated_shader,
-+ validation_state, tmu)) {
-+ return false;
-+ }
-+
-+ validation_state->tmu_write_count[tmu] = 0;
-+ }
-+
-+ return true;
-+}
-+
-+static bool
-+check_reg_write(uint64_t inst,
-+ struct vc4_validated_shader_info *validated_shader,
-+ struct vc4_shader_validation_state *validation_state,
-+ bool is_mul)
-+{
-+ uint32_t waddr = (is_mul ?
-+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
-+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
-+
-+ switch (waddr) {
-+ case QPU_W_UNIFORMS_ADDRESS:
-+ /* XXX: We'll probably need to support this for reladdr, but
-+ * it's definitely a security-related one.
-+ */
-+ DRM_ERROR("uniforms address load unsupported\n");
-+ return false;
-+
-+ case QPU_W_TLB_COLOR_MS:
-+ case QPU_W_TLB_COLOR_ALL:
-+ case QPU_W_TLB_Z:
-+ /* These only interact with the tile buffer, not main memory,
-+ * so they're safe.
-+ */
-+ return true;
-+
-+ case QPU_W_TMU0_S:
-+ case QPU_W_TMU0_T:
-+ case QPU_W_TMU0_R:
-+ case QPU_W_TMU0_B:
-+ case QPU_W_TMU1_S:
-+ case QPU_W_TMU1_T:
-+ case QPU_W_TMU1_R:
-+ case QPU_W_TMU1_B:
-+ return check_tmu_write(inst, validated_shader, validation_state,
-+ is_mul);
-+
-+ case QPU_W_HOST_INT:
-+ case QPU_W_TMU_NOSWAP:
-+ case QPU_W_TLB_ALPHA_MASK:
-+ case QPU_W_MUTEX_RELEASE:
-+ /* XXX: I haven't thought about these, so don't support them
-+ * for now.
-+ */
-+ DRM_ERROR("Unsupported waddr %d\n", waddr);
-+ return false;
-+
-+ case QPU_W_VPM_ADDR:
-+ DRM_ERROR("General VPM DMA unsupported\n");
-+ return false;
-+
-+ case QPU_W_VPM:
-+ case QPU_W_VPMVCD_SETUP:
-+ /* We allow VPM setup in general, even including VPM DMA
-+ * configuration setup, because the (unsafe) DMA can only be
-+ * triggered by QPU_W_VPM_ADDR writes.
-+ */
-+ return true;
-+
-+ case QPU_W_TLB_STENCIL_SETUP:
-+ return true;
-+ }
-+
-+ return true;
-+}
-+
-+static void
-+track_live_clamps(uint64_t inst,
-+ struct vc4_validated_shader_info *validated_shader,
-+ struct vc4_shader_validation_state *validation_state)
-+{
-+ uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
-+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
-+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
-+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
-+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
-+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
-+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
-+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
-+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
-+ bool ws = inst & QPU_WS;
-+ uint32_t lri_add_a, lri_add, lri_mul;
-+ bool add_a_is_min_0;
-+
-+ /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
-+ * before we clear previous live state.
-+ */
-+ lri_add_a = raddr_add_a_to_live_reg_index(inst);
-+ add_a_is_min_0 = (lri_add_a != ~0 &&
-+ validation_state->live_max_clamp_regs[lri_add_a]);
-+
-+ /* Clear live state for registers written by our instruction. */
-+ lri_add = waddr_to_live_reg_index(waddr_add, ws);
-+ lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
-+ if (lri_mul != ~0) {
-+ validation_state->live_max_clamp_regs[lri_mul] = false;
-+ validation_state->live_min_clamp_offsets[lri_mul] = ~0;
-+ }
-+ if (lri_add != ~0) {
-+ validation_state->live_max_clamp_regs[lri_add] = false;
-+ validation_state->live_min_clamp_offsets[lri_add] = ~0;
-+ } else {
-+ /* Nothing further to do for live tracking, since only ADDs
-+ * generate new live clamp registers.
-+ */
-+ return;
-+ }
-+
-+ /* Now, handle remaining live clamp tracking for the ADD operation. */
-+
-+ if (cond_add != QPU_COND_ALWAYS)
-+ return;
-+
-+ if (op_add == QPU_A_MAX) {
-+ /* Track live clamps of a value to a minimum of 0 (in either
-+ * arg).
-+ */
-+ if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
-+ (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
-+ return;
-+ }
-+
-+ validation_state->live_max_clamp_regs[lri_add] = true;
-+ } else if (op_add == QPU_A_MIN) {
-+ /* Track live clamps of a value clamped to a minimum of 0 and
-+ * a maximum of some uniform's offset.
-+ */ -+ if (!add_a_is_min_0) -+ return; -+ -+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && -+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF && -+ sig != QPU_SIG_SMALL_IMM)) { -+ return; -+ } -+ -+ validation_state->live_min_clamp_offsets[lri_add] = -+ validated_shader->uniforms_size; -+ } -+} -+ -+static bool -+check_instruction_writes(uint64_t inst, -+ struct vc4_validated_shader_info *validated_shader, -+ struct vc4_shader_validation_state *validation_state) -+{ -+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); -+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); -+ bool ok; -+ -+ if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { -+ DRM_ERROR("ADD and MUL both set up textures\n"); -+ return false; -+ } -+ -+ ok = (check_reg_write(inst, validated_shader, validation_state, -+ false) && -+ check_reg_write(inst, validated_shader, validation_state, -+ true)); -+ -+ track_live_clamps(inst, validated_shader, validation_state); -+ -+ return ok; -+} -+ -+static bool -+check_instruction_reads(uint64_t inst, -+ struct vc4_validated_shader_info *validated_shader) -+{ -+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); -+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); -+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); -+ -+ if (raddr_a == QPU_R_UNIF || -+ (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) { -+ /* This can't overflow the uint32_t, because we're reading 8 -+ * bytes of instruction to increment by 4 here, so we'd -+ * already be OOM. -+ */ -+ validated_shader->uniforms_size += 4; -+ } -+ -+ return true; -+} -+ -+struct vc4_validated_shader_info * -+vc4_validate_shader(struct drm_gem_cma_object *shader_obj) -+{ -+ bool found_shader_end = false; -+ int shader_end_ip = 0; -+ uint32_t ip, max_ip; -+ uint64_t *shader; -+ struct vc4_validated_shader_info *validated_shader; -+ struct vc4_shader_validation_state validation_state; -+ int i; -+ -+ memset(&validation_state, 0, sizeof(validation_state)); -+ -+ for (i = 0; i < 8; i++) -+ validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0; -+ for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++) -+ validation_state.live_min_clamp_offsets[i] = ~0; -+ -+ shader = shader_obj->vaddr; -+ max_ip = shader_obj->base.size / sizeof(uint64_t); -+ -+ validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL); -+ if (!validated_shader) -+ return NULL; -+ -+ for (ip = 0; ip < max_ip; ip++) { -+ uint64_t inst = shader[ip]; -+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); -+ -+ switch (sig) { -+ case QPU_SIG_NONE: -+ case QPU_SIG_WAIT_FOR_SCOREBOARD: -+ case QPU_SIG_SCOREBOARD_UNLOCK: -+ case QPU_SIG_COLOR_LOAD: -+ case QPU_SIG_LOAD_TMU0: -+ case QPU_SIG_LOAD_TMU1: -+ case QPU_SIG_PROG_END: -+ case QPU_SIG_SMALL_IMM: -+ if (!check_instruction_writes(inst, validated_shader, -+ &validation_state)) { -+ DRM_ERROR("Bad write at ip %d\n", ip); -+ goto fail; -+ } -+ -+ if (!check_instruction_reads(inst, validated_shader)) -+ goto fail; -+ -+ if (sig == QPU_SIG_PROG_END) { -+ found_shader_end = true; -+ shader_end_ip = ip; -+ } -+ -+ break; -+ -+ case QPU_SIG_LOAD_IMM: -+ if (!check_instruction_writes(inst, validated_shader, -+ &validation_state)) { -+ DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); -+ goto fail; -+ } -+ break; -+ -+ default: -+ DRM_ERROR("Unsupported QPU signal %d at " -+ "instruction %d\n", sig, ip); -+ goto fail; -+ } -+ -+ /* There are two delay slots after program end is signaled -+ * that are still executed, then we're finished. 
-+ */
-+ if (found_shader_end && ip == shader_end_ip + 2)
-+ break;
-+ }
-+
-+ if (ip == max_ip) {
-+ DRM_ERROR("shader failed to terminate before "
-+ "shader BO end at %zd\n",
-+ shader_obj->base.size);
-+ goto fail;
-+ }
-+
-+ /* Again, no chance of integer overflow here because the worst case
-+ * scenario is 8 bytes of uniforms plus handles per 8-byte
-+ * instruction.
-+ */
-+ validated_shader->uniforms_src_size =
-+ (validated_shader->uniforms_size +
-+ 4 * validated_shader->num_texture_samples);
-+
-+ return validated_shader;
-+
-+fail:
-+ if (validated_shader) {
-+ kfree(validated_shader->texture_samples);
-+ kfree(validated_shader);
-+ }
-+ return NULL;
-+}
-diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
-index 219d34c..74de184 100644
---- a/include/uapi/drm/vc4_drm.h
-+++ b/include/uapi/drm/vc4_drm.h
-@@ -28,9 +28,11 @@
-
- #define DRM_VC4_CREATE_BO 0x03
- #define DRM_VC4_MMAP_BO 0x04
-+#define DRM_VC4_CREATE_SHADER_BO 0x05
-
- #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
- #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
-+#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
-
- /**
- * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
-@@ -65,4 +67,27 @@ struct drm_vc4_mmap_bo {
- __u64 offset;
- };
-
-+/**
-+ * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
-+ * shader BOs.
-+ *
-+ * Since allowing a shader to be overwritten while it's also being
-+ * executed from would allow privilege escalation, shaders must be
-+ * created using this ioctl, and they can't be mmapped later.
-+ */
-+struct drm_vc4_create_shader_bo {
-+ /* Size of the data argument. */
-+ __u32 size;
-+ /* Flags, currently must be 0. */
-+ __u32 flags;
-+
-+ /* Pointer to the data. */
-+ __u64 data;
-+
-+ /** Returned GEM handle for the BO. */
-+ __u32 handle;
-+ /* Pad, must be 0. */
-+ __u32 pad;
-+};
-+
- #endif /* _UAPI_VC4_DRM_H_ */
diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-an-interface-for-capturing-the-gpu-state.patch b/debian/patches/features/arm/rpi/drm-vc4-add-an-interface-for-capturing-the-gpu-state.patch
deleted file mode 100644
index 326ed4c0a..000000000
--- a/debian/patches/features/arm/rpi/drm-vc4-add-an-interface-for-capturing-the-gpu-state.patch
+++ /dev/null
@@ -1,330 +0,0 @@
-From: Eric Anholt
-Date: Fri, 30 Oct 2015 10:09:02 -0700
-Subject: [08/16] drm/vc4: Add an interface for capturing the GPU state after a
- hang.
-Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=214613656b5179f0daab6e0a080814b5100d45f0
-
-This can be parsed with vc4-gpu-tools to try to figure out
-what was going on.
-
-v2: Use __u32-style types.
- -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_drv.c | 2 + - drivers/gpu/drm/vc4/vc4_drv.h | 4 + - drivers/gpu/drm/vc4/vc4_gem.c | 185 ++++++++++++++++++++++++++++++++++++++++++ - include/uapi/drm/vc4_drm.h | 45 ++++++++++ - 4 files changed, 236 insertions(+) - -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index 2cfee59..97226b6 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -80,6 +80,8 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), -+ DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, -+ DRM_ROOT_ONLY), - }; - - static struct drm_driver vc4_drm_driver = { -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index f9927d8..080865e 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -19,6 +19,8 @@ struct vc4_dev { - - struct drm_fbdev_cma *fbdev; - -+ struct vc4_hang_state *hang_state; -+ - /* The kernel-space BO cache. Tracks buffers that have been - * unreferenced by all other users (refcounts of 0!) but not - * yet freed, so we can do cheap allocations. -@@ -361,6 +363,8 @@ int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -+int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); - int vc4_mmap(struct file *filp, struct vm_area_struct *vma); - int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - void *vc4_prime_vmap(struct drm_gem_object *obj); -diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c -index 5fb0556..39f29e7 100644 ---- a/drivers/gpu/drm/vc4/vc4_gem.c -+++ b/drivers/gpu/drm/vc4/vc4_gem.c -@@ -40,6 +40,186 @@ vc4_queue_hangcheck(struct drm_device *dev) - round_jiffies_up(jiffies + msecs_to_jiffies(100))); - } - -+struct vc4_hang_state { -+ struct drm_vc4_get_hang_state user_state; -+ -+ u32 bo_count; -+ struct drm_gem_object **bo; -+}; -+ -+static void -+vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) -+{ -+ unsigned int i; -+ -+ mutex_lock(&dev->struct_mutex); -+ for (i = 0; i < state->user_state.bo_count; i++) -+ drm_gem_object_unreference(state->bo[i]); -+ mutex_unlock(&dev->struct_mutex); -+ -+ kfree(state); -+} -+ -+int -+vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_vc4_get_hang_state *get_state = data; -+ struct drm_vc4_get_hang_state_bo *bo_state; -+ struct vc4_hang_state *kernel_state; -+ struct drm_vc4_get_hang_state *state; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ unsigned long irqflags; -+ u32 i; -+ int ret; -+ -+ spin_lock_irqsave(&vc4->job_lock, irqflags); -+ kernel_state = vc4->hang_state; -+ if (!kernel_state) { -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ return -ENOENT; -+ } -+ state = &kernel_state->user_state; -+ -+ /* If the user's array isn't big enough, just return the -+ * required array size. 
-+ */ -+ if (get_state->bo_count < state->bo_count) { -+ get_state->bo_count = state->bo_count; -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ return 0; -+ } -+ -+ vc4->hang_state = NULL; -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ -+ /* Save the user's BO pointer, so we don't stomp it with the memcpy. */ -+ state->bo = get_state->bo; -+ memcpy(get_state, state, sizeof(*state)); -+ -+ bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL); -+ if (!bo_state) { -+ ret = -ENOMEM; -+ goto err_free; -+ } -+ -+ for (i = 0; i < state->bo_count; i++) { -+ struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]); -+ u32 handle; -+ -+ ret = drm_gem_handle_create(file_priv, kernel_state->bo[i], -+ &handle); -+ -+ if (ret) { -+ state->bo_count = i - 1; -+ goto err; -+ } -+ bo_state[i].handle = handle; -+ bo_state[i].paddr = vc4_bo->base.paddr; -+ bo_state[i].size = vc4_bo->base.base.size; -+ } -+ -+ ret = copy_to_user((void __user *)(uintptr_t)get_state->bo, -+ bo_state, -+ state->bo_count * sizeof(*bo_state)); -+ kfree(bo_state); -+ -+err_free: -+ -+ vc4_free_hang_state(dev, kernel_state); -+ -+err: -+ return ret; -+} -+ -+static void -+vc4_save_hang_state(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ struct drm_vc4_get_hang_state *state; -+ struct vc4_hang_state *kernel_state; -+ struct vc4_exec_info *exec; -+ struct vc4_bo *bo; -+ unsigned long irqflags; -+ unsigned int i, unref_list_count; -+ -+ kernel_state = kcalloc(1, sizeof(*state), GFP_KERNEL); -+ if (!kernel_state) -+ return; -+ -+ state = &kernel_state->user_state; -+ -+ spin_lock_irqsave(&vc4->job_lock, irqflags); -+ exec = vc4_first_job(vc4); -+ if (!exec) { -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ return; -+ } -+ -+ unref_list_count = 0; -+ list_for_each_entry(bo, &exec->unref_list, unref_head) -+ unref_list_count++; -+ -+ state->bo_count = exec->bo_count + unref_list_count; -+ kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo), -+ GFP_ATOMIC); -+ if (!kernel_state->bo) { -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ return; -+ } -+ -+ for (i = 0; i < exec->bo_count; i++) { -+ drm_gem_object_reference(&exec->bo[i]->base); -+ kernel_state->bo[i] = &exec->bo[i]->base; -+ } -+ -+ list_for_each_entry(bo, &exec->unref_list, unref_head) { -+ drm_gem_object_reference(&bo->base.base); -+ kernel_state->bo[i] = &bo->base.base; -+ i++; -+ } -+ -+ state->start_bin = exec->ct0ca; -+ state->start_render = exec->ct1ca; -+ -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ -+ state->ct0ca = V3D_READ(V3D_CTNCA(0)); -+ state->ct0ea = V3D_READ(V3D_CTNEA(0)); -+ -+ state->ct1ca = V3D_READ(V3D_CTNCA(1)); -+ state->ct1ea = V3D_READ(V3D_CTNEA(1)); -+ -+ state->ct0cs = V3D_READ(V3D_CTNCS(0)); -+ state->ct1cs = V3D_READ(V3D_CTNCS(1)); -+ -+ state->ct0ra0 = V3D_READ(V3D_CT00RA0); -+ state->ct1ra0 = V3D_READ(V3D_CT01RA0); -+ -+ state->bpca = V3D_READ(V3D_BPCA); -+ state->bpcs = V3D_READ(V3D_BPCS); -+ state->bpoa = V3D_READ(V3D_BPOA); -+ state->bpos = V3D_READ(V3D_BPOS); -+ -+ state->vpmbase = V3D_READ(V3D_VPMBASE); -+ -+ state->dbge = V3D_READ(V3D_DBGE); -+ state->fdbgo = V3D_READ(V3D_FDBGO); -+ state->fdbgb = V3D_READ(V3D_FDBGB); -+ state->fdbgr = V3D_READ(V3D_FDBGR); -+ state->fdbgs = V3D_READ(V3D_FDBGS); -+ state->errstat = V3D_READ(V3D_ERRSTAT); -+ -+ spin_lock_irqsave(&vc4->job_lock, irqflags); -+ if (vc4->hang_state) { -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ vc4_free_hang_state(dev, kernel_state); -+ } else { -+ vc4->hang_state = 
kernel_state; -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+ } -+} -+ - static void - vc4_reset(struct drm_device *dev) - { -@@ -64,6 +244,8 @@ vc4_reset_work(struct work_struct *work) - struct vc4_dev *vc4 = - container_of(work, struct vc4_dev, hangcheck.reset_work); - -+ vc4_save_hang_state(vc4->dev); -+ - vc4_reset(vc4->dev); - } - -@@ -679,4 +861,7 @@ vc4_gem_destroy(struct drm_device *dev) - } - - vc4_bo_cache_destroy(dev); -+ -+ if (vc4->hang_state) -+ vc4_free_hang_state(dev, vc4->hang_state); - } -diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h -index fe4161b..eeb37e3 100644 ---- a/include/uapi/drm/vc4_drm.h -+++ b/include/uapi/drm/vc4_drm.h -@@ -32,6 +32,7 @@ - #define DRM_VC4_CREATE_BO 0x03 - #define DRM_VC4_MMAP_BO 0x04 - #define DRM_VC4_CREATE_SHADER_BO 0x05 -+#define DRM_VC4_GET_HANG_STATE 0x06 - - #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) - #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) -@@ -39,6 +40,7 @@ - #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) - #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) - #define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) -+#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) - - struct drm_vc4_submit_rcl_surface { - __u32 hindex; /* Handle index, or ~0 if not present. */ -@@ -231,4 +233,47 @@ struct drm_vc4_create_shader_bo { - __u32 pad; - }; - -+struct drm_vc4_get_hang_state_bo { -+ __u32 handle; -+ __u32 paddr; -+ __u32 size; -+ __u32 pad; -+}; -+ -+/** -+ * struct drm_vc4_hang_state - ioctl argument for collecting state -+ * from a GPU hang for analysis. -+*/ -+struct drm_vc4_get_hang_state { -+ /** Pointer to array of struct drm_vc4_get_hang_state_bo. */ -+ __u64 bo; -+ /** -+ * On input, the size of the bo array. Output is the number -+ * of bos to be returned. -+ */ -+ __u32 bo_count; -+ -+ __u32 start_bin, start_render; -+ -+ __u32 ct0ca, ct0ea; -+ __u32 ct1ca, ct1ea; -+ __u32 ct0cs, ct1cs; -+ __u32 ct0ra0, ct1ra0; -+ -+ __u32 bpca, bpcs; -+ __u32 bpoa, bpos; -+ -+ __u32 vpmbase; -+ -+ __u32 dbge; -+ __u32 fdbgo; -+ __u32 fdbgb; -+ __u32 fdbgr; -+ __u32 fdbgs; -+ __u32 errstat; -+ -+ /* Pad that we may save more registers into in the future. */ -+ __u32 pad[16]; -+}; -+ - #endif /* _UAPI_VC4_DRM_H_ */ diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-create-and-map-bo-ioctls.patch b/debian/patches/features/arm/rpi/drm-vc4-add-create-and-map-bo-ioctls.patch deleted file mode 100644 index 30f45db19..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-add-create-and-map-bo-ioctls.patch +++ /dev/null @@ -1,201 +0,0 @@ -From: Eric Anholt -Date: Sun, 18 Jan 2015 09:33:17 +1300 -Subject: [02/16] drm/vc4: Add create and map BO ioctls. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=d5bc60f6ad05b3c676b057bec662cfafc3ee24dd - -While there exist dumb APIs for creating and mapping BOs, one of the -rules is that drivers doing 3D acceleration have to provide their own -APIs for buffer allocation (besides, the pitch/height parameters of -the dumb alloc don't really make sense for a lot of 3D allocations). - -v2: Use __u32-style types, use "drm.h" instead of . 
- -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_bo.c | 41 ++++++++++++++++++++++++++ - drivers/gpu/drm/vc4/vc4_drv.c | 3 ++ - drivers/gpu/drm/vc4/vc4_drv.h | 4 +++ - include/uapi/drm/Kbuild | 1 + - include/uapi/drm/vc4_drm.h | 68 +++++++++++++++++++++++++++++++++++++++++++ - 5 files changed, 117 insertions(+) - create mode 100644 include/uapi/drm/vc4_drm.h - -diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c -index 18faa5b..06cba26 100644 ---- a/drivers/gpu/drm/vc4/vc4_bo.c -+++ b/drivers/gpu/drm/vc4/vc4_bo.c -@@ -19,6 +19,7 @@ - */ - - #include "vc4_drv.h" -+#include "uapi/drm/vc4_drm.h" - - static void vc4_bo_stats_dump(struct vc4_dev *vc4) - { -@@ -346,6 +347,46 @@ static void vc4_bo_cache_time_timer(unsigned long data) - schedule_work(&vc4->bo_cache.time_work); - } - -+int vc4_create_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_vc4_create_bo *args = data; -+ struct vc4_bo *bo = NULL; -+ int ret; -+ -+ /* -+ * We can't allocate from the BO cache, because the BOs don't -+ * get zeroed, and that might leak data between users. -+ */ -+ bo = vc4_bo_create(dev, args->size, false); -+ if (!bo) -+ return -ENOMEM; -+ -+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); -+ drm_gem_object_unreference_unlocked(&bo->base.base); -+ -+ return ret; -+} -+ -+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_vc4_mmap_bo *args = data; -+ struct drm_gem_object *gem_obj; -+ -+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); -+ if (!gem_obj) { -+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); -+ return -EINVAL; -+ } -+ -+ /* The mmap offset was set up at BO allocation time. */ -+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); -+ -+ drm_gem_object_unreference_unlocked(gem_obj); -+ return 0; -+} -+ - void vc4_bo_cache_init(struct drm_device *dev) - { - struct vc4_dev *vc4 = to_vc4_dev(dev); -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index da041fa..5fa4688 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -16,6 +16,7 @@ - #include - #include "drm_fb_cma_helper.h" - -+#include "uapi/drm/vc4_drm.h" - #include "vc4_drv.h" - #include "vc4_regs.h" - -@@ -73,6 +74,8 @@ static const struct file_operations vc4_drm_fops = { - }; - - static const struct drm_ioctl_desc vc4_drm_ioctls[] = { -+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), -+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - }; - - static struct drm_driver vc4_drm_driver = { -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index 39a1ff5..fddb0a0 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -155,6 +155,10 @@ int vc4_dumb_create(struct drm_file *file_priv, - struct drm_mode_create_dumb *args); - struct dma_buf *vc4_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags); -+int vc4_create_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); - void vc4_bo_cache_init(struct drm_device *dev); - void vc4_bo_cache_destroy(struct drm_device *dev); - int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); -diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild -index 38d4370..974fcd5 100644 ---- a/include/uapi/drm/Kbuild -+++ b/include/uapi/drm/Kbuild -@@ -17,4 +17,5 @@ 
header-y += tegra_drm.h - header-y += via_drm.h - header-y += vmwgfx_drm.h - header-y += msm_drm.h -+header-y += vc4_drm.h - header-y += virtgpu_drm.h -diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h -new file mode 100644 -index 0000000..219d34c ---- /dev/null -+++ b/include/uapi/drm/vc4_drm.h -@@ -0,0 +1,68 @@ -+/* -+ * Copyright © 2014-2015 Broadcom -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+#ifndef _UAPI_VC4_DRM_H_ -+#define _UAPI_VC4_DRM_H_ -+ -+#include "drm.h" -+ -+#define DRM_VC4_CREATE_BO 0x03 -+#define DRM_VC4_MMAP_BO 0x04 -+ -+#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) -+#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) -+ -+/** -+ * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. -+ * -+ * There are currently no values for the flags argument, but it may be -+ * used in a future extension. -+ */ -+struct drm_vc4_create_bo { -+ __u32 size; -+ __u32 flags; -+ /** Returned GEM handle for the BO. */ -+ __u32 handle; -+ __u32 pad; -+}; -+ -+/** -+ * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs. -+ * -+ * This doesn't actually perform an mmap. Instead, it returns the -+ * offset you need to use in an mmap on the DRM device node. This -+ * means that tools like valgrind end up knowing about the mapped -+ * memory. -+ * -+ * There are currently no values for the flags argument, but it may be -+ * used in a future extension. -+ */ -+struct drm_vc4_mmap_bo { -+ /** Handle for the object being mapped. */ -+ __u32 handle; -+ __u32 flags; -+ /** offset into the drm node to use for subsequent mmap call. */ -+ __u64 offset; -+}; -+ -+#endif /* _UAPI_VC4_DRM_H_ */ diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-support-for-async-pageflips.patch b/debian/patches/features/arm/rpi/drm-vc4-add-support-for-async-pageflips.patch deleted file mode 100644 index 244981fee..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-add-support-for-async-pageflips.patch +++ /dev/null @@ -1,508 +0,0 @@ -From: Eric Anholt -Date: Mon, 30 Nov 2015 12:34:01 -0800 -Subject: [07/16] drm/vc4: Add support for async pageflips. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=b501bacc6060fd62654b756469cc3091eb53de3a - -An async pageflip stores the modeset to be done and executes it once -the BOs are ready to be displayed. 
This gets us about 3x performance
-in full screen rendering with pageflipping.
-
-Signed-off-by: Eric Anholt
---
- drivers/gpu/drm/vc4/vc4_crtc.c | 99 +++++++++++++++++++++++++-
- drivers/gpu/drm/vc4/vc4_drv.h | 16 +++++
- drivers/gpu/drm/vc4/vc4_gem.c | 40 +++++++++++
- drivers/gpu/drm/vc4/vc4_kms.c | 149 +++++++++++++++++++++++++++++++++++++++-
- drivers/gpu/drm/vc4/vc4_plane.c | 40 +++++++++++
- 5 files changed, 342 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
-index 7a9f476..a319332 100644
---- a/drivers/gpu/drm/vc4/vc4_crtc.c
-+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
-@@ -35,6 +35,7 @@
- #include "drm_atomic_helper.h"
- #include "drm_crtc_helper.h"
- #include "linux/clk.h"
-+#include "drm_fb_cma_helper.h"
- #include "linux/component.h"
- #include "linux/of_device.h"
- #include "vc4_drv.h"
-@@ -475,10 +476,106 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
- return ret;
- }
-
-+struct vc4_async_flip_state {
-+ struct drm_crtc *crtc;
-+ struct drm_framebuffer *fb;
-+ struct drm_pending_vblank_event *event;
-+
-+ struct vc4_seqno_cb cb;
-+};
-+
-+/* Called when the V3D execution for the BO being flipped to is done, so that
-+ * we can actually update the plane's address to point to it.
-+ */
-+static void
-+vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
-+{
-+ struct vc4_async_flip_state *flip_state =
-+ container_of(cb, struct vc4_async_flip_state, cb);
-+ struct drm_crtc *crtc = flip_state->crtc;
-+ struct drm_device *dev = crtc->dev;
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ struct drm_plane *plane = crtc->primary;
-+
-+ vc4_plane_async_set_fb(plane, flip_state->fb);
-+ if (flip_state->event) {
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dev->event_lock, flags);
-+ drm_crtc_send_vblank_event(crtc, flip_state->event);
-+ spin_unlock_irqrestore(&dev->event_lock, flags);
-+ }
-+
-+ drm_framebuffer_unreference(flip_state->fb);
-+ kfree(flip_state);
-+
-+ up(&vc4->async_modeset);
-+}
-+
-+/* Implements async (non-vblank-synced) page flips.
-+ *
-+ * The page flip ioctl needs to return immediately, so we grab the
-+ * modeset semaphore on the pipe, and queue the address update for
-+ * when V3D is done with the BO being flipped to.
-+ */
-+static int vc4_async_page_flip(struct drm_crtc *crtc,
-+ struct drm_framebuffer *fb,
-+ struct drm_pending_vblank_event *event,
-+ uint32_t flags)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ struct drm_plane *plane = crtc->primary;
-+ int ret = 0;
-+ struct vc4_async_flip_state *flip_state;
-+ struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
-+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
-+
-+ flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
-+ if (!flip_state)
-+ return -ENOMEM;
-+
-+ drm_framebuffer_reference(fb);
-+ flip_state->fb = fb;
-+ flip_state->crtc = crtc;
-+ flip_state->event = event;
-+
-+ /* Make sure all other async modesets have landed. */
-+ ret = down_interruptible(&vc4->async_modeset);
-+ if (ret) {
-+ kfree(flip_state);
-+ return ret;
-+ }
-+
-+ /* Immediately update the plane's legacy fb pointer, so that later
-+ * modeset prep sees the state that will be present when the semaphore
-+ * is released.
-+ */
-+ drm_atomic_set_fb_for_plane(plane->state, fb);
-+ plane->fb = fb;
-+
-+ vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
-+ vc4_async_page_flip_complete);
-+
-+ /* Driver takes ownership of state on successful async commit.
*/ -+ return 0; -+} -+ -+static int vc4_page_flip(struct drm_crtc *crtc, -+ struct drm_framebuffer *fb, -+ struct drm_pending_vblank_event *event, -+ uint32_t flags) -+{ -+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC) -+ return vc4_async_page_flip(crtc, fb, event, flags); -+ else -+ return drm_atomic_helper_page_flip(crtc, fb, event, flags); -+} -+ - static const struct drm_crtc_funcs vc4_crtc_funcs = { - .set_config = drm_atomic_helper_set_config, - .destroy = vc4_crtc_destroy, -- .page_flip = drm_atomic_helper_page_flip, -+ .page_flip = vc4_page_flip, - .set_property = NULL, - .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ - .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index 0bc8c57..f9927d8 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -76,6 +76,11 @@ struct vc4_dev { - wait_queue_head_t job_wait_queue; - struct work_struct job_done_work; - -+ /* List of struct vc4_seqno_cb for callbacks to be made from a -+ * workqueue when the given seqno is passed. -+ */ -+ struct list_head seqno_cb_list; -+ - /* The binner overflow memory that's currently set up in - * BPOA/BPOS registers. When overflow occurs and a new one is - * allocated, the previous one will be moved to -@@ -128,6 +133,12 @@ to_vc4_bo(struct drm_gem_object *bo) - return (struct vc4_bo *)bo; - } - -+struct vc4_seqno_cb { -+ struct work_struct work; -+ uint64_t seqno; -+ void (*func)(struct vc4_seqno_cb *cb); -+}; -+ - struct vc4_v3d { - struct platform_device *pdev; - void __iomem *regs; -@@ -384,6 +395,9 @@ void vc4_submit_next_job(struct drm_device *dev); - int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, - uint64_t timeout_ns, bool interruptible); - void vc4_job_handle_completed(struct vc4_dev *vc4); -+int vc4_queue_seqno_cb(struct drm_device *dev, -+ struct vc4_seqno_cb *cb, uint64_t seqno, -+ void (*func)(struct vc4_seqno_cb *cb)); - - /* vc4_hdmi.c */ - extern struct platform_driver vc4_hdmi_driver; -@@ -409,6 +423,8 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, - enum drm_plane_type type); - u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); - u32 vc4_plane_dlist_size(struct drm_plane_state *state); -+void vc4_plane_async_set_fb(struct drm_plane *plane, -+ struct drm_framebuffer *fb); - - /* vc4_v3d.c */ - extern struct platform_driver vc4_v3d_driver; -diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c -index 936dddf..5fb0556 100644 ---- a/drivers/gpu/drm/vc4/vc4_gem.c -+++ b/drivers/gpu/drm/vc4/vc4_gem.c -@@ -461,6 +461,7 @@ void - vc4_job_handle_completed(struct vc4_dev *vc4) - { - unsigned long irqflags; -+ struct vc4_seqno_cb *cb, *cb_temp; - - spin_lock_irqsave(&vc4->job_lock, irqflags); - while (!list_empty(&vc4->job_done_list)) { -@@ -473,7 +474,45 @@ vc4_job_handle_completed(struct vc4_dev *vc4) - vc4_complete_exec(vc4->dev, exec); - spin_lock_irqsave(&vc4->job_lock, irqflags); - } -+ -+ list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) { -+ if (cb->seqno <= vc4->finished_seqno) { -+ list_del_init(&cb->work.entry); -+ schedule_work(&cb->work); -+ } -+ } -+ -+ spin_unlock_irqrestore(&vc4->job_lock, irqflags); -+} -+ -+static void vc4_seqno_cb_work(struct work_struct *work) -+{ -+ struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work); -+ -+ cb->func(cb); -+} -+ -+int vc4_queue_seqno_cb(struct drm_device *dev, -+ struct vc4_seqno_cb *cb, uint64_t seqno, -+ void 
(*func)(struct vc4_seqno_cb *cb))
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ int ret = 0;
-+ unsigned long irqflags;
-+
-+ cb->func = func;
-+ INIT_WORK(&cb->work, vc4_seqno_cb_work);
-+
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+ if (seqno > vc4->finished_seqno) {
-+ cb->seqno = seqno;
-+ list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
-+ } else {
-+ schedule_work(&cb->work);
-+ }
- spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+
-+ return ret;
- }
-
- /* Scheduled when any job has been completed, this walks the list of
-@@ -610,6 +649,7 @@ vc4_gem_init(struct drm_device *dev)
-
- INIT_LIST_HEAD(&vc4->job_list);
- INIT_LIST_HEAD(&vc4->job_done_list);
-+ INIT_LIST_HEAD(&vc4->seqno_cb_list);
- spin_lock_init(&vc4->job_lock);
-
- INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
-diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
-index 2e5597d..f95f2df 100644
---- a/drivers/gpu/drm/vc4/vc4_kms.c
-+++ b/drivers/gpu/drm/vc4/vc4_kms.c
-@@ -15,6 +15,7 @@
- */
-
- #include "drm_crtc.h"
-+#include "drm_atomic.h"
- #include "drm_atomic_helper.h"
- #include "drm_crtc_helper.h"
- #include "drm_plane_helper.h"
-@@ -29,10 +30,152 @@ static void vc4_output_poll_changed(struct drm_device *dev)
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
- }
-
-+struct vc4_commit {
-+ struct drm_device *dev;
-+ struct drm_atomic_state *state;
-+ struct vc4_seqno_cb cb;
-+};
-+
-+static void
-+vc4_atomic_complete_commit(struct vc4_commit *c)
-+{
-+ struct drm_atomic_state *state = c->state;
-+ struct drm_device *dev = state->dev;
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ drm_atomic_helper_commit_modeset_disables(dev, state);
-+
-+ drm_atomic_helper_commit_planes(dev, state, false);
-+
-+ drm_atomic_helper_commit_modeset_enables(dev, state);
-+
-+ drm_atomic_helper_wait_for_vblanks(dev, state);
-+
-+ drm_atomic_helper_cleanup_planes(dev, state);
-+
-+ drm_atomic_state_free(state);
-+
-+ up(&vc4->async_modeset);
-+
-+ kfree(c);
-+}
-+
-+static void
-+vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
-+{
-+ struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
-+
-+ vc4_atomic_complete_commit(c);
-+}
-+
-+static struct vc4_commit *commit_init(struct drm_atomic_state *state)
-+{
-+ struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-+
-+ if (!c)
-+ return NULL;
-+ c->dev = state->dev;
-+ c->state = state;
-+
-+ return c;
-+}
-+
-+/**
-+ * vc4_atomic_commit - commit validated state object
-+ * @dev: DRM device
-+ * @state: the driver state object
-+ * @async: asynchronous commit
-+ *
-+ * This function commits a state object that has been pre-validated with
-+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
-+ * reservation fails. For now this doesn't implement asynchronous commits.
-+ *
-+ * RETURNS
-+ * Zero for success or -errno.
-+ */
-+static int vc4_atomic_commit(struct drm_device *dev,
-+ struct drm_atomic_state *state,
-+ bool async)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ int ret;
-+ int i;
-+ uint64_t wait_seqno = 0;
-+ struct vc4_commit *c;
-+
-+ c = commit_init(state);
-+ if (!c)
-+ return -ENOMEM;
-+
-+ /* Make sure that any outstanding modesets have finished.
*/ -+ ret = down_interruptible(&vc4->async_modeset); -+ if (ret) { -+ kfree(c); -+ return ret; -+ } -+ -+ ret = drm_atomic_helper_prepare_planes(dev, state); -+ if (ret) { -+ kfree(c); -+ up(&vc4->async_modeset); -+ return ret; -+ } -+ -+ for (i = 0; i < dev->mode_config.num_total_plane; i++) { -+ struct drm_plane *plane = state->planes[i]; -+ struct drm_plane_state *new_state = state->plane_states[i]; -+ -+ if (!plane) -+ continue; -+ -+ if ((plane->state->fb != new_state->fb) && new_state->fb) { -+ struct drm_gem_cma_object *cma_bo = -+ drm_fb_cma_get_gem_obj(new_state->fb, 0); -+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); -+ -+ wait_seqno = max(bo->seqno, wait_seqno); -+ } -+ } -+ -+ /* -+ * This is the point of no return - everything below never fails except -+ * when the hw goes bonghits. Which means we can commit the new state on -+ * the software side now. -+ */ -+ -+ drm_atomic_helper_swap_state(dev, state); -+ -+ /* -+ * Everything below can be run asynchronously without the need to grab -+ * any modeset locks at all under one condition: It must be guaranteed -+ * that the asynchronous work has either been cancelled (if the driver -+ * supports it, which at least requires that the framebuffers get -+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed -+ * before the new state gets committed on the software side with -+ * drm_atomic_helper_swap_state(). -+ * -+ * This scheme allows new atomic state updates to be prepared and -+ * checked in parallel to the asynchronous completion of the previous -+ * update. Which is important since compositors need to figure out the -+ * composition of the next frame right after having submitted the -+ * current layout. -+ */ -+ -+ if (async) { -+ vc4_queue_seqno_cb(dev, &c->cb, wait_seqno, -+ vc4_atomic_complete_commit_seqno_cb); -+ } else { -+ vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false); -+ vc4_atomic_complete_commit(c); -+ } -+ -+ return 0; -+} -+ - static const struct drm_mode_config_funcs vc4_mode_funcs = { - .output_poll_changed = vc4_output_poll_changed, - .atomic_check = drm_atomic_helper_check, -- .atomic_commit = drm_atomic_helper_commit, -+ .atomic_commit = vc4_atomic_commit, - .fb_create = drm_fb_cma_create, - }; - -@@ -41,6 +184,8 @@ int vc4_kms_load(struct drm_device *dev) - struct vc4_dev *vc4 = to_vc4_dev(dev); - int ret; - -+ sema_init(&vc4->async_modeset, 1); -+ - ret = drm_vblank_init(dev, dev->mode_config.num_crtc); - if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); -@@ -51,6 +196,8 @@ int vc4_kms_load(struct drm_device *dev) - dev->mode_config.max_height = 2048; - dev->mode_config.funcs = &vc4_mode_funcs; - dev->mode_config.preferred_depth = 24; -+ dev->mode_config.async_page_flip = true; -+ - dev->vblank_disable_allowed = true; - - drm_mode_config_reset(dev); -diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c -index cdd8b10..db32c373 100644 ---- a/drivers/gpu/drm/vc4/vc4_plane.c -+++ b/drivers/gpu/drm/vc4/vc4_plane.c -@@ -29,6 +29,14 @@ struct vc4_plane_state { - u32 *dlist; - u32 dlist_size; /* Number of dwords in allocated for the display list */ - u32 dlist_count; /* Number of used dwords in the display list. */ -+ -+ /* Offset in the dlist to pointer word 0. */ -+ u32 pw0_offset; -+ -+ /* Offset where the plane's dlist was last stored in the -+ hardware at vc4_crtc_atomic_flush() time. 
-+ */
-+ u32 *hw_dlist;
-+};
-
- static inline struct vc4_plane_state *
-@@ -197,6 +205,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
- /* Position Word 3: Context. Written by the HVS. */
- vc4_dlist_write(vc4_state, 0xc0c0c0c0);
-
-+ vc4_state->pw0_offset = vc4_state->dlist_count;
-+
- /* Pointer Word 0: RGB / Y Pointer */
- vc4_dlist_write(vc4_state, bo->paddr + offset);
-
-@@ -248,6 +258,8 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- int i;
-
-+ vc4_state->hw_dlist = dlist;
-+
- /* Can't memcpy_toio() because it needs to be 32-bit writes. */
- for (i = 0; i < vc4_state->dlist_count; i++)
- writel(vc4_state->dlist[i], &dlist[i]);
-@@ -262,6 +274,34 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state)
- return vc4_state->dlist_count;
- }
-
-+/* Updates the plane to immediately (well, once the FIFO needs
-+ * refilling) scan out from a new framebuffer.
-+ */
-+void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
-+{
-+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
-+ struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
-+ uint32_t addr;
-+
-+ /* We're skipping the address adjustment for negative origin,
-+ * because this is only called on the primary plane.
-+ */
-+ WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
-+ addr = bo->paddr + fb->offsets[0];
-+
-+ /* Write the new address into the hardware immediately. The
-+ * scanout will start from this address as soon as the FIFO
-+ * needs to refill with pixels.
-+ */
-+ writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
-+
-+ /* Also update the CPU-side dlist copy, so that any later
-+ * atomic updates that don't do a new modeset on our plane
-+ * also use our updated address.
-+ */
-+ vc4_state->dlist[vc4_state->pw0_offset] = addr;
-+}
-+
- static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
- .prepare_fb = NULL,
- .cleanup_fb = NULL,
diff --git a/debian/patches/features/arm/rpi/drm-vc4-add-support-for-drawing-3d-frames.patch b/debian/patches/features/arm/rpi/drm-vc4-add-support-for-drawing-3d-frames.patch
deleted file mode 100644
index af34dc8e7..000000000
--- a/debian/patches/features/arm/rpi/drm-vc4-add-support-for-drawing-3d-frames.patch
+++ /dev/null
@@ -1,3474 +0,0 @@
-From: Eric Anholt
-Date: Mon, 30 Nov 2015 12:13:37 -0800
-Subject: [06/16] drm/vc4: Add support for drawing 3D frames.
-Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=d5b1a78a772f1e31a94f8babfa964152ec5e9aa5
-
-The user submission is basically a pointer to a command list and a
-pointer to uniforms. We copy those into the kernel, validate and
-relocate them, and store the result in a GPU BO which we queue for
-execution.
-
-v2: Drop support for NV shader recs (not necessary for GL), simplify
- vc4_use_bo(), improve bin flush/semaphore checks, use __u32 style
- types.
- -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/Makefile | 7 + - drivers/gpu/drm/vc4/vc4_drv.c | 15 +- - drivers/gpu/drm/vc4/vc4_drv.h | 182 +++++++ - drivers/gpu/drm/vc4/vc4_gem.c | 642 +++++++++++++++++++++++ - drivers/gpu/drm/vc4/vc4_irq.c | 210 ++++++++ - drivers/gpu/drm/vc4/vc4_packet.h | 399 +++++++++++++++ - drivers/gpu/drm/vc4/vc4_render_cl.c | 634 +++++++++++++++++++++++ - drivers/gpu/drm/vc4/vc4_trace.h | 63 +++ - drivers/gpu/drm/vc4/vc4_trace_points.c | 14 + - drivers/gpu/drm/vc4/vc4_v3d.c | 37 ++ - drivers/gpu/drm/vc4/vc4_validate.c | 900 +++++++++++++++++++++++++++++++++ - include/uapi/drm/vc4_drm.h | 141 ++++++ - 12 files changed, 3243 insertions(+), 1 deletion(-) - create mode 100644 drivers/gpu/drm/vc4/vc4_gem.c - create mode 100644 drivers/gpu/drm/vc4/vc4_irq.c - create mode 100644 drivers/gpu/drm/vc4/vc4_packet.h - create mode 100644 drivers/gpu/drm/vc4/vc4_render_cl.c - create mode 100644 drivers/gpu/drm/vc4/vc4_trace.h - create mode 100644 drivers/gpu/drm/vc4/vc4_trace_points.c - create mode 100644 drivers/gpu/drm/vc4/vc4_validate.c - -diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile -index e87a6f2..4c6a99f 100644 ---- a/drivers/gpu/drm/vc4/Makefile -+++ b/drivers/gpu/drm/vc4/Makefile -@@ -8,12 +8,19 @@ vc4-y := \ - vc4_crtc.o \ - vc4_drv.o \ - vc4_kms.o \ -+ vc4_gem.o \ - vc4_hdmi.o \ - vc4_hvs.o \ -+ vc4_irq.o \ - vc4_plane.o \ -+ vc4_render_cl.o \ -+ vc4_trace_points.o \ - vc4_v3d.o \ -+ vc4_validate.o \ - vc4_validate_shaders.o - - vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o - - obj-$(CONFIG_DRM_VC4) += vc4.o -+ -+CFLAGS_vc4_trace_points.o := -I$(src) -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index db58d74..2cfee59 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -74,6 +74,9 @@ static const struct file_operations vc4_drm_fops = { - }; - - static const struct drm_ioctl_desc vc4_drm_ioctls[] = { -+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), -+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), -+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), -@@ -83,10 +86,16 @@ static struct drm_driver vc4_drm_driver = { - .driver_features = (DRIVER_MODESET | - DRIVER_ATOMIC | - DRIVER_GEM | -+ DRIVER_HAVE_IRQ | - DRIVER_PRIME), - .lastclose = vc4_lastclose, - .preclose = vc4_drm_preclose, - -+ .irq_handler = vc4_irq, -+ .irq_preinstall = vc4_irq_preinstall, -+ .irq_postinstall = vc4_irq_postinstall, -+ .irq_uninstall = vc4_irq_uninstall, -+ - .enable_vblank = vc4_enable_vblank, - .disable_vblank = vc4_disable_vblank, - .get_vblank_counter = drm_vblank_count, -@@ -181,9 +190,11 @@ static int vc4_drm_bind(struct device *dev) - if (ret) - goto unref; - -+ vc4_gem_init(drm); -+ - ret = component_bind_all(dev, drm); - if (ret) -- goto unref; -+ goto gem_destroy; - - ret = drm_dev_register(drm, 0); - if (ret < 0) -@@ -207,6 +218,8 @@ unregister: - drm_dev_unregister(drm); - unbind_all: - component_unbind_all(dev, drm); -+gem_destroy: -+ vc4_gem_destroy(drm); - unref: - drm_dev_unref(drm); - vc4_bo_cache_destroy(drm); -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index 8945463..0bc8c57 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -49,6 +49,48 @@ struct vc4_dev { - - /* Protects bo_cache and the BO stats. 
*/ - struct mutex bo_lock; -+ -+ /* Sequence number for the last job queued in job_list. -+ * Starts at 0 (no jobs emitted). -+ */ -+ uint64_t emit_seqno; -+ -+ /* Sequence number for the last completed job on the GPU. -+ * Starts at 0 (no jobs completed). -+ */ -+ uint64_t finished_seqno; -+ -+ /* List of all struct vc4_exec_info for jobs to be executed. -+ * The first job in the list is the one currently programmed -+ * into ct0ca/ct1ca for execution. -+ */ -+ struct list_head job_list; -+ /* List of the finished vc4_exec_infos waiting to be freed by -+ * job_done_work. -+ */ -+ struct list_head job_done_list; -+ /* Spinlock used to synchronize the job_list and seqno -+ * accesses between the IRQ handler and GEM ioctls. -+ */ -+ spinlock_t job_lock; -+ wait_queue_head_t job_wait_queue; -+ struct work_struct job_done_work; -+ -+ /* The binner overflow memory that's currently set up in -+ * BPOA/BPOS registers. When overflow occurs and a new one is -+ * allocated, the previous one will be moved to -+ * vc4->current_exec's free list. -+ */ -+ struct vc4_bo *overflow_mem; -+ struct work_struct overflow_mem_work; -+ -+ struct { -+ uint32_t last_ct0ca, last_ct1ca; -+ struct timer_list timer; -+ struct work_struct reset_work; -+ } hangcheck; -+ -+ struct semaphore async_modeset; - }; - - static inline struct vc4_dev * -@@ -60,6 +102,9 @@ to_vc4_dev(struct drm_device *dev) - struct vc4_bo { - struct drm_gem_cma_object base; - -+ /* seqno of the last job to render to this BO. */ -+ uint64_t seqno; -+ - /* List entry for the BO's position in either - * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list - */ -@@ -130,6 +175,101 @@ to_vc4_encoder(struct drm_encoder *encoder) - #define HVS_READ(offset) readl(vc4->hvs->regs + offset) - #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) - -+struct vc4_exec_info { -+ /* Sequence number for this bin/render job. */ -+ uint64_t seqno; -+ -+ /* Kernel-space copy of the ioctl arguments */ -+ struct drm_vc4_submit_cl *args; -+ -+ /* This is the array of BOs that were looked up at the start of exec. -+ * Command validation will use indices into this array. -+ */ -+ struct drm_gem_cma_object **bo; -+ uint32_t bo_count; -+ -+ /* Pointers for our position in vc4->job_list */ -+ struct list_head head; -+ -+ /* List of other BOs used in the job that need to be released -+ * once the job is complete. -+ */ -+ struct list_head unref_list; -+ -+ /* Current unvalidated indices into @bo loaded by the non-hardware -+ * VC4_PACKET_GEM_HANDLES. -+ */ -+ uint32_t bo_index[2]; -+ -+ /* This is the BO where we store the validated command lists, shader -+ * records, and uniforms. -+ */ -+ struct drm_gem_cma_object *exec_bo; -+ -+ /** -+ * This tracks the per-shader-record state (packet 64) that -+ * determines the length of the shader record and the offset -+ * it's expected to be found at. It gets read in from the -+ * command lists. -+ */ -+ struct vc4_shader_state { -+ uint32_t addr; -+ /* Maximum vertex index referenced by any primitive using this -+ * shader state. -+ */ -+ uint32_t max_index; -+ } *shader_state; -+ -+ /** How many shader states the user declared they were using. */ -+ uint32_t shader_state_size; -+ /** How many shader state records the validator has seen. 
*/ -+ uint32_t shader_state_count; -+ -+ bool found_tile_binning_mode_config_packet; -+ bool found_start_tile_binning_packet; -+ bool found_increment_semaphore_packet; -+ bool found_flush; -+ uint8_t bin_tiles_x, bin_tiles_y; -+ struct drm_gem_cma_object *tile_bo; -+ uint32_t tile_alloc_offset; -+ -+ /** -+ * Computed addresses pointing into exec_bo where we start the -+ * bin thread (ct0) and render thread (ct1). -+ */ -+ uint32_t ct0ca, ct0ea; -+ uint32_t ct1ca, ct1ea; -+ -+ /* Pointer to the unvalidated bin CL (if present). */ -+ void *bin_u; -+ -+ /* Pointers to the shader recs. These paddr gets incremented as CL -+ * packets are relocated in validate_gl_shader_state, and the vaddrs -+ * (u and v) get incremented and size decremented as the shader recs -+ * themselves are validated. -+ */ -+ void *shader_rec_u; -+ void *shader_rec_v; -+ uint32_t shader_rec_p; -+ uint32_t shader_rec_size; -+ -+ /* Pointers to the uniform data. These pointers are incremented, and -+ * size decremented, as each batch of uniforms is uploaded. -+ */ -+ void *uniforms_u; -+ void *uniforms_v; -+ uint32_t uniforms_p; -+ uint32_t uniforms_size; -+}; -+ -+static inline struct vc4_exec_info * -+vc4_first_job(struct vc4_dev *vc4) -+{ -+ if (list_empty(&vc4->job_list)) -+ return NULL; -+ return list_first_entry(&vc4->job_list, struct vc4_exec_info, head); -+} -+ - /** - * struct vc4_texture_sample_info - saves the offsets into the UBO for texture - * setup parameters. -@@ -231,10 +371,31 @@ void vc4_debugfs_cleanup(struct drm_minor *minor); - /* vc4_drv.c */ - void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); - -+/* vc4_gem.c */ -+void vc4_gem_init(struct drm_device *dev); -+void vc4_gem_destroy(struct drm_device *dev); -+int vc4_submit_cl_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+void vc4_submit_next_job(struct drm_device *dev); -+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, -+ uint64_t timeout_ns, bool interruptible); -+void vc4_job_handle_completed(struct vc4_dev *vc4); -+ - /* vc4_hdmi.c */ - extern struct platform_driver vc4_hdmi_driver; - int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused); - -+/* vc4_irq.c */ -+irqreturn_t vc4_irq(int irq, void *arg); -+void vc4_irq_preinstall(struct drm_device *dev); -+int vc4_irq_postinstall(struct drm_device *dev); -+void vc4_irq_uninstall(struct drm_device *dev); -+void vc4_irq_reset(struct drm_device *dev); -+ - /* vc4_hvs.c */ - extern struct platform_driver vc4_hvs_driver; - void vc4_hvs_dump_state(struct drm_device *dev); -@@ -253,6 +414,27 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state); - extern struct platform_driver vc4_v3d_driver; - int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); - int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); -+int vc4_v3d_set_power(struct vc4_dev *vc4, bool on); -+ -+/* vc4_validate.c */ -+int -+vc4_validate_bin_cl(struct drm_device *dev, -+ void *validated, -+ void *unvalidated, -+ struct vc4_exec_info *exec); -+ -+int -+vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec); -+ -+struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec, -+ uint32_t hindex); -+ -+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec); -+ -+bool vc4_check_tex_size(struct vc4_exec_info *exec, -+ struct 
drm_gem_cma_object *fbo,
-+ uint32_t offset, uint8_t tiling_format,
-+ uint32_t width, uint32_t height, uint8_t cpp);
-
- /* vc4_validate_shader.c */
- struct vc4_validated_shader_info *
-diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
-new file mode 100644
-index 0000000..936dddf
---- /dev/null
-+++ b/drivers/gpu/drm/vc4/vc4_gem.c
-@@ -0,0 +1,642 @@
-+/*
-+ * Copyright © 2014 Broadcom
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/device.h>
-+#include <linux/io.h>
-+
-+#include "uapi/drm/vc4_drm.h"
-+#include "vc4_drv.h"
-+#include "vc4_regs.h"
-+#include "vc4_trace.h"
-+
-+static void
-+vc4_queue_hangcheck(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ mod_timer(&vc4->hangcheck.timer,
-+ round_jiffies_up(jiffies + msecs_to_jiffies(100)));
-+}
-+
-+static void
-+vc4_reset(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ DRM_INFO("Resetting GPU.\n");
-+ vc4_v3d_set_power(vc4, false);
-+ vc4_v3d_set_power(vc4, true);
-+
-+ vc4_irq_reset(dev);
-+
-+ /* Rearm the hangcheck -- another job might have been waiting
-+ * for our hung one to get kicked off, and vc4_irq_reset()
-+ * would have started it.
-+ */
-+ vc4_queue_hangcheck(dev);
-+}
-+
-+static void
-+vc4_reset_work(struct work_struct *work)
-+{
-+ struct vc4_dev *vc4 =
-+ container_of(work, struct vc4_dev, hangcheck.reset_work);
-+
-+ vc4_reset(vc4->dev);
-+}
-+
-+static void
-+vc4_hangcheck_elapsed(unsigned long data)
-+{
-+ struct drm_device *dev = (struct drm_device *)data;
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ uint32_t ct0ca, ct1ca;
-+
-+ /* If idle, we can stop watching for hangs. */
-+ if (list_empty(&vc4->job_list))
-+ return;
-+
-+ ct0ca = V3D_READ(V3D_CTNCA(0));
-+ ct1ca = V3D_READ(V3D_CTNCA(1));
-+
-+ /* If we've made any progress in execution, rearm the timer
-+ * and wait.
-+ */
-+ if (ct0ca != vc4->hangcheck.last_ct0ca ||
-+ ct1ca != vc4->hangcheck.last_ct1ca) {
-+ vc4->hangcheck.last_ct0ca = ct0ca;
-+ vc4->hangcheck.last_ct1ca = ct1ca;
-+ vc4_queue_hangcheck(dev);
-+ return;
-+ }
-+
-+ /* We've gone too long with no progress, reset. This has to
-+ * be done from a work struct, since resetting can sleep and
-+ * this timer hook isn't allowed to.
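-+ * (Timer callbacks fire in atomic context, while vc4_reset() above
-+ * calls vc4_v3d_set_power(), which may block; deferring to
-+ * hangcheck.reset_work moves the reset into process context where
-+ * sleeping is allowed.)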
-+ */
-+ schedule_work(&vc4->hangcheck.reset_work);
-+}
-+
-+static void
-+submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ /* Set the current and end address of the control list.
-+ * Writing the end register is what starts the job.
-+ */
-+ V3D_WRITE(V3D_CTNCA(thread), start);
-+ V3D_WRITE(V3D_CTNEA(thread), end);
-+}
-+
-+int
-+vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
-+ bool interruptible)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ int ret = 0;
-+ unsigned long timeout_expire;
-+ DEFINE_WAIT(wait);
-+
-+ if (vc4->finished_seqno >= seqno)
-+ return 0;
-+
-+ if (timeout_ns == 0)
-+ return -ETIME;
-+
-+ timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
-+
-+ trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
-+ for (;;) {
-+ prepare_to_wait(&vc4->job_wait_queue, &wait,
-+ interruptible ? TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+
-+ if (interruptible && signal_pending(current)) {
-+ ret = -ERESTARTSYS;
-+ break;
-+ }
-+
-+ if (vc4->finished_seqno >= seqno)
-+ break;
-+
-+ if (timeout_ns != ~0ull) {
-+ if (time_after_eq(jiffies, timeout_expire)) {
-+ ret = -ETIME;
-+ break;
-+ }
-+ schedule_timeout(timeout_expire - jiffies);
-+ } else {
-+ schedule();
-+ }
-+ }
-+
-+ finish_wait(&vc4->job_wait_queue, &wait);
-+ trace_vc4_wait_for_seqno_end(dev, seqno);
-+
-+ if (ret && ret != -ERESTARTSYS) {
-+ DRM_ERROR("timeout waiting for render thread idle\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static void
-+vc4_flush_caches(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ /* Flush the GPU L2 caches. These caches sit on top of system
-+ * L3 (the 128kb or so shared with the CPU), and are
-+ * non-allocating in the L3.
-+ */
-+ V3D_WRITE(V3D_L2CACTL,
-+ V3D_L2CACTL_L2CCLR);
-+
-+ V3D_WRITE(V3D_SLCACTL,
-+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
-+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
-+ VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
-+ VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
-+}
-+
-+/* Sets the registers for the next job to actually be executed in
-+ * the hardware.
-+ *
-+ * The job_lock should be held during this.
-+ */
-+void
-+vc4_submit_next_job(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ struct vc4_exec_info *exec = vc4_first_job(vc4);
-+
-+ if (!exec)
-+ return;
-+
-+ vc4_flush_caches(dev);
-+
-+ /* Disable the binner's pre-loaded overflow memory address */
-+ V3D_WRITE(V3D_BPOA, 0);
-+ V3D_WRITE(V3D_BPOS, 0);
-+
-+ if (exec->ct0ca != exec->ct0ea)
-+ submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
-+ submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
-+}
-+
-+static void
-+vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
-+{
-+ struct vc4_bo *bo;
-+ unsigned i;
-+
-+ for (i = 0; i < exec->bo_count; i++) {
-+ bo = to_vc4_bo(&exec->bo[i]->base);
-+ bo->seqno = seqno;
-+ }
-+
-+ list_for_each_entry(bo, &exec->unref_list, unref_head) {
-+ bo->seqno = seqno;
-+ }
-+}
-+
-+/* Queues a struct vc4_exec_info for execution. If no job is
-+ * currently executing, then submits it.
-+ *
-+ * Unlike most GPUs, our hardware only handles one command list at a
-+ * time. To queue multiple jobs at once, we'd need to edit the
-+ * previous command list to have a jump to the new one at the end, and
-+ * then bump the end address. That's a change for a later date,
-+ * though.
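-+ * (A rough sketch of how that chaining could look, not implemented
-+ * here: while job A is still being parsed, patch A's final
-+ * VC4_PACKET_HALT into a VC4_PACKET_BRANCH aimed at job B's ct0ca,
-+ * then push V3D_CTNEA(0) out to B's ct0ea so the thread runs straight
-+ * through both lists.)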
-+ */
-+static void
-+vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ uint64_t seqno;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+
-+ seqno = ++vc4->emit_seqno;
-+ exec->seqno = seqno;
-+ vc4_update_bo_seqnos(exec, seqno);
-+
-+ list_add_tail(&exec->head, &vc4->job_list);
-+
-+ /* If no job was executing, kick ours off. Otherwise, it'll
-+ * get started when the previous job's frame done interrupt
-+ * occurs.
-+ */
-+ if (vc4_first_job(vc4) == exec) {
-+ vc4_submit_next_job(dev);
-+ vc4_queue_hangcheck(dev);
-+ }
-+
-+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+}
-+
-+/**
-+ * Looks up a bunch of GEM handles for BOs and stores the array for
-+ * use in the command validator that actually writes relocated
-+ * addresses pointing to them.
-+ */
-+static int
-+vc4_cl_lookup_bos(struct drm_device *dev,
-+ struct drm_file *file_priv,
-+ struct vc4_exec_info *exec)
-+{
-+ struct drm_vc4_submit_cl *args = exec->args;
-+ uint32_t *handles;
-+ int ret = 0;
-+ int i;
-+
-+ exec->bo_count = args->bo_handle_count;
-+
-+ if (!exec->bo_count) {
-+ /* See comment on bo_index for why we have to check
-+ * this.
-+ */
-+ DRM_ERROR("Rendering requires BOs to validate\n");
-+ return -EINVAL;
-+ }
-+
-+ exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
-+ GFP_KERNEL);
-+ if (!exec->bo) {
-+ DRM_ERROR("Failed to allocate validated BO pointers\n");
-+ return -ENOMEM;
-+ }
-+
-+ handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
-+ if (!handles) {
-+ DRM_ERROR("Failed to allocate incoming GEM handles\n");
-+ ret = -ENOMEM;
-+ goto fail;
-+ }
-+
-+ ret = copy_from_user(handles,
-+ (void __user *)(uintptr_t)args->bo_handles,
-+ exec->bo_count * sizeof(uint32_t));
-+ if (ret) {
-+ DRM_ERROR("Failed to copy in GEM handles\n");
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+
-+ spin_lock(&file_priv->table_lock);
-+ for (i = 0; i < exec->bo_count; i++) {
-+ struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
-+ handles[i]);
-+ if (!bo) {
-+ DRM_ERROR("Failed to look up GEM BO %d: %d\n",
-+ i, handles[i]);
-+ ret = -EINVAL;
-+ spin_unlock(&file_priv->table_lock);
-+ goto fail;
-+ }
-+ drm_gem_object_reference(bo);
-+ exec->bo[i] = (struct drm_gem_cma_object *)bo;
-+ }
-+ spin_unlock(&file_priv->table_lock);
-+
-+fail:
-+ drm_free_large(handles);
-+ return ret;
-+}
-+
-+static int
-+vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
-+{
-+ struct drm_vc4_submit_cl *args = exec->args;
-+ void *temp = NULL;
-+ void *bin;
-+ int ret = 0;
-+ uint32_t bin_offset = 0;
-+ uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
-+ 16);
-+ uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
-+ uint32_t exec_size = uniforms_offset + args->uniforms_size;
-+ uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
-+ args->shader_rec_count);
-+ struct vc4_bo *bo;
-+
-+ if (uniforms_offset < shader_rec_offset ||
-+ exec_size < uniforms_offset ||
-+ args->shader_rec_count >= (UINT_MAX /
-+ sizeof(struct vc4_shader_state)) ||
-+ temp_size < exec_size) {
-+ DRM_ERROR("overflow in exec arguments\n");
-+ ret = -EINVAL;
-+ goto fail;
-+ }
-+
-+ /* Allocate space where we'll store the copied in user command lists
-+ * and shader records.
-+ *
-+ * We don't just copy directly into the BOs because we need to
-+ * read the contents back for validation, and I think the
-+ * bo->vaddr is uncached access.
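-+ * (The validator reads the CL contents back byte by byte, and doing
-+ * that through an uncached mapping bypasses the CPU caches entirely,
-+ * so copying into cached kmalloc memory first is presumably cheaper.)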
-+ */
-+ temp = kmalloc(temp_size, GFP_KERNEL);
-+ if (!temp) {
-+ DRM_ERROR("Failed to allocate storage for copying "
-+ "in bin/render CLs.\n");
-+ ret = -ENOMEM;
-+ goto fail;
-+ }
-+ bin = temp + bin_offset;
-+ exec->shader_rec_u = temp + shader_rec_offset;
-+ exec->uniforms_u = temp + uniforms_offset;
-+ exec->shader_state = temp + exec_size;
-+ exec->shader_state_size = args->shader_rec_count;
-+
-+ ret = copy_from_user(bin,
-+ (void __user *)(uintptr_t)args->bin_cl,
-+ args->bin_cl_size);
-+ if (ret) {
-+ DRM_ERROR("Failed to copy in bin cl\n");
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+
-+ ret = copy_from_user(exec->shader_rec_u,
-+ (void __user *)(uintptr_t)args->shader_rec,
-+ args->shader_rec_size);
-+ if (ret) {
-+ DRM_ERROR("Failed to copy in shader recs\n");
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+
-+ ret = copy_from_user(exec->uniforms_u,
-+ (void __user *)(uintptr_t)args->uniforms,
-+ args->uniforms_size);
-+ if (ret) {
-+ DRM_ERROR("Failed to copy in uniforms cl\n");
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+
-+ bo = vc4_bo_create(dev, exec_size, true);
-+ if (!bo) {
-+ DRM_ERROR("Couldn't allocate BO for binning\n");
-+ ret = -ENOMEM;
-+ goto fail;
-+ }
-+ exec->exec_bo = &bo->base;
-+
-+ list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
-+ &exec->unref_list);
-+
-+ exec->ct0ca = exec->exec_bo->paddr + bin_offset;
-+
-+ exec->bin_u = bin;
-+
-+ exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
-+ exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
-+ exec->shader_rec_size = args->shader_rec_size;
-+
-+ exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
-+ exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
-+ exec->uniforms_size = args->uniforms_size;
-+
-+ ret = vc4_validate_bin_cl(dev,
-+ exec->exec_bo->vaddr + bin_offset,
-+ bin,
-+ exec);
-+ if (ret)
-+ goto fail;
-+
-+ ret = vc4_validate_shader_recs(dev, exec);
-+
-+fail:
-+ kfree(temp);
-+ return ret;
-+}
-+
-+static void
-+vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
-+{
-+ unsigned i;
-+
-+ /* Need the struct lock for drm_gem_object_unreference(). */
-+ mutex_lock(&dev->struct_mutex);
-+ if (exec->bo) {
-+ for (i = 0; i < exec->bo_count; i++)
-+ drm_gem_object_unreference(&exec->bo[i]->base);
-+ kfree(exec->bo);
-+ }
-+
-+ while (!list_empty(&exec->unref_list)) {
-+ struct vc4_bo *bo = list_first_entry(&exec->unref_list,
-+ struct vc4_bo, unref_head);
-+ list_del(&bo->unref_head);
-+ drm_gem_object_unreference(&bo->base.base);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ kfree(exec);
-+}
-+
-+void
-+vc4_job_handle_completed(struct vc4_dev *vc4)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+ while (!list_empty(&vc4->job_done_list)) {
-+ struct vc4_exec_info *exec =
-+ list_first_entry(&vc4->job_done_list,
-+ struct vc4_exec_info, head);
-+ list_del(&exec->head);
-+
-+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+ vc4_complete_exec(vc4->dev, exec);
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+ }
-+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+}
-+
-+/* Scheduled when any job has been completed, this walks the list of
-+ * jobs that have completed and unrefs their BOs and frees their exec
-+ * structs.
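-+ * (This can't be done from the interrupt handler itself:
-+ * vc4_complete_exec() above takes dev->struct_mutex for
-+ * drm_gem_object_unreference(), and mutexes may sleep, so the IRQ
-+ * path only moves jobs to job_done_list and schedules this worker.)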
-+ */ -+static void -+vc4_job_done_work(struct work_struct *work) -+{ -+ struct vc4_dev *vc4 = -+ container_of(work, struct vc4_dev, job_done_work); -+ -+ vc4_job_handle_completed(vc4); -+} -+ -+static int -+vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev, -+ uint64_t seqno, -+ uint64_t *timeout_ns) -+{ -+ unsigned long start = jiffies; -+ int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true); -+ -+ if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) { -+ uint64_t delta = jiffies_to_nsecs(jiffies - start); -+ -+ if (*timeout_ns >= delta) -+ *timeout_ns -= delta; -+ } -+ -+ return ret; -+} -+ -+int -+vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_vc4_wait_seqno *args = data; -+ -+ return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, -+ &args->timeout_ns); -+} -+ -+int -+vc4_wait_bo_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_vc4_wait_bo *args = data; -+ struct drm_gem_object *gem_obj; -+ struct vc4_bo *bo; -+ -+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); -+ if (!gem_obj) { -+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); -+ return -EINVAL; -+ } -+ bo = to_vc4_bo(gem_obj); -+ -+ ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, -+ &args->timeout_ns); -+ -+ drm_gem_object_unreference_unlocked(gem_obj); -+ return ret; -+} -+ -+/** -+ * Submits a command list to the VC4. -+ * -+ * This is what is called batchbuffer emitting on other hardware. -+ */ -+int -+vc4_submit_cl_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ struct drm_vc4_submit_cl *args = data; -+ struct vc4_exec_info *exec; -+ int ret; -+ -+ if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { -+ DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); -+ return -EINVAL; -+ } -+ -+ exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); -+ if (!exec) { -+ DRM_ERROR("malloc failure on exec struct\n"); -+ return -ENOMEM; -+ } -+ -+ exec->args = args; -+ INIT_LIST_HEAD(&exec->unref_list); -+ -+ ret = vc4_cl_lookup_bos(dev, file_priv, exec); -+ if (ret) -+ goto fail; -+ -+ if (exec->args->bin_cl_size != 0) { -+ ret = vc4_get_bcl(dev, exec); -+ if (ret) -+ goto fail; -+ } else { -+ exec->ct0ca = 0; -+ exec->ct0ea = 0; -+ } -+ -+ ret = vc4_get_rcl(dev, exec); -+ if (ret) -+ goto fail; -+ -+ /* Clear this out of the struct we'll be putting in the queue, -+ * since it's part of our stack. -+ */ -+ exec->args = NULL; -+ -+ vc4_queue_submit(dev, exec); -+ -+ /* Return the seqno for our job. */ -+ args->seqno = vc4->emit_seqno; -+ -+ return 0; -+ -+fail: -+ vc4_complete_exec(vc4->dev, exec); -+ -+ return ret; -+} -+ -+void -+vc4_gem_init(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ INIT_LIST_HEAD(&vc4->job_list); -+ INIT_LIST_HEAD(&vc4->job_done_list); -+ spin_lock_init(&vc4->job_lock); -+ -+ INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work); -+ setup_timer(&vc4->hangcheck.timer, -+ vc4_hangcheck_elapsed, -+ (unsigned long)dev); -+ -+ INIT_WORK(&vc4->job_done_work, vc4_job_done_work); -+} -+ -+void -+vc4_gem_destroy(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ /* Waiting for exec to finish would need to be done before -+ * unregistering V3D. -+ */ -+ WARN_ON(vc4->emit_seqno != vc4->finished_seqno); -+ -+ /* V3D should already have disabled its interrupt and cleared -+ * the overflow allocation registers. Now free the object. 
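-+ * (No job_lock is taken here; by this point vc4_irq_uninstall()
-+ * should already have disabled our IRQs and run cancel_work_sync()
-+ * on the overflow worker, so nothing else can still be writing
-+ * vc4->overflow_mem.)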
-+ */
-+ if (vc4->overflow_mem) {
-+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
-+ vc4->overflow_mem = NULL;
-+ }
-+
-+ vc4_bo_cache_destroy(dev);
-+}
-diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
-new file mode 100644
-index 0000000..b68060e
---- /dev/null
-+++ b/drivers/gpu/drm/vc4/vc4_irq.c
-@@ -0,0 +1,210 @@
-+/*
-+ * Copyright © 2014 Broadcom
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+/** DOC: Interrupt management for the V3D engine.
-+ *
-+ * We have an interrupt status register (V3D_INTCTL) which reports
-+ * interrupts, and where writing 1 bits clears those interrupts.
-+ * There are also a pair of interrupt registers
-+ * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
-+ * disables that specific interrupt, and 0s written are ignored
-+ * (reading either one returns the set of enabled interrupts).
-+ *
-+ * When we take a render frame interrupt, we need to wake the
-+ * processes waiting for some frame to be done, and get the next frame
-+ * submitted ASAP (so the hardware doesn't sit idle when there's work
-+ * to do).
-+ *
-+ * When we take the binner out-of-memory interrupt, we need to
-+ * allocate some new memory and pass it to the binner so that the
-+ * current job can make progress.
-+ */
-+
-+#include "vc4_drv.h"
-+#include "vc4_regs.h"
-+
-+#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
-+ V3D_INT_FRDONE)
-+
-+DECLARE_WAIT_QUEUE_HEAD(render_wait);
-+
-+static void
-+vc4_overflow_mem_work(struct work_struct *work)
-+{
-+ struct vc4_dev *vc4 =
-+ container_of(work, struct vc4_dev, overflow_mem_work);
-+ struct drm_device *dev = vc4->dev;
-+ struct vc4_bo *bo;
-+
-+ bo = vc4_bo_create(dev, 256 * 1024, true);
-+ if (!bo) {
-+ DRM_ERROR("Couldn't allocate binner overflow mem\n");
-+ return;
-+ }
-+
-+ /* If there's a job executing currently, then our previous
-+ * overflow allocation is getting used in that job and we need
-+ * to queue it to be released when the job is done. But if no
-+ * job is executing at all, then we can free the old overflow
-+ * object directly.
-+ *
-+ * No lock necessary for this pointer since we're the only
-+ * ones that update the pointer, and our workqueue won't
-+ * reenter.
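-+ * (The IRQ handler disables V3D_INT_OUTOMEM before scheduling this
-+ * work, and a given work item is never run concurrently with itself,
-+ * so treating this function as the single writer looks safe.)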
-+ */
-+ if (vc4->overflow_mem) {
-+ struct vc4_exec_info *current_exec;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+ current_exec = vc4_first_job(vc4);
-+ if (current_exec) {
-+ vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
-+ list_add_tail(&vc4->overflow_mem->unref_head,
-+ &current_exec->unref_list);
-+ vc4->overflow_mem = NULL;
-+ }
-+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+ }
-+
-+ if (vc4->overflow_mem)
-+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
-+ vc4->overflow_mem = bo;
-+
-+ V3D_WRITE(V3D_BPOA, bo->base.paddr);
-+ V3D_WRITE(V3D_BPOS, bo->base.base.size);
-+ V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
-+ V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
-+}
-+
-+static void
-+vc4_irq_finish_job(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ struct vc4_exec_info *exec = vc4_first_job(vc4);
-+
-+ if (!exec)
-+ return;
-+
-+ vc4->finished_seqno++;
-+ list_move_tail(&exec->head, &vc4->job_done_list);
-+ vc4_submit_next_job(dev);
-+
-+ wake_up_all(&vc4->job_wait_queue);
-+ schedule_work(&vc4->job_done_work);
-+}
-+
-+irqreturn_t
-+vc4_irq(int irq, void *arg)
-+{
-+ struct drm_device *dev = arg;
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ uint32_t intctl;
-+ irqreturn_t status = IRQ_NONE;
-+
-+ barrier();
-+ intctl = V3D_READ(V3D_INTCTL);
-+
-+ /* Acknowledge the interrupts we're handling here. The render
-+ * frame done interrupt will be cleared, while OUTOMEM will
-+ * stay high until the underlying cause is cleared.
-+ */
-+ V3D_WRITE(V3D_INTCTL, intctl);
-+
-+ if (intctl & V3D_INT_OUTOMEM) {
-+ /* Disable OUTOMEM until the work is done. */
-+ V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
-+ schedule_work(&vc4->overflow_mem_work);
-+ status = IRQ_HANDLED;
-+ }
-+
-+ if (intctl & V3D_INT_FRDONE) {
-+ spin_lock(&vc4->job_lock);
-+ vc4_irq_finish_job(dev);
-+ spin_unlock(&vc4->job_lock);
-+ status = IRQ_HANDLED;
-+ }
-+
-+ return status;
-+}
-+
-+void
-+vc4_irq_preinstall(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ init_waitqueue_head(&vc4->job_wait_queue);
-+ INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
-+
-+ /* Clear any pending interrupts someone might have left around
-+ * for us.
-+ */
-+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
-+}
-+
-+int
-+vc4_irq_postinstall(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ /* Enable both the render done and out of memory interrupts. */
-+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
-+
-+ return 0;
-+}
-+
-+void
-+vc4_irq_uninstall(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+
-+ /* Disable sending interrupts for our driver's IRQs. */
-+ V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
-+
-+ /* Clear any pending interrupts we might have left. */
-+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
-+
-+ cancel_work_sync(&vc4->overflow_mem_work);
-+}
-+
-+/** Reinitializes interrupt registers when a GPU reset is performed. */
-+void vc4_irq_reset(struct drm_device *dev)
-+{
-+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-+ unsigned long irqflags;
-+
-+ /* Acknowledge any stale IRQs. */
-+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
-+
-+ /*
-+ * Turn all our interrupts on. Binner out of memory is the
-+ * only one we expect to trigger at this point, since we've
-+ * just come from poweron and haven't supplied any overflow
-+ * memory yet.
-+ */
-+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
-+
-+ spin_lock_irqsave(&vc4->job_lock, irqflags);
-+ vc4_irq_finish_job(dev);
-+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-+}
-diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
-new file mode 100644
-index 0000000..0f31cc0
---- /dev/null
-+++ b/drivers/gpu/drm/vc4/vc4_packet.h
-@@ -0,0 +1,399 @@
-+/*
-+ * Copyright © 2014 Broadcom
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef VC4_PACKET_H
-+#define VC4_PACKET_H
-+
-+#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
-+
-+enum vc4_packet {
-+ VC4_PACKET_HALT = 0,
-+ VC4_PACKET_NOP = 1,
-+
-+ VC4_PACKET_FLUSH = 4,
-+ VC4_PACKET_FLUSH_ALL = 5,
-+ VC4_PACKET_START_TILE_BINNING = 6,
-+ VC4_PACKET_INCREMENT_SEMAPHORE = 7,
-+ VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
-+
-+ VC4_PACKET_BRANCH = 16,
-+ VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
-+
-+ VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
-+ VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
-+ VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
-+ VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
-+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
-+ VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
-+
-+ VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
-+ VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
-+
-+ VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
-+ VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
-+
-+ VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
-+
-+ VC4_PACKET_GL_SHADER_STATE = 64,
-+ VC4_PACKET_NV_SHADER_STATE = 65,
-+ VC4_PACKET_VG_SHADER_STATE = 66,
-+
-+ VC4_PACKET_CONFIGURATION_BITS = 96,
-+ VC4_PACKET_FLAT_SHADE_FLAGS = 97,
-+ VC4_PACKET_POINT_SIZE = 98,
-+ VC4_PACKET_LINE_WIDTH = 99,
-+ VC4_PACKET_RHT_X_BOUNDARY = 100,
-+ VC4_PACKET_DEPTH_OFFSET = 101,
-+ VC4_PACKET_CLIP_WINDOW = 102,
-+ VC4_PACKET_VIEWPORT_OFFSET = 103,
-+ VC4_PACKET_Z_CLIPPING = 104,
-+ VC4_PACKET_CLIPPER_XY_SCALING = 105,
-+ VC4_PACKET_CLIPPER_Z_SCALING = 106,
-+
-+ VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
-+ VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
-+ VC4_PACKET_CLEAR_COLORS = 114,
-+ VC4_PACKET_TILE_COORDINATES = 115,
-+
-+ /* Not an actual hardware packet -- this is what we use to put
-+ * references to GEM bos in the command stream, since we need the u32
-+ * in the actual address packet in order to store the offset from the
-+ * start of the BO.
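-+ * (Illustrative flow: a 9-byte GEM_HANDLES packet carries two u32
-+ * indices into the submitted BO table (see bo_index[2] in
-+ * vc4_exec_info); the following packet stores only a u32 offset, and
-+ * the validator rewrites that to the BO's paddr plus the offset while
-+ * copying the CL into exec_bo.)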
-+ */
-+ VC4_PACKET_GEM_HANDLES = 254,
-+} __attribute__ ((__packed__));
-+
-+#define VC4_PACKET_HALT_SIZE 1
-+#define VC4_PACKET_NOP_SIZE 1
-+#define VC4_PACKET_FLUSH_SIZE 1
-+#define VC4_PACKET_FLUSH_ALL_SIZE 1
-+#define VC4_PACKET_START_TILE_BINNING_SIZE 1
-+#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
-+#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
-+#define VC4_PACKET_BRANCH_SIZE 5
-+#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
-+#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
-+#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
-+#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
-+#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
-+#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
-+#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
-+#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
-+#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
-+#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
-+#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
-+#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
-+#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
-+#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
-+#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
-+#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
-+#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
-+#define VC4_PACKET_POINT_SIZE_SIZE 5
-+#define VC4_PACKET_LINE_WIDTH_SIZE 5
-+#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
-+#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
-+#define VC4_PACKET_CLIP_WINDOW_SIZE 9
-+#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
-+#define VC4_PACKET_Z_CLIPPING_SIZE 9
-+#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
-+#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
-+#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
-+#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
-+#define VC4_PACKET_CLEAR_COLORS_SIZE 14
-+#define VC4_PACKET_TILE_COORDINATES_SIZE 3
-+#define VC4_PACKET_GEM_HANDLES_SIZE 9
-+
-+/* Number of multisamples supported. */
-+#define VC4_MAX_SAMPLES 4
-+/* Size of a full resolution color or Z tile buffer load/store. */
-+#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
-+
-+/** @{
-+ * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
-+ * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
-+*/
-+#define VC4_TILING_FORMAT_LINEAR 0
-+#define VC4_TILING_FORMAT_T 1
-+#define VC4_TILING_FORMAT_LT 2
-+/** @} */
-+
-+/** @{
-+ *
-+ * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
-+ * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
-+ */
-+#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
-+#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
-+#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
-+#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
-+
-+/** @{
-+ *
-+ * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
-+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
-+ */
-+
-+#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
-+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
-+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
-+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
-+
-+/** @} */
-+
-+/** @{
-+ *
-+ * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
-+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
-+ */
-+#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
-+#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
-+#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
-+#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
-+
-+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
-+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
-+#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
-+#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
-+#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
-+/** @} */
-+
-+/** @{
-+ *
-+ * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
-+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
-+ */
-+#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
-+#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
-+#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
-+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
-+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
-+
-+/** The values of the field are VC4_TILING_FORMAT_* */
-+#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
-+#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
-+
-+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
-+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
-+#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
-+#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
-+#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
-+#define VC4_LOADSTORE_TILE_BUFFER_Z 3
-+#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
-+#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
-+/** @} */
-+
-+#define VC4_INDEX_BUFFER_U8 (0 << 4)
-+#define VC4_INDEX_BUFFER_U16 (1 << 4)
-+
-+/* This flag is only present in NV shader state. */
-+#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
-+#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
-+#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
-+#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
-+
-+/** @{ byte 2 of config bits. */
-+#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
-+#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
-+/** @} */
-+
-+/** @{ byte 1 of config bits. */
-+#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
-+/** same values in this 3-bit field as PIPE_FUNC_* */
-+#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
-+#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
-+
-+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
-+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
-+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
-+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
-+
-+#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
-+/** @} */
-+
-+/** @{ byte 0 of config bits.
*/ -+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6) -+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6) -+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6) -+ -+#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4) -+#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3) -+#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2) -+#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1) -+#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0) -+/** @} */ -+ -+/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */ -+#define VC4_BIN_CONFIG_DB_NON_MS BIT(7) -+ -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5) -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5 -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0 -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1 -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2 -+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3 -+ -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3) -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3 -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0 -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1 -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2 -+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3 -+ -+#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2) -+#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1) -+#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0) -+/** @} */ -+ -+/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */ -+#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12) -+#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11) -+#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10) -+#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9) -+#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8) -+ -+/** The values of the field are VC4_TILING_FORMAT_* */ -+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6) -+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6 -+ -+#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4) -+#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4) -+#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4) -+ -+#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2) -+#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2 -+#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0 -+#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1 -+#define VC4_RENDER_CONFIG_FORMAT_BGR565 2 -+ -+#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1) -+#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0) -+ -+#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4) -+#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4) -+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0) -+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0) -+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0) -+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0) -+ -+enum vc4_texture_data_type { -+ VC4_TEXTURE_TYPE_RGBA8888 = 0, -+ VC4_TEXTURE_TYPE_RGBX8888 = 1, -+ VC4_TEXTURE_TYPE_RGBA4444 = 2, -+ VC4_TEXTURE_TYPE_RGBA5551 = 3, -+ VC4_TEXTURE_TYPE_RGB565 = 4, -+ VC4_TEXTURE_TYPE_LUMINANCE = 5, -+ VC4_TEXTURE_TYPE_ALPHA = 6, -+ VC4_TEXTURE_TYPE_LUMALPHA = 7, -+ VC4_TEXTURE_TYPE_ETC1 = 8, -+ VC4_TEXTURE_TYPE_S16F = 9, -+ VC4_TEXTURE_TYPE_S8 = 10, -+ VC4_TEXTURE_TYPE_S16 = 11, -+ VC4_TEXTURE_TYPE_BW1 = 12, -+ VC4_TEXTURE_TYPE_A4 = 13, -+ VC4_TEXTURE_TYPE_A1 = 14, -+ VC4_TEXTURE_TYPE_RGBA64 = 15, -+ VC4_TEXTURE_TYPE_RGBA32R = 16, -+ VC4_TEXTURE_TYPE_YUV422R = 17, -+}; -+ -+#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12) -+#define VC4_TEX_P0_OFFSET_SHIFT 12 -+#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10) -+#define VC4_TEX_P0_CSWIZ_SHIFT 10 -+#define VC4_TEX_P0_CMMODE_MASK 
VC4_MASK(9, 9) -+#define VC4_TEX_P0_CMMODE_SHIFT 9 -+#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8) -+#define VC4_TEX_P0_FLIPY_SHIFT 8 -+#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4) -+#define VC4_TEX_P0_TYPE_SHIFT 4 -+#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0) -+#define VC4_TEX_P0_MIPLVLS_SHIFT 0 -+ -+#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31) -+#define VC4_TEX_P1_TYPE4_SHIFT 31 -+#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20) -+#define VC4_TEX_P1_HEIGHT_SHIFT 20 -+#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19) -+#define VC4_TEX_P1_ETCFLIP_SHIFT 19 -+#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8) -+#define VC4_TEX_P1_WIDTH_SHIFT 8 -+ -+#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7) -+#define VC4_TEX_P1_MAGFILT_SHIFT 7 -+# define VC4_TEX_P1_MAGFILT_LINEAR 0 -+# define VC4_TEX_P1_MAGFILT_NEAREST 1 -+ -+#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4) -+#define VC4_TEX_P1_MINFILT_SHIFT 4 -+# define VC4_TEX_P1_MINFILT_LINEAR 0 -+# define VC4_TEX_P1_MINFILT_NEAREST 1 -+# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2 -+# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3 -+# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4 -+# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5 -+ -+#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2) -+#define VC4_TEX_P1_WRAP_T_SHIFT 2 -+#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0) -+#define VC4_TEX_P1_WRAP_S_SHIFT 0 -+# define VC4_TEX_P1_WRAP_REPEAT 0 -+# define VC4_TEX_P1_WRAP_CLAMP 1 -+# define VC4_TEX_P1_WRAP_MIRROR 2 -+# define VC4_TEX_P1_WRAP_BORDER 3 -+ -+#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30) -+#define VC4_TEX_P2_PTYPE_SHIFT 30 -+# define VC4_TEX_P2_PTYPE_IGNORED 0 -+# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1 -+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2 -+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3 -+ -+/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */ -+#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12) -+#define VC4_TEX_P2_CMST_SHIFT 12 -+#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0) -+#define VC4_TEX_P2_BSLOD_SHIFT 0 -+ -+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */ -+#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12) -+#define VC4_TEX_P2_CHEIGHT_SHIFT 12 -+#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0) -+#define VC4_TEX_P2_CWIDTH_SHIFT 0 -+ -+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */ -+#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12) -+#define VC4_TEX_P2_CYOFF_SHIFT 12 -+#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0) -+#define VC4_TEX_P2_CXOFF_SHIFT 0 -+ -+#endif /* VC4_PACKET_H */ -diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c -new file mode 100644 -index 0000000..8a2a312 ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c -@@ -0,0 +1,634 @@ -+/* -+ * Copyright © 2014-2015 Broadcom -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. 
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+/**
-+ * DOC: Render command list generation
-+ *
-+ * In the VC4 driver, render command list generation is performed by the
-+ * kernel instead of userspace. We do this because validating a
-+ * user-submitted command list is hard to get right and has high CPU overhead,
-+ * while the number of valid configurations for render command lists is
-+ * actually fairly low.
-+ */
-+
-+#include "uapi/drm/vc4_drm.h"
-+#include "vc4_drv.h"
-+#include "vc4_packet.h"
-+
-+struct vc4_rcl_setup {
-+ struct drm_gem_cma_object *color_read;
-+ struct drm_gem_cma_object *color_write;
-+ struct drm_gem_cma_object *zs_read;
-+ struct drm_gem_cma_object *zs_write;
-+ struct drm_gem_cma_object *msaa_color_write;
-+ struct drm_gem_cma_object *msaa_zs_write;
-+
-+ struct drm_gem_cma_object *rcl;
-+ u32 next_offset;
-+};
-+
-+static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
-+{
-+ *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
-+ setup->next_offset += 1;
-+}
-+
-+static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
-+{
-+ *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
-+ setup->next_offset += 2;
-+}
-+
-+static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
-+{
-+ *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
-+ setup->next_offset += 4;
-+}
-+
-+/*
-+ * Emits a no-op STORE_TILE_BUFFER_GENERAL.
-+ *
-+ * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
-+ * some sort before another load is triggered.
-+ */
-+static void vc4_store_before_load(struct vc4_rcl_setup *setup)
-+{
-+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
-+ rcl_u16(setup,
-+ VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
-+ VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
-+ VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
-+ VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
-+ VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
-+ rcl_u32(setup, 0); /* no address, since we're in None mode */
-+}
-+
-+/*
-+ * Calculates the physical address of the start of a tile in an RCL surface.
-+ *
-+ * Unlike the other load/store packets,
-+ * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile
-+ * coordinates packet, and instead just store to the address given.
-+ */
-+static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
-+ struct drm_gem_cma_object *bo,
-+ struct drm_vc4_submit_rcl_surface *surf,
-+ uint8_t x, uint8_t y)
-+{
-+ return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
-+ (DIV_ROUND_UP(exec->args->width, 32) * y + x);
-+}
-+
-+/*
-+ * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
-+ *
-+ * Tile coordinates packets trigger a pending load if there is one, are
-+ * used for clipping during rendering, and determine where loads/stores happen
-+ * relative to their base address.
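-+ * (Roughly, emit_tile() below emits per tile: TILE_COORDINATES(x, y),
-+ * any loads, a BRANCH_TO_SUB_LIST into that tile's bin list, then the
-+ * stores, with an extra TILE_COORDINATES between consecutive stores.)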
-+ */ -+static void vc4_tile_coordinates(struct vc4_rcl_setup *setup, -+ uint32_t x, uint32_t y) -+{ -+ rcl_u8(setup, VC4_PACKET_TILE_COORDINATES); -+ rcl_u8(setup, x); -+ rcl_u8(setup, y); -+} -+ -+static void emit_tile(struct vc4_exec_info *exec, -+ struct vc4_rcl_setup *setup, -+ uint8_t x, uint8_t y, bool first, bool last) -+{ -+ struct drm_vc4_submit_cl *args = exec->args; -+ bool has_bin = args->bin_cl_size != 0; -+ -+ /* Note that the load doesn't actually occur until the -+ * tile coords packet is processed, and only one load -+ * may be outstanding at a time. -+ */ -+ if (setup->color_read) { -+ if (args->color_read.flags & -+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { -+ rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER); -+ rcl_u32(setup, -+ vc4_full_res_offset(exec, setup->color_read, -+ &args->color_read, x, y) | -+ VC4_LOADSTORE_FULL_RES_DISABLE_ZS); -+ } else { -+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL); -+ rcl_u16(setup, args->color_read.bits); -+ rcl_u32(setup, setup->color_read->paddr + -+ args->color_read.offset); -+ } -+ } -+ -+ if (setup->zs_read) { -+ if (args->zs_read.flags & -+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { -+ rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER); -+ rcl_u32(setup, -+ vc4_full_res_offset(exec, setup->zs_read, -+ &args->zs_read, x, y) | -+ VC4_LOADSTORE_FULL_RES_DISABLE_COLOR); -+ } else { -+ if (setup->color_read) { -+ /* Exec previous load. */ -+ vc4_tile_coordinates(setup, x, y); -+ vc4_store_before_load(setup); -+ } -+ -+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL); -+ rcl_u16(setup, args->zs_read.bits); -+ rcl_u32(setup, setup->zs_read->paddr + -+ args->zs_read.offset); -+ } -+ } -+ -+ /* Clipping depends on tile coordinates having been -+ * emitted, so we always need one here. -+ */ -+ vc4_tile_coordinates(setup, x, y); -+ -+ /* Wait for the binner before jumping to the first -+ * tile's lists. -+ */ -+ if (first && has_bin) -+ rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE); -+ -+ if (has_bin) { -+ rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST); -+ rcl_u32(setup, (exec->tile_bo->paddr + -+ exec->tile_alloc_offset + -+ (y * exec->bin_tiles_x + x) * 32)); -+ } -+ -+ if (setup->msaa_color_write) { -+ bool last_tile_write = (!setup->msaa_zs_write && -+ !setup->zs_write && -+ !setup->color_write); -+ uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS; -+ -+ if (!last_tile_write) -+ bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL; -+ else if (last) -+ bits |= VC4_LOADSTORE_FULL_RES_EOF; -+ rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER); -+ rcl_u32(setup, -+ vc4_full_res_offset(exec, setup->msaa_color_write, -+ &args->msaa_color_write, x, y) | -+ bits); -+ } -+ -+ if (setup->msaa_zs_write) { -+ bool last_tile_write = (!setup->zs_write && -+ !setup->color_write); -+ uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR; -+ -+ if (setup->msaa_color_write) -+ vc4_tile_coordinates(setup, x, y); -+ if (!last_tile_write) -+ bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL; -+ else if (last) -+ bits |= VC4_LOADSTORE_FULL_RES_EOF; -+ rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER); -+ rcl_u32(setup, -+ vc4_full_res_offset(exec, setup->msaa_zs_write, -+ &args->msaa_zs_write, x, y) | -+ bits); -+ } -+ -+ if (setup->zs_write) { -+ bool last_tile_write = !setup->color_write; -+ -+ if (setup->msaa_color_write || setup->msaa_zs_write) -+ vc4_tile_coordinates(setup, x, y); -+ -+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL); -+ rcl_u16(setup, args->zs_write.bits | -+ (last_tile_write ? 
-+ 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR)); -+ rcl_u32(setup, -+ (setup->zs_write->paddr + args->zs_write.offset) | -+ ((last && last_tile_write) ? -+ VC4_LOADSTORE_TILE_BUFFER_EOF : 0)); -+ } -+ -+ if (setup->color_write) { -+ if (setup->msaa_color_write || setup->msaa_zs_write || -+ setup->zs_write) { -+ vc4_tile_coordinates(setup, x, y); -+ } -+ -+ if (last) -+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF); -+ else -+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER); -+ } -+} -+ -+static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, -+ struct vc4_rcl_setup *setup) -+{ -+ struct drm_vc4_submit_cl *args = exec->args; -+ bool has_bin = args->bin_cl_size != 0; -+ uint8_t min_x_tile = args->min_x_tile; -+ uint8_t min_y_tile = args->min_y_tile; -+ uint8_t max_x_tile = args->max_x_tile; -+ uint8_t max_y_tile = args->max_y_tile; -+ uint8_t xtiles = max_x_tile - min_x_tile + 1; -+ uint8_t ytiles = max_y_tile - min_y_tile + 1; -+ uint8_t x, y; -+ uint32_t size, loop_body_size; -+ -+ size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE; -+ loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE; -+ -+ if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) { -+ size += VC4_PACKET_CLEAR_COLORS_SIZE + -+ VC4_PACKET_TILE_COORDINATES_SIZE + -+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; -+ } -+ -+ if (setup->color_read) { -+ if (args->color_read.flags & -+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { -+ loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE; -+ } else { -+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE; -+ } -+ } -+ if (setup->zs_read) { -+ if (args->zs_read.flags & -+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { -+ loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE; -+ } else { -+ if (setup->color_read && -+ !(args->color_read.flags & -+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) { -+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE; -+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; -+ } -+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE; -+ } -+ } -+ -+ if (has_bin) { -+ size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE; -+ loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE; -+ } -+ -+ if (setup->msaa_color_write) -+ loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE; -+ if (setup->msaa_zs_write) -+ loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE; -+ -+ if (setup->zs_write) -+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; -+ if (setup->color_write) -+ loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE; -+ -+ /* We need a VC4_PACKET_TILE_COORDINATES in between each store. */ -+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE * -+ ((setup->msaa_color_write != NULL) + -+ (setup->msaa_zs_write != NULL) + -+ (setup->color_write != NULL) + -+ (setup->zs_write != NULL) - 1); -+ -+ size += xtiles * ytiles * loop_body_size; -+ -+ setup->rcl = &vc4_bo_create(dev, size, true)->base; -+ if (!setup->rcl) -+ return -ENOMEM; -+ list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, -+ &exec->unref_list); -+ -+ rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG); -+ rcl_u32(setup, -+ (setup->color_write ? (setup->color_write->paddr + -+ args->color_write.offset) : -+ 0)); -+ rcl_u16(setup, args->width); -+ rcl_u16(setup, args->height); -+ rcl_u16(setup, args->color_write.bits); -+ -+ /* The tile buffer gets cleared when the previous tile is stored. 
If
-+ * the clear values changed between frames, then the tile buffer has
-+ * stale clear values in it, so we have to do a store in None mode (no
-+ * writes) so that we trigger the tile buffer clear.
-+ */
-+ if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
-+ rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
-+ rcl_u32(setup, args->clear_color[0]);
-+ rcl_u32(setup, args->clear_color[1]);
-+ rcl_u32(setup, args->clear_z);
-+ rcl_u8(setup, args->clear_s);
-+
-+ vc4_tile_coordinates(setup, 0, 0);
-+
-+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
-+ rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
-+ rcl_u32(setup, 0); /* no address, since we're in None mode */
-+ }
-+
-+ for (y = min_y_tile; y <= max_y_tile; y++) {
-+ for (x = min_x_tile; x <= max_x_tile; x++) {
-+ bool first = (x == min_x_tile && y == min_y_tile);
-+ bool last = (x == max_x_tile && y == max_y_tile);
-+
-+ emit_tile(exec, setup, x, y, first, last);
-+ }
-+ }
-+
-+ BUG_ON(setup->next_offset != size);
-+ exec->ct1ca = setup->rcl->paddr;
-+ exec->ct1ea = setup->rcl->paddr + setup->next_offset;
-+
-+ return 0;
-+}
-+
-+static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
-+ struct drm_gem_cma_object *obj,
-+ struct drm_vc4_submit_rcl_surface *surf)
-+{
-+ struct drm_vc4_submit_cl *args = exec->args;
-+ u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
-+
-+ if (surf->offset > obj->base.size) {
-+ DRM_ERROR("surface offset %d > BO size %zd\n",
-+ surf->offset, obj->base.size);
-+ return -EINVAL;
-+ }
-+
-+ if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
-+ render_tiles_stride * args->max_y_tile + args->max_x_tile) {
-+ DRM_ERROR("MSAA tile %d, %d out of bounds "
-+ "(bo size %zd, offset %d).\n",
-+ args->max_x_tile, args->max_y_tile,
-+ obj->base.size,
-+ surf->offset);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
-+ struct drm_gem_cma_object **obj,
-+ struct drm_vc4_submit_rcl_surface *surf)
-+{
-+ if (surf->flags != 0 || surf->bits != 0) {
-+ DRM_ERROR("MSAA surface had nonzero flags/bits\n");
-+ return -EINVAL;
-+ }
-+
-+ if (surf->hindex == ~0)
-+ return 0;
-+
-+ *obj = vc4_use_bo(exec, surf->hindex);
-+ if (!*obj)
-+ return -EINVAL;
-+
-+ if (surf->offset & 0xf) {
-+ DRM_ERROR("MSAA write must be 16b aligned.\n");
-+ return -EINVAL;
-+ }
-+
-+ return vc4_full_res_bounds_check(exec, *obj, surf);
-+}
-+
-+static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
-+ struct drm_gem_cma_object **obj,
-+ struct drm_vc4_submit_rcl_surface *surf)
-+{
-+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
-+ VC4_LOADSTORE_TILE_BUFFER_TILING);
-+ uint8_t buffer = VC4_GET_FIELD(surf->bits,
-+ VC4_LOADSTORE_TILE_BUFFER_BUFFER);
-+ uint8_t format = VC4_GET_FIELD(surf->bits,
-+ VC4_LOADSTORE_TILE_BUFFER_FORMAT);
-+ int cpp;
-+ int ret;
-+
-+ if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
-+ DRM_ERROR("Extra flags set\n");
-+ return -EINVAL;
-+ }
-+
-+ if (surf->hindex == ~0)
-+ return 0;
-+
-+ *obj = vc4_use_bo(exec, surf->hindex);
-+ if (!*obj)
-+ return -EINVAL;
-+
-+ if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
-+ if (surf == &exec->args->zs_write) {
-+ DRM_ERROR("general zs write may not be a full-res.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (surf->bits != 0) {
-+ DRM_ERROR("load/store general bits set with "
-+ "full res load/store.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = vc4_full_res_bounds_check(exec, *obj, surf);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+ }
-+
-+ if (surf->bits &
~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK | -+ VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK | -+ VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) { -+ DRM_ERROR("Unknown bits in load/store: 0x%04x\n", -+ surf->bits); -+ return -EINVAL; -+ } -+ -+ if (tiling > VC4_TILING_FORMAT_LT) { -+ DRM_ERROR("Bad tiling format\n"); -+ return -EINVAL; -+ } -+ -+ if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) { -+ if (format != 0) { -+ DRM_ERROR("No color format should be set for ZS\n"); -+ return -EINVAL; -+ } -+ cpp = 4; -+ } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) { -+ switch (format) { -+ case VC4_LOADSTORE_TILE_BUFFER_BGR565: -+ case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER: -+ cpp = 2; -+ break; -+ case VC4_LOADSTORE_TILE_BUFFER_RGBA8888: -+ cpp = 4; -+ break; -+ default: -+ DRM_ERROR("Bad tile buffer format\n"); -+ return -EINVAL; -+ } -+ } else { -+ DRM_ERROR("Bad load/store buffer %d.\n", buffer); -+ return -EINVAL; -+ } -+ -+ if (surf->offset & 0xf) { -+ DRM_ERROR("load/store buffer must be 16b aligned.\n"); -+ return -EINVAL; -+ } -+ -+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling, -+ exec->args->width, exec->args->height, cpp)) { -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int -+vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, -+ struct vc4_rcl_setup *setup, -+ struct drm_gem_cma_object **obj, -+ struct drm_vc4_submit_rcl_surface *surf) -+{ -+ uint8_t tiling = VC4_GET_FIELD(surf->bits, -+ VC4_RENDER_CONFIG_MEMORY_FORMAT); -+ uint8_t format = VC4_GET_FIELD(surf->bits, -+ VC4_RENDER_CONFIG_FORMAT); -+ int cpp; -+ -+ if (surf->flags != 0) { -+ DRM_ERROR("No flags supported on render config.\n"); -+ return -EINVAL; -+ } -+ -+ if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK | -+ VC4_RENDER_CONFIG_FORMAT_MASK | -+ VC4_RENDER_CONFIG_MS_MODE_4X | -+ VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { -+ DRM_ERROR("Unknown bits in render config: 0x%04x\n", -+ surf->bits); -+ return -EINVAL; -+ } -+ -+ if (surf->hindex == ~0) -+ return 0; -+ -+ *obj = vc4_use_bo(exec, surf->hindex); -+ if (!*obj) -+ return -EINVAL; -+ -+ if (tiling > VC4_TILING_FORMAT_LT) { -+ DRM_ERROR("Bad tiling format\n"); -+ return -EINVAL; -+ } -+ -+ switch (format) { -+ case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED: -+ case VC4_RENDER_CONFIG_FORMAT_BGR565: -+ cpp = 2; -+ break; -+ case VC4_RENDER_CONFIG_FORMAT_RGBA8888: -+ cpp = 4; -+ break; -+ default: -+ DRM_ERROR("Bad tile buffer format\n"); -+ return -EINVAL; -+ } -+ -+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling, -+ exec->args->width, exec->args->height, cpp)) { -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) -+{ -+ struct vc4_rcl_setup setup = {0}; -+ struct drm_vc4_submit_cl *args = exec->args; -+ bool has_bin = args->bin_cl_size != 0; -+ int ret; -+ -+ if (args->min_x_tile > args->max_x_tile || -+ args->min_y_tile > args->max_y_tile) { -+ DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", -+ args->min_x_tile, args->min_y_tile, -+ args->max_x_tile, args->max_y_tile); -+ return -EINVAL; -+ } -+ -+ if (has_bin && -+ (args->max_x_tile > exec->bin_tiles_x || -+ args->max_y_tile > exec->bin_tiles_y)) { -+ DRM_ERROR("Render tiles (%d,%d) outside of bin config " -+ "(%d,%d)\n", -+ args->max_x_tile, args->max_y_tile, -+ exec->bin_tiles_x, exec->bin_tiles_y); -+ return -EINVAL; -+ } -+ -+ ret = vc4_rcl_render_config_surface_setup(exec, &setup, -+ &setup.color_write, -+ &args->color_write); -+ if (ret) -+ return ret; -+ -+ ret = vc4_rcl_surface_setup(exec, &setup.color_read, 
&args->color_read); -+ if (ret) -+ return ret; -+ -+ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read); -+ if (ret) -+ return ret; -+ -+ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write); -+ if (ret) -+ return ret; -+ -+ ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write, -+ &args->msaa_color_write); -+ if (ret) -+ return ret; -+ -+ ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write, -+ &args->msaa_zs_write); -+ if (ret) -+ return ret; -+ -+ /* We shouldn't even have the job submitted to us if there's no -+ * surface to write out. -+ */ -+ if (!setup.color_write && !setup.zs_write && -+ !setup.msaa_color_write && !setup.msaa_zs_write) { -+ DRM_ERROR("RCL requires color or Z/S write\n"); -+ return -EINVAL; -+ } -+ -+ return vc4_create_rcl_bo(dev, exec, &setup); -+} -diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h -new file mode 100644 -index 0000000..ad7b1ea ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_trace.h -@@ -0,0 +1,63 @@ -+/* -+ * Copyright (C) 2015 Broadcom -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) -+#define _VC4_TRACE_H_ -+ -+#include <linux/stringify.h> -+#include <linux/types.h> -+#include <linux/tracepoint.h> -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM vc4 -+#define TRACE_INCLUDE_FILE vc4_trace -+ -+TRACE_EVENT(vc4_wait_for_seqno_begin, -+ TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout), -+ TP_ARGS(dev, seqno, timeout), -+ -+ TP_STRUCT__entry( -+ __field(u32, dev) -+ __field(u64, seqno) -+ __field(u64, timeout) -+ ), -+ -+ TP_fast_assign( -+ __entry->dev = dev->primary->index; -+ __entry->seqno = seqno; -+ __entry->timeout = timeout; -+ ), -+ -+ TP_printk("dev=%u, seqno=%llu, timeout=%llu", -+ __entry->dev, __entry->seqno, __entry->timeout) -+); -+ -+TRACE_EVENT(vc4_wait_for_seqno_end, -+ TP_PROTO(struct drm_device *dev, uint64_t seqno), -+ TP_ARGS(dev, seqno), -+ -+ TP_STRUCT__entry( -+ __field(u32, dev) -+ __field(u64, seqno) -+ ), -+ -+ TP_fast_assign( -+ __entry->dev = dev->primary->index; -+ __entry->seqno = seqno; -+ ), -+ -+ TP_printk("dev=%u, seqno=%llu", -+ __entry->dev, __entry->seqno) -+); -+ -+#endif /* _VC4_TRACE_H_ */ -+ -+/* This part must be outside protection */ -+#undef TRACE_INCLUDE_PATH -+#define TRACE_INCLUDE_PATH . -+#include <trace/define_trace.h> -diff --git a/drivers/gpu/drm/vc4/vc4_trace_points.c b/drivers/gpu/drm/vc4/vc4_trace_points.c -new file mode 100644 -index 0000000..e6278f2 ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_trace_points.c -@@ -0,0 +1,14 @@ -+/* -+ * Copyright (C) 2015 Broadcom -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#include "vc4_drv.h" -+ -+#ifndef __CHECKER__ -+#define CREATE_TRACE_POINTS -+#include "vc4_trace.h" -+#endif -diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c -index 040ad0d..424d515 100644 ---- a/drivers/gpu/drm/vc4/vc4_v3d.c -+++ b/drivers/gpu/drm/vc4/vc4_v3d.c -@@ -144,6 +144,21 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) - } - #endif /* CONFIG_DEBUG_FS */ - -+/* -+ * Asks the firmware to turn on power to the V3D engine. 
-+ * -+ * This may be doable with just the clocks interface, though this -+ * packet does some other register setup from the firmware, too. -+ */ -+int -+vc4_v3d_set_power(struct vc4_dev *vc4, bool on) -+{ -+ if (on) -+ return pm_generic_poweroff(&vc4->v3d->pdev->dev); -+ else -+ return pm_generic_resume(&vc4->v3d->pdev->dev); -+} -+ - static void vc4_v3d_init_hw(struct drm_device *dev) - { - struct vc4_dev *vc4 = to_vc4_dev(dev); -@@ -161,6 +176,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); - struct vc4_v3d *v3d = NULL; -+ int ret; - - v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL); - if (!v3d) -@@ -180,8 +196,20 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) - return -EINVAL; - } - -+ /* Reset the binner overflow address/size at setup, to be sure -+ * we don't reuse an old one. -+ */ -+ V3D_WRITE(V3D_BPOA, 0); -+ V3D_WRITE(V3D_BPOS, 0); -+ - vc4_v3d_init_hw(drm); - -+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); -+ if (ret) { -+ DRM_ERROR("Failed to install IRQ handler\n"); -+ return ret; -+ } -+ - return 0; - } - -@@ -191,6 +219,15 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master, - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); - -+ drm_irq_uninstall(drm); -+ -+ /* Disable the binner's overflow memory address, so the next -+ * driver probe (if any) doesn't try to reuse our old -+ * allocation. -+ */ -+ V3D_WRITE(V3D_BPOA, 0); -+ V3D_WRITE(V3D_BPOS, 0); -+ - vc4->v3d = NULL; - } - -diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c -new file mode 100644 -index 0000000..0fb5b99 ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_validate.c -@@ -0,0 +1,900 @@ -+/* -+ * Copyright © 2014 Broadcom -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ */ -+ -+/** -+ * Command list validator for VC4. -+ * -+ * The VC4 has no IOMMU between it and system memory. So, a user with -+ * access to execute command lists could escalate privilege by -+ * overwriting system memory (drawing to it as a framebuffer) or -+ * reading system memory it shouldn't (reading it as a texture, or -+ * uniform data, or vertex data). -+ * -+ * This validates command lists to ensure that all accesses are within -+ * the bounds of the GEM objects referenced. 
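The essential operation behind all of this validation is an overflow-safe range test against the size of the backing BO. A minimal standalone sketch of that idiom, in plain C with made-up sizes (not part of the patch; the driver's own variant of the test appears in vc4_check_tex_size() below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe test that [offset, offset + size) stays inside a BO of
 * bo_size bytes. Testing "size + offset < size" first catches uint32_t
 * wraparound before the upper-bound comparison can be trusted.
 */
static bool access_fits(uint32_t offset, uint32_t size, uint32_t bo_size)
{
    if (size + offset < size)
        return false;            /* offset + size wrapped past 2^32 */
    return size + offset <= bo_size;
}

int main(void)
{
    printf("%d\n", access_fits(3072, 1024, 4096));        /* 1: fits */
    printf("%d\n", access_fits(4096, 1, 4096));           /* 0: past end */
    printf("%d\n", access_fits(0xfffffc00u, 1024, 4096)); /* 0: wraps */
    return 0;
}
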
It explicitly whitelists -+ * packets, and looks at the offsets in any address fields to make -+ * sure they're constrained within the BOs they reference. -+ * -+ * Note that because of the validation that's happening anyway, this -+ * is where GEM relocation processing happens. -+ */ -+ -+#include "uapi/drm/vc4_drm.h" -+#include "vc4_drv.h" -+#include "vc4_packet.h" -+ -+#define VALIDATE_ARGS \ -+ struct vc4_exec_info *exec, \ -+ void *validated, \ -+ void *untrusted -+ -+/** Return the width in pixels of a 64-byte microtile. */ -+static uint32_t -+utile_width(int cpp) -+{ -+ switch (cpp) { -+ case 1: -+ case 2: -+ return 8; -+ case 4: -+ return 4; -+ case 8: -+ return 2; -+ default: -+ DRM_ERROR("unknown cpp: %d\n", cpp); -+ return 1; -+ } -+} -+ -+/** Return the height in pixels of a 64-byte microtile. */ -+static uint32_t -+utile_height(int cpp) -+{ -+ switch (cpp) { -+ case 1: -+ return 8; -+ case 2: -+ case 4: -+ case 8: -+ return 4; -+ default: -+ DRM_ERROR("unknown cpp: %d\n", cpp); -+ return 1; -+ } -+} -+ -+/** -+ * The texture unit decides what tiling format a particular miplevel is using -+ * this function, so we lay out our miptrees accordingly. -+ */ -+static bool -+size_is_lt(uint32_t width, uint32_t height, int cpp) -+{ -+ return (width <= 4 * utile_width(cpp) || -+ height <= 4 * utile_height(cpp)); -+} -+ -+struct drm_gem_cma_object * -+vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) -+{ -+ struct drm_gem_cma_object *obj; -+ struct vc4_bo *bo; -+ -+ if (hindex >= exec->bo_count) { -+ DRM_ERROR("BO index %d greater than BO count %d\n", -+ hindex, exec->bo_count); -+ return NULL; -+ } -+ obj = exec->bo[hindex]; -+ bo = to_vc4_bo(&obj->base); -+ -+ if (bo->validated_shader) { -+ DRM_ERROR("Trying to use shader BO as something other than " -+ "a shader\n"); -+ return NULL; -+ } -+ -+ return obj; -+} -+ -+static struct drm_gem_cma_object * -+vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index) -+{ -+ return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]); -+} -+ -+static bool -+validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos) -+{ -+ /* Note that the untrusted pointer passed to these functions is -+ * incremented past the packet byte. -+ */ -+ return (untrusted - 1 == exec->bin_u + pos); -+} -+ -+static uint32_t -+gl_shader_rec_size(uint32_t pointer_bits) -+{ -+ uint32_t attribute_count = pointer_bits & 7; -+ bool extended = pointer_bits & 8; -+ -+ if (attribute_count == 0) -+ attribute_count = 8; -+ -+ if (extended) -+ return 100 + attribute_count * 4; -+ else -+ return 36 + attribute_count * 8; -+} -+ -+bool -+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, -+ uint32_t offset, uint8_t tiling_format, -+ uint32_t width, uint32_t height, uint8_t cpp) -+{ -+ uint32_t aligned_width, aligned_height, stride, size; -+ uint32_t utile_w = utile_width(cpp); -+ uint32_t utile_h = utile_height(cpp); -+ -+ /* The shaded vertex format stores signed 12.4 fixed point -+ * (-2048,2047) offsets from the viewport center, so we should -+ * never have a render target larger than 4096. The texture -+ * unit can only sample from 2048x2048, so it's even more -+ * restricted. This lets us avoid worrying about overflow in -+ * our math. 
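The 12.4 bound is easy to confirm by hand: 16 bits of signed 12.4 fixed point span -2048..2047 whole pixels, and even the largest surface that survives the 4096 clamp stays far below 2^32 bytes. A quick standalone check of that arithmetic (8 bytes per pixel is the largest cpp the utile code above supports):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Signed 12.4 fixed point: 16 bits, 4 of them fractional. */
    int min_coord = -32768 / 16;   /* -2048 */
    int max_coord = 32767 / 16;    /*  2047 */

    /* Worst case after the 4096x4096 clamp: 8 bytes per pixel.
     * 4096 is already a multiple of every utile dimension, so
     * alignment cannot push the size any higher.
     */
    uint64_t worst = 4096ull * 4096ull * 8;   /* 128 MiB */

    printf("coordinate range: %d..%d\n", min_coord, max_coord);
    printf("worst-case size: %llu bytes (fits in u32: %s)\n",
           (unsigned long long)worst,
           worst <= UINT32_MAX ? "yes" : "no");
    return 0;
}
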
-+ */ -+ if (width > 4096 || height > 4096) { -+ DRM_ERROR("Surface dimensions (%d,%d) too large\n", width, height); -+ return false; -+ } -+ -+ switch (tiling_format) { -+ case VC4_TILING_FORMAT_LINEAR: -+ aligned_width = round_up(width, utile_w); -+ aligned_height = height; -+ break; -+ case VC4_TILING_FORMAT_T: -+ aligned_width = round_up(width, utile_w * 8); -+ aligned_height = round_up(height, utile_h * 8); -+ break; -+ case VC4_TILING_FORMAT_LT: -+ aligned_width = round_up(width, utile_w); -+ aligned_height = round_up(height, utile_h); -+ break; -+ default: -+ DRM_ERROR("buffer tiling %d unsupported\n", tiling_format); -+ return false; -+ } -+ -+ stride = aligned_width * cpp; -+ size = stride * aligned_height; -+ -+ if (size + offset < size || -+ size + offset > fbo->base.size) { -+ DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", -+ width, height, -+ aligned_width, aligned_height, -+ size, offset, fbo->base.size); -+ return false; -+ } -+ -+ return true; -+} -+ -+static int -+validate_flush(VALIDATE_ARGS) -+{ -+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) { -+ DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n"); -+ return -EINVAL; -+ } -+ exec->found_flush = true; -+ -+ return 0; -+} -+ -+static int -+validate_start_tile_binning(VALIDATE_ARGS) -+{ -+ if (exec->found_start_tile_binning_packet) { -+ DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n"); -+ return -EINVAL; -+ } -+ exec->found_start_tile_binning_packet = true; -+ -+ if (!exec->found_tile_binning_mode_config_packet) { -+ DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int -+validate_increment_semaphore(VALIDATE_ARGS) -+{ -+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) { -+ DRM_ERROR("Bin CL must end with " -+ "VC4_PACKET_INCREMENT_SEMAPHORE\n"); -+ return -EINVAL; -+ } -+ exec->found_increment_semaphore_packet = true; -+ -+ return 0; -+} -+ -+static int -+validate_indexed_prim_list(VALIDATE_ARGS) -+{ -+ struct drm_gem_cma_object *ib; -+ uint32_t length = *(uint32_t *)(untrusted + 1); -+ uint32_t offset = *(uint32_t *)(untrusted + 5); -+ uint32_t max_index = *(uint32_t *)(untrusted + 9); -+ uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 
2 : 1; -+ struct vc4_shader_state *shader_state; -+ -+ /* Check overflow condition */ -+ if (exec->shader_state_count == 0) { -+ DRM_ERROR("shader state must precede primitives\n"); -+ return -EINVAL; -+ } -+ shader_state = &exec->shader_state[exec->shader_state_count - 1]; -+ -+ if (max_index > shader_state->max_index) -+ shader_state->max_index = max_index; -+ -+ ib = vc4_use_handle(exec, 0); -+ if (!ib) -+ return -EINVAL; -+ -+ if (offset > ib->base.size || -+ (ib->base.size - offset) / index_size < length) { -+ DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", -+ offset, length, index_size, ib->base.size); -+ return -EINVAL; -+ } -+ -+ *(uint32_t *)(validated + 5) = ib->paddr + offset; -+ -+ return 0; -+} -+ -+static int -+validate_gl_array_primitive(VALIDATE_ARGS) -+{ -+ uint32_t length = *(uint32_t *)(untrusted + 1); -+ uint32_t base_index = *(uint32_t *)(untrusted + 5); -+ uint32_t max_index; -+ struct vc4_shader_state *shader_state; -+ -+ /* Check overflow condition */ -+ if (exec->shader_state_count == 0) { -+ DRM_ERROR("shader state must precede primitives\n"); -+ return -EINVAL; -+ } -+ shader_state = &exec->shader_state[exec->shader_state_count - 1]; -+ -+ if (length + base_index < length) { -+ DRM_ERROR("primitive vertex count overflow\n"); -+ return -EINVAL; -+ } -+ max_index = length + base_index - 1; -+ -+ if (max_index > shader_state->max_index) -+ shader_state->max_index = max_index; -+ -+ return 0; -+} -+ -+static int -+validate_gl_shader_state(VALIDATE_ARGS) -+{ -+ uint32_t i = exec->shader_state_count++; -+ -+ if (i >= exec->shader_state_size) { -+ DRM_ERROR("More requests for shader states than declared\n"); -+ return -EINVAL; -+ } -+ -+ exec->shader_state[i].addr = *(uint32_t *)untrusted; -+ exec->shader_state[i].max_index = 0; -+ -+ if (exec->shader_state[i].addr & ~0xf) { -+ DRM_ERROR("high bits set in GL shader rec reference\n"); -+ return -EINVAL; -+ } -+ -+ *(uint32_t *)validated = (exec->shader_rec_p + -+ exec->shader_state[i].addr); -+ -+ exec->shader_rec_p += -+ roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16); -+ -+ return 0; -+} -+ -+static int -+validate_tile_binning_config(VALIDATE_ARGS) -+{ -+ struct drm_device *dev = exec->exec_bo->base.dev; -+ struct vc4_bo *tile_bo; -+ uint8_t flags; -+ uint32_t tile_state_size, tile_alloc_size; -+ uint32_t tile_count; -+ -+ if (exec->found_tile_binning_mode_config_packet) { -+ DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); -+ return -EINVAL; -+ } -+ exec->found_tile_binning_mode_config_packet = true; -+ -+ exec->bin_tiles_x = *(uint8_t *)(untrusted + 12); -+ exec->bin_tiles_y = *(uint8_t *)(untrusted + 13); -+ tile_count = exec->bin_tiles_x * exec->bin_tiles_y; -+ flags = *(uint8_t *)(untrusted + 14); -+ -+ if (exec->bin_tiles_x == 0 || -+ exec->bin_tiles_y == 0) { -+ DRM_ERROR("Tile binning config of %dx%d too small\n", -+ exec->bin_tiles_x, exec->bin_tiles_y); -+ return -EINVAL; -+ } -+ -+ if (flags & (VC4_BIN_CONFIG_DB_NON_MS | -+ VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) { -+ DRM_ERROR("unsupported binning config flags 0x%02x\n", flags); -+ return -EINVAL; -+ } -+ -+ /* The tile state data array is 48 bytes per tile, and we put it at -+ * the start of a BO containing both it and the tile alloc. -+ */ -+ tile_state_size = 48 * tile_count; -+ -+ /* Since the tile alloc array will follow us, align. 
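Pulling the sizing this function performs together: 48 bytes of tile state per tile at the start of the BO, then the tile allocation array at the next 4096-byte boundary, sized at 32 bytes per tile, rounded up to 256 bytes, plus a megabyte of slack. A standalone sketch of that arithmetic (the 30x17 grid is just an example: a 1920x1080 screen cut into 64x64-pixel tiles):

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, n) ((((x) + (n) - 1) / (n)) * (n))

int main(void)
{
    /* Example grid: 1920x1080 rendered as 64x64-pixel tiles. */
    uint32_t tiles_x = 30, tiles_y = 17;
    uint32_t tile_count = tiles_x * tiles_y;

    /* 48 bytes of tile state per tile, at the start of the BO. */
    uint32_t tile_state_size = 48 * tile_count;

    /* The tile alloc array follows, at the next 4096-byte boundary. */
    uint32_t tile_alloc_offset = ROUNDUP(tile_state_size, 4096);

    /* 32 bytes of initial allocation per tile, which the hardware
     * rounds to 256 bytes, plus 1 MiB before overflow memory kicks in.
     */
    uint32_t tile_alloc_size = ROUNDUP(32 * tile_count, 256) + 1024 * 1024;

    printf("tile state: %u bytes, alloc at 0x%x, BO size %u bytes\n",
           tile_state_size, tile_alloc_offset,
           tile_alloc_offset + tile_alloc_size);
    return 0;
}
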
*/ -+ exec->tile_alloc_offset = roundup(tile_state_size, 4096); -+ -+ *(uint8_t *)(validated + 14) = -+ ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK | -+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) | -+ VC4_BIN_CONFIG_AUTO_INIT_TSDA | -+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32, -+ VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) | -+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128, -+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE)); -+ -+ /* Initial block size. */ -+ tile_alloc_size = 32 * tile_count; -+ -+ /* -+ * The initial allocation gets rounded to the next 256 bytes before -+ * the hardware starts fulfilling further allocations. -+ */ -+ tile_alloc_size = roundup(tile_alloc_size, 256); -+ -+ /* Add space for the extra allocations. This is what gets used first, -+ * before overflow memory. It must have at least 4096 bytes, but we -+ * want to avoid overflow memory usage if possible. -+ */ -+ tile_alloc_size += 1024 * 1024; -+ -+ tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, -+ true); -+ exec->tile_bo = &tile_bo->base; -+ if (!exec->tile_bo) -+ return -ENOMEM; -+ list_add_tail(&tile_bo->unref_head, &exec->unref_list); -+ -+ /* tile alloc address. */ -+ *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr + -+ exec->tile_alloc_offset); -+ /* tile alloc size. */ -+ *(uint32_t *)(validated + 4) = tile_alloc_size; -+ /* tile state address. */ -+ *(uint32_t *)(validated + 8) = exec->tile_bo->paddr; -+ -+ return 0; -+} -+ -+static int -+validate_gem_handles(VALIDATE_ARGS) -+{ -+ memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index)); -+ return 0; -+} -+ -+#define VC4_DEFINE_PACKET(packet, func) \ -+ [packet] = { packet ## _SIZE, #packet, func } -+ -+static const struct cmd_info { -+ uint16_t len; -+ const char *name; -+ int (*func)(struct vc4_exec_info *exec, void *validated, -+ void *untrusted); -+} cmd_info[] = { -+ VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush), -+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, -+ validate_start_tile_binning), -+ VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, -+ validate_increment_semaphore), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, -+ validate_indexed_prim_list), -+ VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, -+ validate_gl_array_primitive), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL), -+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL), -+ /* Note: The docs say this was also 105, but it was 106 in the -+ * initial userland code drop. 
-+ */ -+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, -+ validate_tile_binning_config), -+ -+ VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles), -+}; -+ -+int -+vc4_validate_bin_cl(struct drm_device *dev, -+ void *validated, -+ void *unvalidated, -+ struct vc4_exec_info *exec) -+{ -+ uint32_t len = exec->args->bin_cl_size; -+ uint32_t dst_offset = 0; -+ uint32_t src_offset = 0; -+ -+ while (src_offset < len) { -+ void *dst_pkt = validated + dst_offset; -+ void *src_pkt = unvalidated + src_offset; -+ u8 cmd = *(uint8_t *)src_pkt; -+ const struct cmd_info *info; -+ -+ if (cmd >= ARRAY_SIZE(cmd_info)) { -+ DRM_ERROR("0x%08x: packet %d out of bounds\n", -+ src_offset, cmd); -+ return -EINVAL; -+ } -+ -+ info = &cmd_info[cmd]; -+ if (!info->name) { -+ DRM_ERROR("0x%08x: packet %d invalid\n", -+ src_offset, cmd); -+ return -EINVAL; -+ } -+ -+ if (src_offset + info->len > len) { -+ DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " -+ "exceeds bounds (0x%08x)\n", -+ src_offset, cmd, info->name, info->len, -+ src_offset + len); -+ return -EINVAL; -+ } -+ -+ if (cmd != VC4_PACKET_GEM_HANDLES) -+ memcpy(dst_pkt, src_pkt, info->len); -+ -+ if (info->func && info->func(exec, -+ dst_pkt + 1, -+ src_pkt + 1)) { -+ DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", -+ src_offset, cmd, info->name); -+ return -EINVAL; -+ } -+ -+ src_offset += info->len; -+ /* GEM handle loading doesn't produce HW packets. */ -+ if (cmd != VC4_PACKET_GEM_HANDLES) -+ dst_offset += info->len; -+ -+ /* When the CL hits halt, it'll stop reading anything else. */ -+ if (cmd == VC4_PACKET_HALT) -+ break; -+ } -+ -+ exec->ct0ea = exec->ct0ca + dst_offset; -+ -+ if (!exec->found_start_tile_binning_packet) { -+ DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); -+ return -EINVAL; -+ } -+ -+ /* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The -+ * semaphore is used to trigger the render CL to start up, and the -+ * FLUSH is what caps the bin lists with -+ * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main -+ * render CL when they get called to) and actually triggers the queued -+ * semaphore increment. -+ */ -+ if (!exec->found_increment_semaphore_packet || !exec->found_flush) { -+ DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " -+ "VC4_PACKET_FLUSH\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static bool -+reloc_tex(struct vc4_exec_info *exec, -+ void *uniform_data_u, -+ struct vc4_texture_sample_info *sample, -+ uint32_t texture_handle_index) -+ -+{ -+ struct drm_gem_cma_object *tex; -+ uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]); -+ uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]); -+ uint32_t p2 = (sample->p_offset[2] != ~0 ? -+ *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0); -+ uint32_t p3 = (sample->p_offset[3] != ~0 ? 
-+ *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0); -+ uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0]; -+ uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK; -+ uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS); -+ uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH); -+ uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT); -+ uint32_t cpp, tiling_format, utile_w, utile_h; -+ uint32_t i; -+ uint32_t cube_map_stride = 0; -+ enum vc4_texture_data_type type; -+ -+ tex = vc4_use_bo(exec, texture_handle_index); -+ if (!tex) -+ return false; -+ -+ if (sample->is_direct) { -+ uint32_t remaining_size = tex->base.size - p0; -+ -+ if (p0 > tex->base.size - 4) { -+ DRM_ERROR("UBO offset greater than UBO size\n"); -+ goto fail; -+ } -+ if (p1 > remaining_size - 4) { -+ DRM_ERROR("UBO clamp would allow reads " -+ "outside of UBO\n"); -+ goto fail; -+ } -+ *validated_p0 = tex->paddr + p0; -+ return true; -+ } -+ -+ if (width == 0) -+ width = 2048; -+ if (height == 0) -+ height = 2048; -+ -+ if (p0 & VC4_TEX_P0_CMMODE_MASK) { -+ if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) == -+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) -+ cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK; -+ if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) == -+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) { -+ if (cube_map_stride) { -+ DRM_ERROR("Cube map stride set twice\n"); -+ goto fail; -+ } -+ -+ cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK; -+ } -+ if (!cube_map_stride) { -+ DRM_ERROR("Cube map stride not set\n"); -+ goto fail; -+ } -+ } -+ -+ type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) | -+ (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4)); -+ -+ switch (type) { -+ case VC4_TEXTURE_TYPE_RGBA8888: -+ case VC4_TEXTURE_TYPE_RGBX8888: -+ case VC4_TEXTURE_TYPE_RGBA32R: -+ cpp = 4; -+ break; -+ case VC4_TEXTURE_TYPE_RGBA4444: -+ case VC4_TEXTURE_TYPE_RGBA5551: -+ case VC4_TEXTURE_TYPE_RGB565: -+ case VC4_TEXTURE_TYPE_LUMALPHA: -+ case VC4_TEXTURE_TYPE_S16F: -+ case VC4_TEXTURE_TYPE_S16: -+ cpp = 2; -+ break; -+ case VC4_TEXTURE_TYPE_LUMINANCE: -+ case VC4_TEXTURE_TYPE_ALPHA: -+ case VC4_TEXTURE_TYPE_S8: -+ cpp = 1; -+ break; -+ case VC4_TEXTURE_TYPE_ETC1: -+ case VC4_TEXTURE_TYPE_BW1: -+ case VC4_TEXTURE_TYPE_A4: -+ case VC4_TEXTURE_TYPE_A1: -+ case VC4_TEXTURE_TYPE_RGBA64: -+ case VC4_TEXTURE_TYPE_YUV422R: -+ default: -+ DRM_ERROR("Texture format %d unsupported\n", type); -+ goto fail; -+ } -+ utile_w = utile_width(cpp); -+ utile_h = utile_height(cpp); -+ -+ if (type == VC4_TEXTURE_TYPE_RGBA32R) { -+ tiling_format = VC4_TILING_FORMAT_LINEAR; -+ } else { -+ if (size_is_lt(width, height, cpp)) -+ tiling_format = VC4_TILING_FORMAT_LT; -+ else -+ tiling_format = VC4_TILING_FORMAT_T; -+ } -+ -+ if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5, -+ tiling_format, width, height, cpp)) { -+ goto fail; -+ } -+ -+ /* The mipmap levels are stored before the base of the texture. Make -+ * sure there is actually space in the BO. -+ */ -+ for (i = 1; i <= miplevels; i++) { -+ uint32_t level_width = max(width >> i, 1u); -+ uint32_t level_height = max(height >> i, 1u); -+ uint32_t aligned_width, aligned_height; -+ uint32_t level_size; -+ -+ /* Once the levels get small enough, they drop from T to LT. 
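Since the levels sit below the base offset, the check that follows simply walks level 1..miplevels, subtracting each aligned level size and making sure the running offset never underflows. The same walk as a standalone sketch (a 256x256 texture at cpp 4 with three extra levels and a hypothetical base offset; LT alignment is assumed throughout for brevity, where the real code switches between T and LT as sized above):

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, n) ((((x) + (n) - 1) / (n)) * (n))

int main(void)
{
    /* Example: 256x256 texture, cpp = 4 (so 4x4-pixel utiles),
     * 3 miplevels stored below a hypothetical base offset.
     */
    uint32_t utile_w = 4, utile_h = 4, cpp = 4;
    uint32_t width = 256, height = 256;
    uint32_t offset = 0x60000;   /* hypothetical base offset */
    uint32_t i;

    for (i = 1; i <= 3; i++) {
        uint32_t w = width >> i ? width >> i : 1;
        uint32_t h = height >> i ? height >> i : 1;
        /* LT layout: round each dimension up to one utile. */
        uint32_t size = ROUNDUP(w, utile_w) * cpp * ROUNDUP(h, utile_h);

        if (offset < size) {
            printf("level %u underflows the BO\n", i);
            return 1;
        }
        offset -= size;
        printf("level %u starts at 0x%05x (%ux%u, %u bytes)\n",
               i, offset, w, h, size);
    }
    return 0;
}
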
*/ -+ if (tiling_format == VC4_TILING_FORMAT_T && -+ size_is_lt(level_width, level_height, cpp)) { -+ tiling_format = VC4_TILING_FORMAT_LT; -+ } -+ -+ switch (tiling_format) { -+ case VC4_TILING_FORMAT_T: -+ aligned_width = round_up(level_width, utile_w * 8); -+ aligned_height = round_up(level_height, utile_h * 8); -+ break; -+ case VC4_TILING_FORMAT_LT: -+ aligned_width = round_up(level_width, utile_w); -+ aligned_height = round_up(level_height, utile_h); -+ break; -+ default: -+ aligned_width = round_up(level_width, utile_w); -+ aligned_height = level_height; -+ break; -+ } -+ -+ level_size = aligned_width * cpp * aligned_height; -+ -+ if (offset < level_size) { -+ DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db " -+ "overflowed buffer bounds (offset %d)\n", -+ i, level_width, level_height, -+ aligned_width, aligned_height, -+ level_size, offset); -+ goto fail; -+ } -+ -+ offset -= level_size; -+ } -+ -+ *validated_p0 = tex->paddr + p0; -+ -+ return true; -+ fail: -+ DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0); -+ DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1); -+ DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2); -+ DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3); -+ return false; -+} -+ -+static int -+validate_gl_shader_rec(struct drm_device *dev, -+ struct vc4_exec_info *exec, -+ struct vc4_shader_state *state) -+{ -+ uint32_t *src_handles; -+ void *pkt_u, *pkt_v; -+ static const uint32_t shader_reloc_offsets[] = { -+ 4, /* fs */ -+ 16, /* vs */ -+ 28, /* cs */ -+ }; -+ uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets); -+ struct drm_gem_cma_object *bo[shader_reloc_count + 8]; -+ uint32_t nr_attributes, nr_relocs, packet_size; -+ int i; -+ -+ nr_attributes = state->addr & 0x7; -+ if (nr_attributes == 0) -+ nr_attributes = 8; -+ packet_size = gl_shader_rec_size(state->addr); -+ -+ nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes; -+ if (nr_relocs * 4 > exec->shader_rec_size) { -+ DRM_ERROR("overflowed shader recs reading %d handles " -+ "from %d bytes left\n", -+ nr_relocs, exec->shader_rec_size); -+ return -EINVAL; -+ } -+ src_handles = exec->shader_rec_u; -+ exec->shader_rec_u += nr_relocs * 4; -+ exec->shader_rec_size -= nr_relocs * 4; -+ -+ if (packet_size > exec->shader_rec_size) { -+ DRM_ERROR("overflowed shader recs copying %db packet " -+ "from %d bytes left\n", -+ packet_size, exec->shader_rec_size); -+ return -EINVAL; -+ } -+ pkt_u = exec->shader_rec_u; -+ pkt_v = exec->shader_rec_v; -+ memcpy(pkt_v, pkt_u, packet_size); -+ exec->shader_rec_u += packet_size; -+ /* Shader recs have to be aligned to 16 bytes (due to the attribute -+ * flags being in the low bytes), so round the next validated shader -+ * rec address up. This should be safe, since we've got so many -+ * relocations in a shader rec packet. 
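The BUG_ON that follows is checkable by hand: for every value of the packet's low nibble, rounding the record size up to 16 bytes consumes less than the 4 * nr_relocs bytes of handle words that were just skipped. A standalone rendering of that check, with gl_shader_rec_size() duplicated from earlier in this file:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Duplicated sizing rule: the low 3 bits of the packet address are the
 * attribute count (0 means 8), and bit 3 selects the extended record.
 */
static uint32_t gl_shader_rec_size(uint32_t pointer_bits)
{
    uint32_t attribute_count = pointer_bits & 7;

    if (attribute_count == 0)
        attribute_count = 8;

    if (pointer_bits & 8)
        return 100 + attribute_count * 4;
    return 36 + attribute_count * 8;
}

int main(void)
{
    uint32_t bits;

    for (bits = 0; bits < 16; bits++) {
        uint32_t size = gl_shader_rec_size(bits);
        uint32_t attrs = (bits & 7) ? (bits & 7) : 8;
        uint32_t nr_relocs = 3 + attrs;   /* fs, vs, cs + attributes */
        uint32_t padding = ((size + 15) & ~15u) - size;

        /* The 16-byte roundup never overruns the reloc handle words. */
        assert(padding <= nr_relocs * 4);
        printf("bits %2u: %3u bytes, %2u relocs, %2u pad\n",
               bits, size, nr_relocs, padding);
    }
    return 0;
}
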
-+ */ -+ BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4); -+ exec->shader_rec_v += roundup(packet_size, 16); -+ exec->shader_rec_size -= packet_size; -+ -+ if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) { -+ DRM_ERROR("Multi-threaded fragment shaders not supported.\n"); -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < shader_reloc_count; i++) { -+ if (src_handles[i] >= exec->bo_count) { -+ DRM_ERROR("Shader handle %d too big\n", src_handles[i]); -+ return -EINVAL; -+ } -+ -+ bo[i] = exec->bo[src_handles[i]]; -+ if (!bo[i]) -+ return -EINVAL; -+ } -+ for (i = shader_reloc_count; i < nr_relocs; i++) { -+ bo[i] = vc4_use_bo(exec, src_handles[i]); -+ if (!bo[i]) -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < shader_reloc_count; i++) { -+ struct vc4_validated_shader_info *validated_shader; -+ uint32_t o = shader_reloc_offsets[i]; -+ uint32_t src_offset = *(uint32_t *)(pkt_u + o); -+ uint32_t *texture_handles_u; -+ void *uniform_data_u; -+ uint32_t tex; -+ -+ *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; -+ -+ if (src_offset != 0) { -+ DRM_ERROR("Shaders must be at offset 0 of " -+ "the BO.\n"); -+ return -EINVAL; -+ } -+ -+ validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader; -+ if (!validated_shader) -+ return -EINVAL; -+ -+ if (validated_shader->uniforms_src_size > -+ exec->uniforms_size) { -+ DRM_ERROR("Uniforms src buffer overflow\n"); -+ return -EINVAL; -+ } -+ -+ texture_handles_u = exec->uniforms_u; -+ uniform_data_u = (texture_handles_u + -+ validated_shader->num_texture_samples); -+ -+ memcpy(exec->uniforms_v, uniform_data_u, -+ validated_shader->uniforms_size); -+ -+ for (tex = 0; -+ tex < validated_shader->num_texture_samples; -+ tex++) { -+ if (!reloc_tex(exec, -+ uniform_data_u, -+ &validated_shader->texture_samples[tex], -+ texture_handles_u[tex])) { -+ return -EINVAL; -+ } -+ } -+ -+ *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p; -+ -+ exec->uniforms_u += validated_shader->uniforms_src_size; -+ exec->uniforms_v += validated_shader->uniforms_size; -+ exec->uniforms_p += validated_shader->uniforms_size; -+ } -+ -+ for (i = 0; i < nr_attributes; i++) { -+ struct drm_gem_cma_object *vbo = -+ bo[ARRAY_SIZE(shader_reloc_offsets) + i]; -+ uint32_t o = 36 + i * 8; -+ uint32_t offset = *(uint32_t *)(pkt_u + o + 0); -+ uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1; -+ uint32_t stride = *(uint8_t *)(pkt_u + o + 5); -+ uint32_t max_index; -+ -+ if (state->addr & 0x8) -+ stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff; -+ -+ if (vbo->base.size < offset || -+ vbo->base.size - offset < attr_size) { -+ DRM_ERROR("BO offset overflow (%d + %d > %zd)\n", -+ offset, attr_size, vbo->base.size); -+ return -EINVAL; -+ } -+ -+ if (stride != 0) { -+ max_index = ((vbo->base.size - offset - attr_size) / -+ stride); -+ if (state->max_index > max_index) { -+ DRM_ERROR("primitives use index %d out of " -+ "supplied %d\n", -+ state->max_index, max_index); -+ return -EINVAL; -+ } -+ } -+ -+ *(uint32_t *)(pkt_v + o) = vbo->paddr + offset; -+ } -+ -+ return 0; -+} -+ -+int -+vc4_validate_shader_recs(struct drm_device *dev, -+ struct vc4_exec_info *exec) -+{ -+ uint32_t i; -+ int ret = 0; -+ -+ for (i = 0; i < exec->shader_state_count; i++) { -+ ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]); -+ if (ret) -+ return ret; -+ } -+ -+ return ret; -+} -diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h -index 74de184..fe4161b 100644 ---- a/include/uapi/drm/vc4_drm.h -+++ b/include/uapi/drm/vc4_drm.h -@@ -26,14 +26,155 @@ - - 
#include "drm.h" - -+#define DRM_VC4_SUBMIT_CL 0x00 -+#define DRM_VC4_WAIT_SEQNO 0x01 -+#define DRM_VC4_WAIT_BO 0x02 - #define DRM_VC4_CREATE_BO 0x03 - #define DRM_VC4_MMAP_BO 0x04 - #define DRM_VC4_CREATE_SHADER_BO 0x05 - -+#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) -+#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) -+#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo) - #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) - #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) - #define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) - -+struct drm_vc4_submit_rcl_surface { -+ __u32 hindex; /* Handle index, or ~0 if not present. */ -+ __u32 offset; /* Offset to start of buffer. */ -+ /* -+ * Bits for either render config (color_write) or load/store packet. -+ * Bits should all be 0 for MSAA load/stores. -+ */ -+ __u16 bits; -+ -+#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0) -+ __u16 flags; -+}; -+ -+/** -+ * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D -+ * engine. -+ * -+ * Drivers typically use GPU BOs to store batchbuffers / command lists and -+ * their associated state. However, because the VC4 lacks an MMU, we have to -+ * do validation of memory accesses by the GPU commands. If we were to store -+ * our commands in BOs, we'd need to do uncached readback from them to do the -+ * validation process, which is too expensive. Instead, userspace accumulates -+ * commands and associated state in plain memory, then the kernel copies the -+ * data to its own address space, and then validates and stores it in a GPU -+ * BO. -+ */ -+struct drm_vc4_submit_cl { -+ /* Pointer to the binner command list. -+ * -+ * This is the first set of commands executed, which runs the -+ * coordinate shader to determine where primitives land on the screen, -+ * then writes out the state updates and draw calls necessary per tile -+ * to the tile allocation BO. -+ */ -+ __u64 bin_cl; -+ -+ /* Pointer to the shader records. -+ * -+ * Shader records are the structures read by the hardware that contain -+ * pointers to uniforms, shaders, and vertex attributes. The -+ * reference to the shader record has enough information to determine -+ * how many pointers are necessary (fixed number for shaders/uniforms, -+ * and an attribute count), so those BO indices into bo_handles are -+ * just stored as __u32s before each shader record passed in. -+ */ -+ __u64 shader_rec; -+ -+ /* Pointer to uniform data and texture handles for the textures -+ * referenced by the shader. -+ * -+ * For each shader state record, there is a set of uniform data in the -+ * order referenced by the record (FS, VS, then CS). Each set of -+ * uniform data has a __u32 index into bo_handles per texture -+ * sample operation, in the order the QPU_W_TMUn_S writes appear in -+ * the program. Following the texture BO handle indices is the actual -+ * uniform data. -+ * -+ * The individual uniform state blocks don't have sizes passed in, -+ * because the kernel has to determine the sizes anyway during shader -+ * code validation. -+ */ -+ __u64 uniforms; -+ __u64 bo_handles; -+ -+ /* Size in bytes of the binner command list. 
*/ -+ __u32 bin_cl_size; -+ /* Size in bytes of the set of shader records. */ -+ __u32 shader_rec_size; -+ /* Number of shader records. -+ * -+ * This could just be computed from the contents of shader_records and -+ * the address bits of references to them from the bin CL, but it -+ * keeps the kernel from having to resize some allocations it makes. -+ */ -+ __u32 shader_rec_count; -+ /* Size in bytes of the uniform state. */ -+ __u32 uniforms_size; -+ -+ /* Number of BO handles passed in (size is that times 4). */ -+ __u32 bo_handle_count; -+ -+ /* RCL setup: */ -+ __u16 width; -+ __u16 height; -+ __u8 min_x_tile; -+ __u8 min_y_tile; -+ __u8 max_x_tile; -+ __u8 max_y_tile; -+ struct drm_vc4_submit_rcl_surface color_read; -+ struct drm_vc4_submit_rcl_surface color_write; -+ struct drm_vc4_submit_rcl_surface zs_read; -+ struct drm_vc4_submit_rcl_surface zs_write; -+ struct drm_vc4_submit_rcl_surface msaa_color_write; -+ struct drm_vc4_submit_rcl_surface msaa_zs_write; -+ __u32 clear_color[2]; -+ __u32 clear_z; -+ __u8 clear_s; -+ -+ __u32 pad:24; -+ -+#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) -+ __u32 flags; -+ -+ /* Returned value of the seqno of this render job (for the -+ * wait ioctl). -+ */ -+ __u64 seqno; -+}; -+ -+/** -+ * struct drm_vc4_wait_seqno - ioctl argument for waiting for -+ * DRM_VC4_SUBMIT_CL completion using its returned seqno. -+ * -+ * timeout_ns is the timeout in nanoseconds, where "0" means "don't -+ * block, just return the status." -+ */ -+struct drm_vc4_wait_seqno { -+ __u64 seqno; -+ __u64 timeout_ns; -+}; -+ -+/** -+ * struct drm_vc4_wait_bo - ioctl argument for waiting for -+ * completion of the last DRM_VC4_SUBMIT_CL on a BO. -+ * -+ * This is useful for cases where multiple processes might be -+ * rendering to a BO and you want to wait for all rendering to be -+ * completed. -+ */ -+struct drm_vc4_wait_bo { -+ __u32 handle; -+ __u32 pad; -+ __u64 timeout_ns; -+}; -+ - /** - * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. - * diff --git a/debian/patches/features/arm/rpi/drm-vc4-allocate-enough-memory-in-vc4_save_hang_stat.patch b/debian/patches/features/arm/rpi/drm-vc4-allocate-enough-memory-in-vc4_save_hang_stat.patch deleted file mode 100644 index 5ee1cd153..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-allocate-enough-memory-in-vc4_save_hang_stat.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Dan Carpenter -Date: Thu, 17 Dec 2015 15:39:08 +0300 -Subject: [14/16] drm/vc4: allocate enough memory in vc4_save_hang_state() -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=7e5082fbc00cc157e57a70cdb6b9bbb21289afb1 - -"state" is smaller than "kernel_state" so we end up corrupting memory. 
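The bug class fixed here is a classic: sizeof applied to the wrong pointer compiles silently and under-allocates. A userspace illustration of the failure mode and of the sizeof(*ptr) idiom that prevents it (the struct names are invented for the example):

#include <stdlib.h>

struct small { int a; };
struct big { int a; double pad[16]; };

int main(void)
{
    struct small *state;
    struct big *kernel_state;

    /* Broken: allocates sizeof(struct small) bytes but stores the
     * result in a struct big pointer; later writes corrupt memory.
     */
    kernel_state = calloc(1, sizeof(*state));
    free(kernel_state);

    /* Correct: sizeof(*kernel_state) names the object actually being
     * allocated, so the type and the size can never drift apart.
     */
    kernel_state = calloc(1, sizeof(*kernel_state));
    free(kernel_state);
    return 0;
}
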
- -Fixes: 214613656b51 ('drm/vc4: Add an interface for capturing the GPU state after a hang.') -Signed-off-by: Dan Carpenter -Reviewed-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_gem.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c -index 461a16c..1928c0a 100644 ---- a/drivers/gpu/drm/vc4/vc4_gem.c -+++ b/drivers/gpu/drm/vc4/vc4_gem.c -@@ -145,7 +145,7 @@ vc4_save_hang_state(struct drm_device *dev) - unsigned long irqflags; - unsigned int i, unref_list_count; - -- kernel_state = kcalloc(1, sizeof(*state), GFP_KERNEL); -+ kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); - if (!kernel_state) - return; - diff --git a/debian/patches/features/arm/rpi/drm-vc4-bind-and-initialize-the-v3d-engine.patch b/debian/patches/features/arm/rpi/drm-vc4-bind-and-initialize-the-v3d-engine.patch deleted file mode 100644 index 4cc5d6a79..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-bind-and-initialize-the-v3d-engine.patch +++ /dev/null @@ -1,330 +0,0 @@ -From: Eric Anholt -Date: Mon, 2 Mar 2015 13:01:12 -0800 -Subject: [05/16] drm/vc4: Bind and initialize the V3D engine. -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=d3f5168a0810005920e7a3d5ba83e249bd9a750c - -This is the component of the GPU that does 3D rendering. - -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/Makefile | 1 + - drivers/gpu/drm/vc4/vc4_debugfs.c | 2 + - drivers/gpu/drm/vc4/vc4_drv.c | 1 + - drivers/gpu/drm/vc4/vc4_drv.h | 13 +++ - drivers/gpu/drm/vc4/vc4_v3d.c | 225 ++++++++++++++++++++++++++++++++++++++ - 5 files changed, 242 insertions(+) - create mode 100644 drivers/gpu/drm/vc4/vc4_v3d.c - -diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile -index eb776a6..e87a6f2 100644 ---- a/drivers/gpu/drm/vc4/Makefile -+++ b/drivers/gpu/drm/vc4/Makefile -@@ -11,6 +11,7 @@ vc4-y := \ - vc4_hdmi.o \ - vc4_hvs.o \ - vc4_plane.o \ -+ vc4_v3d.o \ - vc4_validate_shaders.o - - vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o -diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c -index 6bcf96e..d76ad10 100644 ---- a/drivers/gpu/drm/vc4/vc4_debugfs.c -+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c -@@ -22,6 +22,8 @@ static const struct drm_info_list vc4_debugfs_list[] = { - {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, - {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, - {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, -+ {"v3d_ident", vc4_v3d_debugfs_ident, 0}, -+ {"v3d_regs", vc4_v3d_debugfs_regs, 0}, - }; - - #define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list) -diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c -index da4be9c8..db58d74 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.c -+++ b/drivers/gpu/drm/vc4/vc4_drv.c -@@ -236,6 +236,7 @@ static struct platform_driver *const component_drivers[] = { - &vc4_hdmi_driver, - &vc4_crtc_driver, - &vc4_hvs_driver, -+ &vc4_v3d_driver, - }; - - static int vc4_platform_drm_probe(struct platform_device *pdev) -diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h -index bd77d55..8945463 100644 ---- a/drivers/gpu/drm/vc4/vc4_drv.h -+++ b/drivers/gpu/drm/vc4/vc4_drv.h -@@ -15,6 +15,7 @@ struct vc4_dev { - struct vc4_hdmi *hdmi; - struct vc4_hvs *hvs; - struct vc4_crtc *crtc[3]; -+ struct vc4_v3d *v3d; - - struct drm_fbdev_cma *fbdev; - -@@ -82,6 +83,11 @@ to_vc4_bo(struct drm_gem_object *bo) - return (struct vc4_bo *)bo; - } - -+struct vc4_v3d { -+ struct 
platform_device *pdev; -+ void __iomem *regs; -+}; -+ - struct vc4_hvs { - struct platform_device *pdev; - void __iomem *regs; -@@ -119,6 +125,8 @@ to_vc4_encoder(struct drm_encoder *encoder) - return container_of(encoder, struct vc4_encoder, base); - } - -+#define V3D_READ(offset) readl(vc4->v3d->regs + offset) -+#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset) - #define HVS_READ(offset) readl(vc4->hvs->regs + offset) - #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) - -@@ -241,6 +249,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, - u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); - u32 vc4_plane_dlist_size(struct drm_plane_state *state); - -+/* vc4_v3d.c */ -+extern struct platform_driver vc4_v3d_driver; -+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); -+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); -+ - /* vc4_validate_shader.c */ - struct vc4_validated_shader_info * - vc4_validate_shader(struct drm_gem_cma_object *shader_obj); -diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c -new file mode 100644 -index 0000000..040ad0d ---- /dev/null -+++ b/drivers/gpu/drm/vc4/vc4_v3d.c -@@ -0,0 +1,225 @@ -+/* -+ * Copyright (c) 2014 The Linux Foundation. All rights reserved. -+ * Copyright (C) 2013 Red Hat -+ * Author: Rob Clark -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see <http://www.gnu.org/licenses/>. 
-+ */ -+ -+#include "linux/component.h" -+#include "vc4_drv.h" -+#include "vc4_regs.h" -+ -+#ifdef CONFIG_DEBUG_FS -+#define REGDEF(reg) { reg, #reg } -+static const struct { -+ uint32_t reg; -+ const char *name; -+} vc4_reg_defs[] = { -+ REGDEF(V3D_IDENT0), -+ REGDEF(V3D_IDENT1), -+ REGDEF(V3D_IDENT2), -+ REGDEF(V3D_SCRATCH), -+ REGDEF(V3D_L2CACTL), -+ REGDEF(V3D_SLCACTL), -+ REGDEF(V3D_INTCTL), -+ REGDEF(V3D_INTENA), -+ REGDEF(V3D_INTDIS), -+ REGDEF(V3D_CT0CS), -+ REGDEF(V3D_CT1CS), -+ REGDEF(V3D_CT0EA), -+ REGDEF(V3D_CT1EA), -+ REGDEF(V3D_CT0CA), -+ REGDEF(V3D_CT1CA), -+ REGDEF(V3D_CT00RA0), -+ REGDEF(V3D_CT01RA0), -+ REGDEF(V3D_CT0LC), -+ REGDEF(V3D_CT1LC), -+ REGDEF(V3D_CT0PC), -+ REGDEF(V3D_CT1PC), -+ REGDEF(V3D_PCS), -+ REGDEF(V3D_BFC), -+ REGDEF(V3D_RFC), -+ REGDEF(V3D_BPCA), -+ REGDEF(V3D_BPCS), -+ REGDEF(V3D_BPOA), -+ REGDEF(V3D_BPOS), -+ REGDEF(V3D_BXCF), -+ REGDEF(V3D_SQRSV0), -+ REGDEF(V3D_SQRSV1), -+ REGDEF(V3D_SQCNTL), -+ REGDEF(V3D_SRQPC), -+ REGDEF(V3D_SRQUA), -+ REGDEF(V3D_SRQUL), -+ REGDEF(V3D_SRQCS), -+ REGDEF(V3D_VPACNTL), -+ REGDEF(V3D_VPMBASE), -+ REGDEF(V3D_PCTRC), -+ REGDEF(V3D_PCTRE), -+ REGDEF(V3D_PCTR0), -+ REGDEF(V3D_PCTRS0), -+ REGDEF(V3D_PCTR1), -+ REGDEF(V3D_PCTRS1), -+ REGDEF(V3D_PCTR2), -+ REGDEF(V3D_PCTRS2), -+ REGDEF(V3D_PCTR3), -+ REGDEF(V3D_PCTRS3), -+ REGDEF(V3D_PCTR4), -+ REGDEF(V3D_PCTRS4), -+ REGDEF(V3D_PCTR5), -+ REGDEF(V3D_PCTRS5), -+ REGDEF(V3D_PCTR6), -+ REGDEF(V3D_PCTRS6), -+ REGDEF(V3D_PCTR7), -+ REGDEF(V3D_PCTRS7), -+ REGDEF(V3D_PCTR8), -+ REGDEF(V3D_PCTRS8), -+ REGDEF(V3D_PCTR9), -+ REGDEF(V3D_PCTRS9), -+ REGDEF(V3D_PCTR10), -+ REGDEF(V3D_PCTRS10), -+ REGDEF(V3D_PCTR11), -+ REGDEF(V3D_PCTRS11), -+ REGDEF(V3D_PCTR12), -+ REGDEF(V3D_PCTRS12), -+ REGDEF(V3D_PCTR13), -+ REGDEF(V3D_PCTRS13), -+ REGDEF(V3D_PCTR14), -+ REGDEF(V3D_PCTRS14), -+ REGDEF(V3D_PCTR15), -+ REGDEF(V3D_PCTRS15), -+ REGDEF(V3D_DBGE), -+ REGDEF(V3D_FDBGO), -+ REGDEF(V3D_FDBGB), -+ REGDEF(V3D_FDBGR), -+ REGDEF(V3D_FDBGS), -+ REGDEF(V3D_ERRSTAT), -+}; -+ -+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused) -+{ -+ struct drm_info_node *node = (struct drm_info_node *)m->private; -+ struct drm_device *dev = node->minor->dev; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) { -+ seq_printf(m, "%s (0x%04x): 0x%08x\n", -+ vc4_reg_defs[i].name, vc4_reg_defs[i].reg, -+ V3D_READ(vc4_reg_defs[i].reg)); -+ } -+ -+ return 0; -+} -+ -+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) -+{ -+ struct drm_info_node *node = (struct drm_info_node *)m->private; -+ struct drm_device *dev = node->minor->dev; -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ uint32_t ident1 = V3D_READ(V3D_IDENT1); -+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC); -+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS); -+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS); -+ -+ seq_printf(m, "Revision: %d\n", -+ VC4_GET_FIELD(ident1, V3D_IDENT1_REV)); -+ seq_printf(m, "Slices: %d\n", nslc); -+ seq_printf(m, "TMUs: %d\n", nslc * tups); -+ seq_printf(m, "QPUs: %d\n", nslc * qups); -+ seq_printf(m, "Semaphores: %d\n", -+ VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM)); -+ -+ return 0; -+} -+#endif /* CONFIG_DEBUG_FS */ -+ -+static void vc4_v3d_init_hw(struct drm_device *dev) -+{ -+ struct vc4_dev *vc4 = to_vc4_dev(dev); -+ -+ /* Take all the memory that would have been reserved for user -+ * QPU programs, since we don't have an interface for running -+ * them, anyway. 
-+ */ -+ V3D_WRITE(V3D_VPMBASE, 0); -+} -+ -+static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) -+{ -+ struct platform_device *pdev = to_platform_device(dev); -+ struct drm_device *drm = dev_get_drvdata(master); -+ struct vc4_dev *vc4 = to_vc4_dev(drm); -+ struct vc4_v3d *v3d = NULL; -+ -+ v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL); -+ if (!v3d) -+ return -ENOMEM; -+ -+ v3d->pdev = pdev; -+ -+ v3d->regs = vc4_ioremap_regs(pdev, 0); -+ if (IS_ERR(v3d->regs)) -+ return PTR_ERR(v3d->regs); -+ -+ vc4->v3d = v3d; -+ -+ if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { -+ DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", -+ V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0); -+ return -EINVAL; -+ } -+ -+ vc4_v3d_init_hw(drm); -+ -+ return 0; -+} -+ -+static void vc4_v3d_unbind(struct device *dev, struct device *master, -+ void *data) -+{ -+ struct drm_device *drm = dev_get_drvdata(master); -+ struct vc4_dev *vc4 = to_vc4_dev(drm); -+ -+ vc4->v3d = NULL; -+} -+ -+static const struct component_ops vc4_v3d_ops = { -+ .bind = vc4_v3d_bind, -+ .unbind = vc4_v3d_unbind, -+}; -+ -+static int vc4_v3d_dev_probe(struct platform_device *pdev) -+{ -+ return component_add(&pdev->dev, &vc4_v3d_ops); -+} -+ -+static int vc4_v3d_dev_remove(struct platform_device *pdev) -+{ -+ component_del(&pdev->dev, &vc4_v3d_ops); -+ return 0; -+} -+ -+static const struct of_device_id vc4_v3d_dt_match[] = { -+ { .compatible = "brcm,vc4-v3d" }, -+ {} -+}; -+ -+struct platform_driver vc4_v3d_driver = { -+ .probe = vc4_v3d_dev_probe, -+ .remove = vc4_v3d_dev_remove, -+ .driver = { -+ .name = "vc4_v3d", -+ .of_match_table = vc4_v3d_dt_match, -+ }, -+}; diff --git a/debian/patches/features/arm/rpi/drm-vc4-copy_to_user-returns-the-number-of-bytes-rem.patch b/debian/patches/features/arm/rpi/drm-vc4-copy_to_user-returns-the-number-of-bytes-rem.patch deleted file mode 100644 index 3c188c717..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-copy_to_user-returns-the-number-of-bytes-rem.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: Dan Carpenter -Date: Thu, 17 Dec 2015 15:36:28 +0300 -Subject: [13/16] drm/vc4: copy_to_user() returns the number of bytes remaining -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=65c4777de54a39b2722a4b1ff3306d044014d511 - -The copy_to/from_user() functions return the number of bytes remaining -to be copied. We want to return error codes here. - -Also it's a bad idea to print an error message if a copy from user fails -because users can use that to spam /var/log/messages which is annoying -so I removed those. 
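For reference, the convention the fix restores: copy_to_user() and copy_from_user() return the number of bytes left uncopied, never an errno, so callers must map any nonzero result to -EFAULT themselves. A userspace model of the two call shapes, with fake_copy_from_user() standing in for the real primitive:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_from_user(): returns how many bytes could NOT be
 * copied (0 on success), which is the kernel convention at issue here.
 */
static unsigned long fake_copy_from_user(void *to, const void *from,
                                         unsigned long n, unsigned long left)
{
    memcpy(to, from, n - left);
    return left;
}

int main(void)
{
    char src[16] = "bin cl payload", dst[16];
    int ret;

    /* Wrong: the positive byte count leaks out as a "return code". */
    ret = fake_copy_from_user(dst, src, sizeof(dst), 3);
    printf("wrong: ret = %d\n", ret);   /* 3, not an errno */

    /* Right: collapse any nonzero remainder to -EFAULT. */
    ret = 0;
    if (fake_copy_from_user(dst, src, sizeof(dst), 3))
        ret = -EFAULT;
    printf("right: ret = %d\n", ret);   /* -14 on Linux */
    return 0;
}
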
- -Fixes: 214613656b51 ('drm/vc4: Add an interface for capturing the GPU state after a hang.') -Signed-off-by: Dan Carpenter -Reviewed-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_gem.c | 37 ++++++++++++++++++------------------- - 1 file changed, 18 insertions(+), 19 deletions(-) - -diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c -index 39f29e7..461a16c 100644 ---- a/drivers/gpu/drm/vc4/vc4_gem.c -+++ b/drivers/gpu/drm/vc4/vc4_gem.c -@@ -71,7 +71,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, - struct vc4_dev *vc4 = to_vc4_dev(dev); - unsigned long irqflags; - u32 i; -- int ret; -+ int ret = 0; - - spin_lock_irqsave(&vc4->job_lock, irqflags); - kernel_state = vc4->hang_state; -@@ -119,9 +119,11 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, - bo_state[i].size = vc4_bo->base.base.size; - } - -- ret = copy_to_user((void __user *)(uintptr_t)get_state->bo, -- bo_state, -- state->bo_count * sizeof(*bo_state)); -+ if (copy_to_user((void __user *)(uintptr_t)get_state->bo, -+ bo_state, -+ state->bo_count * sizeof(*bo_state))) -+ ret = -EFAULT; -+ - kfree(bo_state); - - err_free: -@@ -554,27 +556,24 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) - exec->shader_state = temp + exec_size; - exec->shader_state_size = args->shader_rec_count; - -- ret = copy_from_user(bin, -- (void __user *)(uintptr_t)args->bin_cl, -- args->bin_cl_size); -- if (ret) { -- DRM_ERROR("Failed to copy in bin cl\n"); -+ if (copy_from_user(bin, -+ (void __user *)(uintptr_t)args->bin_cl, -+ args->bin_cl_size)) { -+ ret = -EFAULT; - goto fail; - } - -- ret = copy_from_user(exec->shader_rec_u, -- (void __user *)(uintptr_t)args->shader_rec, -- args->shader_rec_size); -- if (ret) { -- DRM_ERROR("Failed to copy in shader recs\n"); -+ if (copy_from_user(exec->shader_rec_u, -+ (void __user *)(uintptr_t)args->shader_rec, -+ args->shader_rec_size)) { -+ ret = -EFAULT; - goto fail; - } - -- ret = copy_from_user(exec->uniforms_u, -- (void __user *)(uintptr_t)args->uniforms, -- args->uniforms_size); -- if (ret) { -- DRM_ERROR("Failed to copy in uniforms cl\n"); -+ if (copy_from_user(exec->uniforms_u, -+ (void __user *)(uintptr_t)args->uniforms, -+ args->uniforms_size)) { -+ ret = -EFAULT; - goto fail; - } - diff --git a/debian/patches/features/arm/rpi/drm-vc4-fix-a-typo-in-a-v3d-debug-register.patch b/debian/patches/features/arm/rpi/drm-vc4-fix-a-typo-in-a-v3d-debug-register.patch deleted file mode 100644 index e1a8178e8..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-fix-a-typo-in-a-v3d-debug-register.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Eric Anholt -Date: Fri, 23 Oct 2015 14:57:22 +0100 -Subject: [04/16] drm/vc4: Fix a typo in a V3D debug register. 
-Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=1fa81589bbac16af6baf153ccc9b3f38fb16a498 - -Signed-off-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_regs.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h -index 9e4e904..4e52a0a 100644 ---- a/drivers/gpu/drm/vc4/vc4_regs.h -+++ b/drivers/gpu/drm/vc4/vc4_regs.h -@@ -154,7 +154,7 @@ - #define V3D_PCTRS14 0x006f4 - #define V3D_PCTR15 0x006f8 - #define V3D_PCTRS15 0x006fc --#define V3D_BGE 0x00f00 -+#define V3D_DBGE 0x00f00 - #define V3D_FDBGO 0x00f04 - #define V3D_FDBGB 0x00f08 - #define V3D_FDBGR 0x00f0c diff --git a/debian/patches/features/arm/rpi/drm-vc4-fix-an-error-code.patch b/debian/patches/features/arm/rpi/drm-vc4-fix-an-error-code.patch deleted file mode 100644 index e6dd7c389..000000000 --- a/debian/patches/features/arm/rpi/drm-vc4-fix-an-error-code.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Dan Carpenter -Date: Thu, 17 Dec 2015 15:40:20 +0300 -Subject: [15/16] drm/vc4: fix an error code -Origin: http://cgit.freedesktop.org/~airlied/linux/commit?id=5645e785cea2f33acdc5e5cee62b3ce8a00f1169 - -"exec->exec_bo" is NULL at this point so this code returns success. We -want to return -ENOMEM. - -Fixes: d5b1a78a772f ('drm/vc4: Add support for drawing 3D frames.') -Signed-off-by: Dan Carpenter -Reviewed-by: Eric Anholt ---- - drivers/gpu/drm/vc4/vc4_gem.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c -index 1928c0a..48ce30a 100644 ---- a/drivers/gpu/drm/vc4/vc4_gem.c -+++ b/drivers/gpu/drm/vc4/vc4_gem.c -@@ -580,7 +580,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) - bo = vc4_bo_create(dev, exec_size, true); - if (!bo) { - DRM_ERROR("Couldn't allocate BO for binning\n"); -- ret = PTR_ERR(exec->exec_bo); -+ ret = -ENOMEM; - goto fail; - } - exec->exec_bo = &bo->base; diff --git a/debian/patches/features/arm/rpi/dt-bindings-add-root-properties-for-raspberry-pi-2.patch b/debian/patches/features/arm/rpi/dt-bindings-add-root-properties-for-raspberry-pi-2.patch deleted file mode 100644 index 55fbf6bb9..000000000 --- a/debian/patches/features/arm/rpi/dt-bindings-add-root-properties-for-raspberry-pi-2.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Eric Anholt -Date: Tue, 21 Apr 2015 09:42:21 -0700 -Subject: [1/3] dt-bindings: Add root properties for Raspberry Pi 2 -Origin: https://github.com/anholt/linux/commit/57e5c6d95b2cde884634586d833b02f54ba1c79d - -Signed-off-by: Eric Anholt -Acked-by: Rob Herring ---- - Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt -index c78576b..11d3056 100644 ---- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt -+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt -@@ -26,6 +26,10 @@ Raspberry Pi Model B+ - Required root node properties: - compatible = "raspberrypi,model-b-plus", "brcm,bcm2835"; - -+Raspberry Pi 2 Model B -+Required root node properties: -+compatible = "raspberrypi,2-model-b", "brcm,bcm2836"; -+ - Raspberry Pi Compute Module - Required root node properties: - compatible = "raspberrypi,compute-module", "brcm,bcm2835"; diff --git a/debian/patches/features/arm/rpi/dt-bindings-add-rpi-power-domain-driver-bindings.patch 
b/debian/patches/features/arm/rpi/dt-bindings-add-rpi-power-domain-driver-bindings.patch deleted file mode 100644 index a3f2e553c..000000000 --- a/debian/patches/features/arm/rpi/dt-bindings-add-rpi-power-domain-driver-bindings.patch +++ /dev/null @@ -1,71 +0,0 @@ -From: Alexander Aring -Date: Wed, 16 Dec 2015 16:26:48 -0800 -Subject: [2/3] dt-bindings: add rpi power domain driver bindings -Origin: https://github.com/anholt/linux/commit/4c8b338f9ae38dee9c77bda023babc7f7543f52c - -This patch adds devicetree tree bindings for the Raspberry Pi power -domain driver. - -Signed-off-by: Alexander Aring -Signed-off-by: Eric Anholt -Acked-by: Rob Herring -Reviewed-by: Ulf Hansson -Reviewed-by: Kevin Hilman ---- - .../bindings/soc/bcm/raspberrypi,bcm2835-power.txt | 47 ++++++++++++++++++++++ - 1 file changed, 47 insertions(+) - create mode 100644 Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt - -diff --git a/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt b/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt -new file mode 100644 -index 0000000..30942cf ---- /dev/null -+++ b/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt -@@ -0,0 +1,47 @@ -+Raspberry Pi power domain driver -+ -+Required properties: -+ -+- compatible: Should be "raspberrypi,bcm2835-power". -+- firmware: Reference to the RPi firmware device node. -+- #power-domain-cells: Should be <1>, we providing multiple power domains. -+ -+The valid defines for power domain are: -+ -+ RPI_POWER_DOMAIN_I2C0 -+ RPI_POWER_DOMAIN_I2C1 -+ RPI_POWER_DOMAIN_I2C2 -+ RPI_POWER_DOMAIN_VIDEO_SCALER -+ RPI_POWER_DOMAIN_VPU1 -+ RPI_POWER_DOMAIN_HDMI -+ RPI_POWER_DOMAIN_USB -+ RPI_POWER_DOMAIN_VEC -+ RPI_POWER_DOMAIN_JPEG -+ RPI_POWER_DOMAIN_H264 -+ RPI_POWER_DOMAIN_V3D -+ RPI_POWER_DOMAIN_ISP -+ RPI_POWER_DOMAIN_UNICAM0 -+ RPI_POWER_DOMAIN_UNICAM1 -+ RPI_POWER_DOMAIN_CCP2RX -+ RPI_POWER_DOMAIN_CSI2 -+ RPI_POWER_DOMAIN_CPI -+ RPI_POWER_DOMAIN_DSI0 -+ RPI_POWER_DOMAIN_DSI1 -+ RPI_POWER_DOMAIN_TRANSPOSER -+ RPI_POWER_DOMAIN_CCP2TX -+ RPI_POWER_DOMAIN_CDP -+ RPI_POWER_DOMAIN_ARM -+ -+Example: -+ -+power: power { -+ compatible = "raspberrypi,bcm2835-power"; -+ firmware = <&firmware>; -+ #power-domain-cells = <1>; -+}; -+ -+Example for using power domain: -+ -+&usb { -+ power-domains = <&power RPI_POWER_DOMAIN_USB>; -+}; diff --git a/debian/patches/features/arm/rpi/pwm-bcm2835-calculate-scaler-in-config.patch b/debian/patches/features/arm/rpi/pwm-bcm2835-calculate-scaler-in-config.patch deleted file mode 100644 index 9f3d64775..000000000 --- a/debian/patches/features/arm/rpi/pwm-bcm2835-calculate-scaler-in-config.patch +++ /dev/null @@ -1,57 +0,0 @@ -From: Stefan Wahren -Date: Tue, 1 Dec 2015 22:55:39 +0000 -Subject: [1/3] pwm: bcm2835: Calculate scaler in ->config() -Origin: https://git.kernel.org/cgit/linux/kernel/git/thierry.reding/linux-pwm.git/commit?id=ebe88b6ae41ff8f2b48608b6019c4341aa24bcea - -Currently pwm-bcm2835 assumes a fixed clock rate and stores the -resulting scaler in the driver structure. But with the upcoming -PWM clock support for clk-bcm2835 the rate could change, so -calculate the scaler in the ->config() callback. 
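The scaler itself is just nanoseconds per clock tick, so the DUTY and PERIOD registers receive their nanosecond arguments divided by it. A standalone sketch of the arithmetic (19.2 MHz is assumed here purely as an example rate; the point of the patch is that the real rate may change between calls):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
    /* Example: PWM clocked at 19.2 MHz. */
    unsigned long rate = 19200000;
    unsigned long scaler = NSEC_PER_SEC / rate;   /* ~52 ns per tick */

    /* pwm_config(pwm, 250000, 1000000): 1 kHz period, 25% duty. */
    unsigned long period_ns = 1000000, duty_ns = 250000;

    printf("scaler = %lu ns/tick\n", scaler);
    printf("PERIOD = %lu ticks\n", period_ns / scaler);
    printf("DUTY   = %lu ticks\n", duty_ns / scaler);
    return 0;
}
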
- -Signed-off-by: Stefan Wahren -Reviewed-by: Eric Anholt -Signed-off-by: Thierry Reding ---- - drivers/pwm/pwm-bcm2835.c | 8 +++----- - 1 file changed, 3 insertions(+), 5 deletions(-) - -diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c -index b4c7f95..174cca9 100644 ---- a/drivers/pwm/pwm-bcm2835.c -+++ b/drivers/pwm/pwm-bcm2835.c -@@ -29,7 +29,6 @@ - struct bcm2835_pwm { - struct pwm_chip chip; - struct device *dev; -- unsigned long scaler; - void __iomem *base; - struct clk *clk; - }; -@@ -66,6 +65,7 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) - { - struct bcm2835_pwm *pc = to_bcm2835_pwm(chip); -+ unsigned long scaler = NSEC_PER_SEC / clk_get_rate(pc->clk); - - if (period_ns <= MIN_PERIOD) { - dev_err(pc->dev, "period %d not supported, minimum %d\n", -@@ -73,8 +73,8 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - return -EINVAL; - } - -- writel(duty_ns / pc->scaler, pc->base + DUTY(pwm->hwpwm)); -- writel(period_ns / pc->scaler, pc->base + PERIOD(pwm->hwpwm)); -+ writel(duty_ns / scaler, pc->base + DUTY(pwm->hwpwm)); -+ writel(period_ns / scaler, pc->base + PERIOD(pwm->hwpwm)); - - return 0; - } -@@ -156,8 +156,6 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) - if (ret) - return ret; - -- pc->scaler = NSEC_PER_SEC / clk_get_rate(pc->clk); -- - pc->chip.dev = &pdev->dev; - pc->chip.ops = &bcm2835_pwm_ops; - pc->chip.npwm = 2; diff --git a/debian/patches/features/arm/rpi/pwm-bcm2835-fix-email-address-specification.patch b/debian/patches/features/arm/rpi/pwm-bcm2835-fix-email-address-specification.patch deleted file mode 100644 index e15c0c476..000000000 --- a/debian/patches/features/arm/rpi/pwm-bcm2835-fix-email-address-specification.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: Stefan Wahren -Date: Tue, 1 Dec 2015 22:55:41 +0000 -Subject: [3/3] pwm: bcm2835: Fix email address specification -Origin: https://git.kernel.org/cgit/linux/kernel/git/thierry.reding/linux-pwm.git/commit?id=6ef7d1c46f0cbe2b8e9c66d5d95ffa5a612df45d - -Signed-off-by: Stefan Wahren -Reviewed-by: Eric Anholt -Signed-off-by: Thierry Reding ---- - drivers/pwm/pwm-bcm2835.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c -index 31a6992..c5dbf16 100644 ---- a/drivers/pwm/pwm-bcm2835.c -+++ b/drivers/pwm/pwm-bcm2835.c -@@ -206,6 +206,6 @@ static struct platform_driver bcm2835_pwm_driver = { - }; - module_platform_driver(bcm2835_pwm_driver); - --MODULE_AUTHOR("Bart Tanghe "); - MODULE_DESCRIPTION("Broadcom BCM2835 PWM driver"); - MODULE_LICENSE("GPL v2"); diff --git a/debian/patches/features/arm/rpi/pwm-bcm2835-prevent-division-by-zero.patch b/debian/patches/features/arm/rpi/pwm-bcm2835-prevent-division-by-zero.patch deleted file mode 100644 index 10a8fb41c..000000000 --- a/debian/patches/features/arm/rpi/pwm-bcm2835-prevent-division-by-zero.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Stefan Wahren -Date: Tue, 1 Dec 2015 22:55:40 +0000 -Subject: [2/3] pwm: bcm2835: Prevent division by zero -Origin: https://git.kernel.org/cgit/linux/kernel/git/thierry.reding/linux-pwm.git/commit?id=fd13c14426299e75983a0cd3edf53dfa4083a70a - -It's possible that the PWM clock becomes an orphan. So better check the -result of clk_get_rate() in order to prevent a division by zero. 
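[Editor's aside again, not part of the patch: a minimal standalone C sketch of the guard being added, validating the clock rate before using it as a divisor. It is a userspace approximation under assumed names (compute_scaler is hypothetical); only the shape of the check mirrors the patch below.]

/*
 * Standalone sketch (not kernel code) of the division-by-zero guard:
 * reject a zero clock rate instead of dividing by it.
 */
#include <errno.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

static int compute_scaler(unsigned long rate, unsigned long *scaler)
{
	if (!rate) {
		/* an orphaned clock can report a rate of 0 */
		fprintf(stderr, "failed to get clock rate\n");
		return -EINVAL;
	}
	*scaler = NSEC_PER_SEC / rate;
	return 0;
}

int main(void)
{
	unsigned long scaler;

	if (compute_scaler(0, &scaler))		/* rejected instead of crashing */
		printf("rate 0 rejected\n");
	if (!compute_scaler(19200000, &scaler))
		printf("scaler=%lu\n", scaler);
	return 0;
}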
- -Signed-off-by: Stefan Wahren -Reviewed-by: Eric Anholt -Signed-off-by: Thierry Reding ---- - drivers/pwm/pwm-bcm2835.c | 10 +++++++++- - 1 file changed, 9 insertions(+), 1 deletion(-) - -diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c -index 174cca9..31a6992 100644 ---- a/drivers/pwm/pwm-bcm2835.c -+++ b/drivers/pwm/pwm-bcm2835.c -@@ -65,7 +65,15 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) - { - struct bcm2835_pwm *pc = to_bcm2835_pwm(chip); -- unsigned long scaler = NSEC_PER_SEC / clk_get_rate(pc->clk); -+ unsigned long rate = clk_get_rate(pc->clk); -+ unsigned long scaler; -+ -+ if (!rate) { -+ dev_err(pc->dev, "failed to get clock rate\n"); -+ return -EINVAL; -+ } -+ -+ scaler = NSEC_PER_SEC / rate; - - if (period_ns <= MIN_PERIOD) { - dev_err(pc->dev, "period %d not supported, minimum %d\n", diff --git a/debian/patches/series b/debian/patches/series index d6a5a28f4..a89db2c37 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -42,10 +42,6 @@ debian/snd-pcsp-disable-autoload.patch bugfix/x86/viafb-autoload-on-olpc-xo1.5-only.patch # Arch bug fixes -bugfix/x86/drm-i915-shut-up-gen8-sde-irq-dmesg-noise.patch -bugfix/arm/crypto-sun4i-ss-add-missing-statesize.patch -bugfix/x86/drm-vmwgfx-fix-a-width-pitch-mismatch-on-framebuffer.patch -bugfix/mips/mips-math-emu-correctly-handle-nop-emulation.patch # Arch features features/mips/MIPS-increase-MAX-PHYSMEM-BITS-on-Loongson-3-only.patch @@ -53,54 +49,13 @@ features/mips/MIPS-Loongson-3-Add-Loongson-LS3A-RS780E-1-way-machi.patch features/mips/MIPS-octeon-Add-support-for-the-UBNT-E200-board.patch features/x86/x86-memtest-WARN-if-bad-RAM-found.patch features/x86/x86-make-x32-syscall-support-conditional.patch -features/arm/rpi/pwm-bcm2835-calculate-scaler-in-config.patch -features/arm/rpi/pwm-bcm2835-prevent-division-by-zero.patch -features/arm/rpi/pwm-bcm2835-fix-email-address-specification.patch -features/arm/rpi/drm-create-a-driver-hook-for-allocating-gem-object-s.patch -features/arm/rpi/drm-vc4-add-a-bo-cache.patch -features/arm/rpi/drm-vc4-add-create-and-map-bo-ioctls.patch -features/arm/rpi/drm-vc4-add-an-api-for-creating-gpu-shaders-in-gem-b.patch -features/arm/rpi/drm-vc4-fix-a-typo-in-a-v3d-debug-register.patch -features/arm/rpi/drm-vc4-bind-and-initialize-the-v3d-engine.patch -features/arm/rpi/drm-vc4-add-support-for-drawing-3d-frames.patch -features/arm/rpi/drm-vc4-add-support-for-async-pageflips.patch -features/arm/rpi/drm-vc4-add-an-interface-for-capturing-the-gpu-state.patch -features/arm/rpi/drm-vc4-copy_to_user-returns-the-number-of-bytes-rem.patch -features/arm/rpi/drm-vc4-allocate-enough-memory-in-vc4_save_hang_stat.patch -features/arm/rpi/drm-vc4-fix-an-error-code.patch -features/arm/rpi/dt-bindings-add-root-properties-for-raspberry-pi-2.patch -features/arm/rpi/arm-bcm2835-add-a-compat-string-for-bcm2836-machine-.patch -features/arm/rpi/arm-bcm2835-add-kconfig-support-for-bcm2836.patch -features/arm/rpi/arm-bcm2835-define-two-new-packets-from-the-latest-f.patch -features/arm/rpi/dt-bindings-add-rpi-power-domain-driver-bindings.patch -features/arm/rpi/arm-bcm2835-add-rpi-power-domain-driver.patch -features/arm/rpi/arm-bcm2835-split-the-dt-for-peripherals-from-the-dt.patch -features/arm/rpi/arm-bcm2835-move-the-cpu-peripheral-include-out-of-c.patch -features/arm/rpi/arm-bcm2835-add-devicetree-for-bcm2836-and-raspberry.patch -features/arm/rpi/arm-bcm2835-add-the-auxiliary-clocks-to-the-device-t.patch 
features/sparc/hwrng-n2-attach-on-t5-m5-t7-m7-sparc-cpus.patch -features/arm/arm-orion-move-watchdog-setup-to-mach-orion5x.patch -features/arm/arm-orion-always-use-multi_irq_handler.patch -features/arm/arm-orion-use-sparse_irq-everywhere.patch -features/arm/arm-orion5x-clean-up-mach-.h-headers.patch -features/arm/arm-orion5x-multiplatform-support.patch # Miscellaneous bug fixes bugfix/all/misc-bmp085-Enable-building-as-a-module.patch bugfix/all/kbuild-use-nostdinc-in-compile-tests.patch bugfix/all/disable-some-marvell-phys.patch bugfix/all/rtsx_usb_ms-use-msleep_interruptible-in-polling-loop.patch -bugfix/all/revert-xhci-don-t-finish-a-td-if-we-get-a-short-transfer.patch -bugfix/all/bcache-fix-a-livelock-when-we-cause-a-huge-number-of.patch -bugfix/all/bcache-add-a-cond_resched-call-to-gc.patch -bugfix/all/bcache-clear-bcache_dev_unlink_done-flag-when-attach.patch -bugfix/all/bcache-fix-a-leak-in-bch_cached_dev_run.patch -bugfix/all/bcache-unregister-reboot-notifier-if-bcache-fails-to.patch -bugfix/all/bcache-allows-use-of-register-in-udev-to-avoid-devic.patch -bugfix/all/bcache-prevent-crash-on-changing-writeback_running.patch -bugfix/all/bcache-change-refill_dirty-to-always-scan-entire-dis.patch -bugfix/all/scsi-fix-crashes-in-sd-and-sr-runtime-pm.patch -bugfix/all/rt2x00-fix-monitor-mode-regression.patch # Miscellaneous features @@ -111,9 +66,4 @@ features/all/grsecurity/grsecurity-kconfig.patch features/all/grsecurity/grkernsec_perf_harden.patch # Security fixes -bugfix/all/usbvision-fix-overflow-of-interfaces-array.patch -bugfix/all/media-usbvision-fix-crash-on-detecting-device-with-i.patch bugfix/all/ptrace-being-capable-wrt-a-process-requires-mapped-uids-gids.patch -bugfix/all/usb-serial-visor-fix-crash-on-detecting-device-without-write_urbs.patch -bugfix/all/tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch -bugfix/all/pipe-limit-the-per-user-amount-of-pages-allocated-in.patch diff --git a/debian/patches/series-rt b/debian/patches/series-rt deleted file mode 100644 index ab3429f6b..000000000 --- a/debian/patches/series-rt +++ /dev/null @@ -1,572 +0,0 @@ -########################################################### -# DELTA against a known Linus release -########################################################### - -############################################################ -# UPSTREAM changes queued -############################################################ - -############################################################ -# UPSTREAM FIXES, patches pending -############################################################ - -############################################################ -# Stuff broken upstream, patches submitted -############################################################ -features/all/rt/btrfs-initialize-the-seq-counter-in-struct-btrfs_dev.patch -features/all/rt/sched-use-tsk_cpus_allowed-instead-of-accessing-cpus.patch -features/all/rt/sched-provide-a-tsk_nr_cpus_allowed-helper.patch -features/all/rt/drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch -features/all/rt/drivers-media-vsp1_video-fix-compile-error.patch - -############################################################ -# Stuff which needs addressing upstream, but requires more -# information -############################################################ -features/all/rt/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch - -############################################################ -# Stuff broken upstream, need to be sent 
-############################################################ -features/all/rt/rtmutex--Handle-non-enqueued-waiters-gracefully - -############################################################ -# Submitted on LKML -############################################################ -features/all/rt/genirq-Add-default-affinity-mask-command-line-option.patch - -# SPARC part of erly printk consolidation -features/all/rt/sparc64-use-generic-rwsem-spinlocks-rt.patch - -# SRCU -features/all/rt/kernel-SRCU-provide-a-static-initializer.patch - -############################################################ -# Submitted to mips ML -############################################################ - -############################################################ -# Submitted to ARM ML -############################################################ - -############################################################ -# Submitted to PPC ML -############################################################ - -############################################################ -# Submitted on LKML -############################################################ - -############################################################ -# Submitted to net-dev -############################################################ - -############################################################ -# Pending in tip -############################################################ - -############################################################ -# Stuff which should go upstream ASAP -############################################################ - -# SCHED BLOCK/WQ -features/all/rt/block-shorten-interrupt-disabled-regions.patch - -# Timekeeping split jiffies lock. Needs a good argument :) -features/all/rt/timekeeping-split-jiffies-lock.patch - -# CHECKME: Should local_irq_enable() generally do a preemption check ? 
-features/all/rt/vtime-split-lock-and-seqcount.patch - -# Tracing -features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch - -# PTRACE/SIGNAL crap -features/all/rt/signal-revert-ptrace-preempt-magic.patch - -# ARM lock annotation -features/all/rt/arm-convert-boot-lock-to-raw.patch - -# PREEMPT_ENABLE_NO_RESCHED - -# SIGNALS / POSIXTIMERS -features/all/rt/posix-timers-no-broadcast.patch -features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch - -# SCHED - -# GENERIC CMPXCHG - -# SHORTEN PREEMPT DISABLED -features/all/rt/drivers-random-reduce-preempt-disabled-region.patch - -# CLOCKSOURCE -features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch -features/all/rt/clocksource-tclib-allow-higher-clockrates.patch - -# DRIVERS NET -features/all/rt/drivers-net-8139-disable-irq-nosync.patch - -# PREEMPT - -# PM -features/all/rt/suspend-prevernt-might-sleep-splats.patch - -# NETWORKING -features/all/rt/net-prevent-abba-deadlock.patch -features/all/rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch - -# X86 -features/all/rt/x86-io-apic-migra-no-unmask.patch - -# RCU - -# LOCKING INIT FIXES - -# PCI -features/all/rt/pci-access-use-__wake_up_all_locked.patch - -# WORKQUEUE - - -##################################################### -# Stuff which should go mainline, but wants some care -##################################################### - -# SEQLOCK - -# ANON RW SEMAPHORES - -# TRACING -features/all/rt/latencyhist-disable-jump-labels.patch -features/all/rt/latency-hist.patch -features/all/rt/latency_hist-update-sched_wakeup-probe.patch -features/all/rt/trace-latency-hist-Consider-new-argument-when-probin.patch - -# HW LATENCY DETECTOR - this really wants a rewrite -features/all/rt/hwlatdetect.patch -features/all/rt/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch -features/all/rt/hwlat-detector-Use-trace_clock_local-if-available.patch -features/all/rt/hwlat-detector-Use-thread-instead-of-stop-machine.patch -features/all/rt/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch - -################################################## -# REAL RT STUFF starts here -################################################## - -# PRINTK -features/all/rt/printk-kill.patch -features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch - -# Enable RT CONFIG -features/all/rt/rt-preempt-base-config.patch -features/all/rt/kconfig-disable-a-few-options-rt.patch -features/all/rt/kconfig-preempt-rt-full.patch - -# WARN/BUG_ON_RT -features/all/rt/bug-rt-dependend-variants.patch - -# LOCAL_IRQ_RT/NON_RT -features/all/rt/local-irq-rt-depending-variants.patch - -# PREEMPT NORT -features/all/rt/preempt-nort-rt-variants.patch - -# local locks & migrate disable -features/all/rt/introduce_migrate_disable_cpu_light.patch -features/all/rt/rt-local-irq-lock.patch - -# ANNOTATE local_irq_disable sites -features/all/rt/ata-disable-interrupts-if-non-rt.patch -features/all/rt/ide-use-nort-local-irq-variants.patch -features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch -features/all/rt/inpt-gameport-use-local-irq-nort.patch -features/all/rt/user-use-local-irq-nort.patch -features/all/rt/usb-use-_nort-in-giveback.patch -features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch -features/all/rt/mm-workingset-do-not-protect-workingset_shadow_nodes.patch - -# Sigh -features/all/rt/signal-fix-up-rcu-wreckage.patch -features/all/rt/oleg-signal-rt-fix.patch -features/all/rt/x86-signal-delay-calling-signals-on-32bit.patch - -# ANNOTATE 
BUG/WARNON -features/all/rt/net-wireless-warn-nort.patch - -# BIT SPINLOCKS - SIGH -features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch -features/all/rt/fs-jbd-replace-bh_state-lock.patch - -# GENIRQ -features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch -features/all/rt/genirq-disable-irqpoll-on-rt.patch -features/all/rt/genirq-force-threading.patch -features/all/rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch - -# DRIVERS NET -features/all/rt/drivers-net-fix-livelock-issues.patch -features/all/rt/drivers-net-vortex-fix-locking-issues.patch - -# MM PAGE_ALLOC -features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch -features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch - -# MM SWAP -features/all/rt/mm-convert-swap-to-percpu-locked.patch - -# MM vmstat -features/all/rt/mm-make-vmstat-rt-aware.patch - -# MM memory -features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch - -# MM bounce -features/all/rt/mm-bounce-local-irq-save-nort.patch - -# MM SLxB -features/all/rt/mm-disable-sloub-rt.patch -features/all/rt/mm-enable-slub.patch -features/all/rt/slub-enable-irqs-for-no-wait.patch -features/all/rt/slub-disable-SLUB_CPU_PARTIAL.patch - -# MM -features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch -features/all/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch -features/all/rt/mm-memcontrol-do_not_disable_irq.patch - -# RADIX TREE -features/all/rt/radix-tree-rt-aware.patch - -# PANIC -features/all/rt/panic-disable-random-on-rt.patch - -# IPC -features/all/rt/ipc-msg-Implement-lockless-pipelined-wakeups.patch - -# RELAY -features/all/rt/relay-fix-timer-madness.patch - -# TIMERS -features/all/rt/timers-prepare-for-full-preemption.patch -features/all/rt/timers-preempt-rt-support.patch -features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch -features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch - -# HRTIMERS -features/all/rt/hrtimers-prepare-full-preemption.patch -features/all/rt/hrtimer-enfore-64byte-alignment.patch -features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch -features/all/rt/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch -features/all/rt/timer-fd-avoid-live-lock.patch -features/all/rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch - -# POSIX-CPU-TIMERS -features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch - -# SCHEDULER -features/all/rt/sched-delay-put-task.patch -features/all/rt/sched-limit-nr-migrate.patch -features/all/rt/sched-mmdrop-delayed.patch -features/all/rt/sched-rt-mutex-wakeup.patch -features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch -features/all/rt/cond-resched-softirq-rt.patch -features/all/rt/cond-resched-lock-rt-tweak.patch -features/all/rt/sched-disable-ttwu-queue.patch -features/all/rt/sched-disable-rt-group-sched-on-rt.patch -features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch -features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch - -# STOP MACHINE -features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch -features/all/rt/stop-machine-raw-lock.patch - -# MIGRATE DISABLE AND PER CPU -features/all/rt/hotplug-light-get-online-cpus.patch -features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch -features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch -features/all/rt/ftrace-migrate-disable-tracing.patch -features/all/rt/hotplug-use-migrate-disable.patch - -# NOHZ - -# LOCKDEP 
-features/all/rt/lockdep-no-softirq-accounting-on-rt.patch - -# SOFTIRQ -features/all/rt/mutex-no-spin-on-rt.patch -features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch -features/all/rt/softirq-preempt-fix-3-re.patch -features/all/rt/softirq-disable-softirq-stacks-for-rt.patch -features/all/rt/softirq-split-locks.patch -features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch -features/all/rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch -features/all/rt/rtmutex-trylock-is-okay-on-RT.patch - -# RAID5 -features/all/rt/md-raid5-percpu-handling-rt-aware.patch - -# FUTEX/RTMUTEX -features/all/rt/rtmutex-futex-prepare-rt.patch -features/all/rt/futex-requeue-pi-fix.patch -features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch - -# RTMUTEX -features/all/rt/pid.h-include-atomic.h.patch -features/all/rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch -features/all/rt/rtmutex-lock-killable.patch -features/all/rt/spinlock-types-separate-raw.patch -features/all/rt/rtmutex-avoid-include-hell.patch -features/all/rt/rtmutex_dont_include_rcu.patch -features/all/rt/rt-add-rt-locks.patch -features/all/rt/rtmutex-Use-chainwalking-control-enum.patch -features/all/rt/rtmutex-add-a-first-shot-of-ww_mutex.patch - -features/all/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch - -# RTMUTEX Fallout -features/all/rt/tasklist-lock-fix-section-conflict.patch -#fold -features/all/rt/ptrace-don-t-open-IRQs-in-ptrace_freeze_traced-too-e.patch - -# RCU -features/all/rt/peter_zijlstra-frob-rcu.patch -features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch -features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch -features/all/rt/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch - -# LGLOCKS - lovely -features/all/rt/lglocks-rt.patch - -# STOP machine (depend on lglock & rtmutex) -features/all/rt/stomp-machine-create-lg_global_trylock_relax-primiti.patch -features/all/rt/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch - -# DRIVERS SERIAL -features/all/rt/drivers-tty-fix-omap-lock-crap.patch -features/all/rt/drivers-tty-pl011-irq-disable-madness.patch -features/all/rt/rt-serial-warn-fix.patch - -# SIMPLE WAITQUEUE -features/all/rt/wait.h-include-atomic.h.patch -features/all/rt/wait-simple-implementation.patch -features/all/rt/work-simple-Simple-work-queue-implemenation.patch -features/all/rt/rcu-more-swait-conversions.patch -features/all/rt/completion-use-simple-wait-queues.patch -features/all/rt/fs-aio-simple-simple-work.patch - -# FS -features/all/rt/fs-namespace-preemption-fix.patch -features/all/rt/mm-protect-activate-switch-mm.patch -features/all/rt/fs-block-rt-support.patch -features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch -features/all/rt/fs-jbd2-pull-your-plug-when-waiting-for-space.patch - -# X86 -features/all/rt/x86-mce-timer-hrtimer.patch -features/all/rt/x86-mce-use-swait-queue-for-mce-wakeups.patch -features/all/rt/x86-stackprot-no-random-on-rt.patch -features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch -features/all/rt/x86-UV-raw_spinlock-conversion.patch -features/all/rt/thermal-Defer-thermal-wakups-to-threads.patch - -# CPU get light -features/all/rt/epoll-use-get-cpu-light.patch -features/all/rt/mm-vmalloc-use-get-cpu-light.patch -features/all/rt/block-mq-use-cpu_light.patch -features/all/rt/block-mq-drop-preempt-disable.patch -features/all/rt/block-mq-don-t-complete-requests-via-IPI.patch 
-features/all/rt/dump-stack-don-t-disable-preemption-during-trace.patch - -# CPU CHILL -features/all/rt/rt-introduce-cpu-chill.patch -features/all/rt/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch - -# block -features/all/rt/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch -features/all/rt/block-blk-mq-use-swait.patch -# XXX melt -features/all/rt/block-mq-drop-per-ctx-cpu_lock.patch - -# BLOCK LIVELOCK PREVENTION -features/all/rt/block-use-cpu-chill.patch - -# FS LIVELOCK PREVENTION -features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch -features/all/rt/net-use-cpu-chill.patch - -# WORKQUEUE more fixes -features/all/rt/workqueue-use-rcu.patch -features/all/rt/workqueue-use-locallock.patch -features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch -features/all/rt/workqueue-distangle-from-rq-lock.patch - -# IDR -features/all/rt/idr-use-local-lock-for-protection.patch -features/all/rt/percpu_ida-use-locklocks.patch - -# DEBUGOBJECTS -features/all/rt/debugobjects-rt.patch - -# JUMPLABEL -features/all/rt/jump-label-rt.patch - -# NETWORKING -features/all/rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch -features/all/rt/net__Make_synchronize-rcu_expedited_conditional-on-non-rt -features/all/rt/skbufhead-raw-lock.patch -features/all/rt/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch -features/all/rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch -features/all/rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch - -# NETWORK livelock fix -features/all/rt/net-tx-action-avoid-livelock-on-rt.patch - -# NETWORK DEBUGGING AID -features/all/rt/ping-sysrq.patch - -# irqwork -features/all/rt/irqwork-push_most_work_into_softirq_context.patch -features/all/rt/irqwork-Move-irq-safe-work-to-irq-context.patch - -# Sound -features/all/rt/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch - -# CONSOLE. NEEDS more thought !!! 
-features/all/rt/printk-rt-aware.patch -features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch - -# POWERC -features/all/rt/power-use-generic-rwsem-on-rt.patch -features/all/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch -features/all/rt/powerpc-ps3-device-init.c-adapt-to-completions-using.patch - -# ARM -features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch -features/all/rt/arm-unwind-use_raw_lock.patch -features/all/rt/ARM-enable-irq-in-translation-section-permission-fau.patch - -# ARM64 -features/all/rt/arm64-xen--Make-XEN-depend-on-non-rt.patch - -# KGDB -features/all/rt/kgb-serial-hackaround.patch - -# SYSFS - RT indicator -features/all/rt/sysfs-realtime-entry.patch - -# KMAP/HIGHMEM -features/all/rt/power-disable-highmem-on-rt.patch -features/all/rt/mips-disable-highmem-on-rt.patch -features/all/rt/mm-rt-kmap-atomic-scheduling.patch -features/all/rt/mm--rt--Fix-generic-kmap_atomic-for-RT -features/all/rt/x86-highmem-add-a-already-used-pte-check.patch -features/all/rt/arm-highmem-flush-tlb-on-unmap.patch -features/all/rt/arm-enable-highmem-for-rt.patch - -# IPC -features/all/rt/ipc-sem-rework-semaphore-wakeups.patch - -# SYSRQ - -# KVM require constant freq TSC (smp function call -> cpufreq) -features/all/rt/x86-kvm-require-const-tsc-for-rt.patch -features/all/rt/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch -features/all/rt/KVM-use-simple-waitqueue-for-vcpu-wq.patch - -# SCSI/FCOE -features/all/rt/scsi-fcoe-rt-aware.patch -features/all/rt/sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch - -# X86 crypto -features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch -features/all/rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch - -# Device mapper -features/all/rt/dm-make-rt-aware.patch - -# ACPI -features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch - -# CPUMASK OFFSTACK -features/all/rt/cpumask-disable-offstack-on-rt.patch - -# RANDOM -features/all/rt/random-make-it-work-on-rt.patch - -# SEQLOCKS -features/all/rt/seqlock-prevent-rt-starvation.patch - -# HOTPLUG -features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch -features/all/rt/cpu-rt-rework-cpu-down.patch -features/all/rt/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch -features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch -features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch -features/all/rt/cpu_down_move_migrate_enable_back.patch -features/all/rt/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch - -# SCSCI QLA2xxx -features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch - -# NET -features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch -features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch -features/all/rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch -features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch -features/all/rt/net-make-devnet_rename_seq-a-mutex.patch - -# CRYPTO -features/all/rt/peterz-srcu-crypto-chain.patch - -# LOCKDEP -features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch -features/all/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch - -# PERF -features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch -features/all/rt/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch - -# RCU -features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch 
-features/all/rt/rcu-Eliminate-softirq-processing-from-rcutree.patch -features/all/rt/rcu-make-RCU_BOOST-default-on-RT.patch - -# PREEMPT LAZY -features/all/rt/preempt-lazy-support.patch -features/all/rt/preempt-lazy-check-preempt_schedule.patch -features/all/rt/x86-preempt-lazy.patch -features/all/rt/arm-preempt-lazy-support.patch -features/all/rt/powerpc-preempt-lazy-support.patch -features/all/rt/arch-arm64-Add-lazy-preempt-support.patch -features/all/rt/arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch - -# LEDS -features/all/rt/leds-trigger-disable-CPU-trigger-on-RT.patch - -# DRIVERS -features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch -features/all/rt/mmci-remove-bogus-irq-save.patch -features/all/rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch - -# I915 -features/all/rt/i915_compile_fix.patch -features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch -features/all/rt/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch - -# CGROUPS -features/all/rt/cgroups-use-simple-wait-in-css_release.patch -features/all/rt/cgroups-scheduling-while-atomic-in-cgroup-code.patch - -# New stuff -# Revisit: We need this in other places as well -features/all/rt/move_sched_delayed_work_to_helper.patch - -# MD -features/all/rt/md-disable-bcache.patch - -# WORKQUEUE SIGH -features/all/rt/workqueue-prevent-deadlock-stall.patch - -# Add RT to version -features/all/rt/localversion.patch