From baa617cd991d9d2a71f06a94cdbae859b2da629a Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 25 Nov 2019 00:04:39 +0000 Subject: [PATCH] [rt] Update to 4.19.82-rt30 --- debian/changelog | 8 + ...M-at91-add-TCB-registers-definitions.patch | 8 +- ...ers-Add-a-new-driver-for-the-Atmel-A.patch | 8 +- ...ers-timer-atmel-tcb-add-clockevent-d.patch | 8 +- ...drivers-atmel-pit-make-option-silent.patch | 8 +- ...at91-Implement-clocksource-selection.patch | 8 +- ...onfigs-at91-use-new-TCB-timer-driver.patch | 8 +- .../0007-ARM-configs-at91-unselect-PIT.patch | 8 +- ...ts-Move-pending-table-allocation-to-.patch | 22 +- ...-convert-worker-lock-to-raw-spinlock.patch | 8 +- ...m-qi-simplify-CGR-allocation-freeing.patch | 8 +- ...obustify-CFS-bandwidth-timer-locking.patch | 26 +- ...012-arm-Convert-arm-boot_lock-to-raw.patch | 8 +- ...-let-setaffinity-unmask-threaded-EOI.patch | 10 +- ...irqsave-in-cgroup_rstat_flush_locked.patch | 8 +- ...lize-cookie-hash-table-raw-spinlocks.patch | 8 +- ...mbus-include-header-for-get_irq_regs.patch | 8 +- ...de-irqflags.h-for-raw_local_irq_save.patch | 8 +- .../0018-efi-Allow-efi-runtime.patch | 10 +- ...fi-drop-task_lock-from-efi_switch_mm.patch | 8 +- ...e_layout-before-altenates-are-applie.patch | 8 +- ...-phandle-cache-outside-of-the-devtre.patch | 8 +- ...ake-quarantine_lock-a-raw_spinlock_t.patch | 8 +- ...xpedited-GP-parallelization-cleverne.patch | 8 +- ...-kmemleak_lock-to-raw-spinlock-on-RT.patch | 10 +- ...-replace-seqcount_t-with-a-seqlock_t.patch | 26 +- ...vide-a-pointer-to-the-valid-CPU-mask.patch | 76 +- ...ernel-sched-core-add-migrate_disable.patch | 18 +- ...sable-Add-export_symbol_gpl-for-__mi.patch | 10 +- ...o-not-disable-enable-clocks-in-a-row.patch | 8 +- ...B-Allow-higher-clock-rates-for-clock.patch | 8 +- ...31-timekeeping-Split-jiffies-seqlock.patch | 10 +- ...2-signal-Revert-ptrace-preempt-magic.patch | 8 +- ...et-sched-Use-msleep-instead-of-yield.patch | 12 +- ...rq-remove-BUG_ON-irqs_disabled-check.patch | 12 +- ...do-no-disable-interrupts-in-giveback.patch | 8 +- ...rovide-PREEMPT_RT_BASE-config-switch.patch | 8 +- ...sable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch | 8 +- ...abel-disable-if-stop_machine-is-used.patch | 10 +- ...config-options-which-are-not-RT-comp.patch | 8 +- .../0040-lockdep-disable-self-test.patch | 10 +- .../0041-mm-Allow-only-slub-on-RT.patch | 8 +- ...locking-Disable-spin-on-owner-for-RT.patch | 8 +- ...043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch | 8 +- ...044-rcu-make-RCU_BOOST-default-on-RT.patch | 8 +- ...-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch | 8 +- ...46-net-core-disable-NET_RX_BUSY_POLL.patch | 8 +- ...0047-arm-disable-NEON-in-kernel-mode.patch | 12 +- ...0048-powerpc-Use-generic-rwsem-on-RT.patch | 8 +- ...ble-in-kernel-MPIC-emulation-for-PRE.patch | 8 +- .../0050-powerpc-Disable-highmem-on-RT.patch | 8 +- .../0051-mips-Disable-highmem-on-RT.patch | 8 +- ...86-Use-generic-rwsem_spinlocks-on-rt.patch | 8 +- ...ds-trigger-disable-CPU-trigger-on-RT.patch | 8 +- ...rop-K8-s-driver-from-beeing-selected.patch | 8 +- .../patches-rt/0055-md-disable-bcache.patch | 8 +- ...6-efi-Disable-runtime-services-on-RT.patch | 10 +- ...0057-printk-Add-a-printk-kill-switch.patch | 10 +- ..._early_printk-boot-param-to-help-wit.patch | 10 +- ...pt-Provide-preempt_-_-no-rt-variants.patch | 8 +- ...-migrate_disable-enable-in-different.patch | 8 +- .../0061-rt-Add-local-irq-locks.patch | 8 +- ...provide-get-put-_locked_ptr-variants.patch | 8 +- ...catterlist-Do-not-disable-irqs-on-RT.patch | 12 +- 
...-x86-Delay-calling-signals-in-atomic.patch | 10 +- ...ignal-delay-calling-signals-on-32bit.patch | 8 +- ...head-Replace-bh_uptodate_lock-for-rt.patch | 8 +- ...-state-lock-and-journal-head-lock-rt.patch | 10 +- ...st_bl-Make-list-head-locking-RT-safe.patch | 8 +- ...-list_bl-fixup-bogus-lockdep-warning.patch | 8 +- .../0070-genirq-Disable-irqpoll-on-rt.patch | 8 +- ...-genirq-Force-interrupt-thread-on-RT.patch | 13 +- ...d-zone-lock-while-freeing-pages-from.patch | 8 +- ...d-zone-lock-while-freeing-pages-from.patch | 8 +- ...B-change-list_lock-to-raw_spinlock_t.patch | 10 +- ...ving-back-empty-slubs-to-IRQ-enabled.patch | 10 +- ...page_alloc-rt-friendly-per-cpu-pages.patch | 8 +- ...077-mm-swap-Convert-to-percpu-locked.patch | 14 +- ...m-perform-lru_add_drain_all-remotely.patch | 10 +- ...t-per-cpu-variables-with-preempt-dis.patch | 8 +- ...plit-page-table-locks-for-vector-pag.patch | 8 +- .../0081-mm-Enable-SLUB-for-RT.patch | 10 +- ...0082-slub-Enable-irqs-for-__GFP_WAIT.patch | 10 +- .../0083-slub-Disable-SLUB_CPU_PARTIAL.patch | 8 +- ...n-t-call-schedule_work_on-in-preempt.patch | 14 +- ...place-local_irq_disable-with-local-l.patch | 24 +- ...oc-copy-with-get_cpu_var-and-locking.patch | 32 +- ...le-preemption-__split_large_page-aft.patch | 8 +- .../0088-radix-tree-use-local-locks.patch | 8 +- ...9-timers-Prepare-for-full-preemption.patch | 16 +- ...090-x86-kvm-Require-const-tsc-for-RT.patch | 12 +- ...ec-Don-t-use-completion-s-wait-queue.patch | 8 +- .../0092-wait.h-include-atomic.h.patch | 8 +- ...mple-Simple-work-queue-implemenation.patch | 8 +- ...-a-shit-statement-in-SWORK_EVENT_PEN.patch | 8 +- ...95-completion-Use-simple-wait-queues.patch | 18 +- .../0096-fs-aio-simple-simple-work.patch | 8 +- ...voke-the-affinity-callback-via-a-wor.patch | 20 +- ...id-schedule_work-with-interrupts-dis.patch | 10 +- ...ate-hrtimer_init-hrtimer_init_sleepe.patch | 10 +- ...100-hrtimers-Prepare-full-preemption.patch | 10 +- ...s-by-default-into-the-softirq-contex.patch | 30 +- ...air-Make-the-hrtimers-non-hard-again.patch | 12 +- ...-schedule_work-call-to-helper-thread.patch | 8 +- ...te-change-before-hrtimer_cancel-in-d.patch | 8 +- ...timers-Thread-posix-cpu-timers-on-rt.patch | 18 +- ...ched-Move-task_struct-cleanup-to-RCU.patch | 14 +- ...-number-of-task-migrations-per-batch.patch | 10 +- .../0108-sched-Move-mmdrop-to-RCU-on-RT.patch | 20 +- ...e-stack-kprobe-clean-up-to-__put_tas.patch | 13 +- ...state-for-tasks-blocked-on-sleeping-.patch | 14 +- ...ount-rcu_preempt_depth-on-RT-in-migh.patch | 14 +- ...-proper-LOCK_OFFSET-for-cond_resched.patch | 8 +- .../0113-sched-Disable-TTWU_QUEUE-on-RT.patch | 8 +- ...Only-wake-up-idle-workers-if-not-blo.patch | 12 +- ...ease-the-nr-of-migratory-tasks-when-.patch | 16 +- ...-hotplug-Lightweight-get-online-cpus.patch | 16 +- ...e-disabled-counter-to-tracing-output.patch | 10 +- .../0118-lockdep-Make-it-RT-aware.patch | 14 +- ...tasklets-from-going-into-infinite-sp.patch | 8 +- ...eemption-after-reenabling-interrupts.patch | 12 +- ...oftirq-Disable-softirq-stacks-for-RT.patch | 16 +- .../0122-softirq-Split-softirq-locks.patch | 14 +- ...-use-local_bh_disable-in-netif_rx_ni.patch | 10 +- ...abling-of-softirq-processing-in-irq-.patch | 14 +- ...plit-timer-softirqs-out-of-ksoftirqd.patch | 8 +- ...cal_softirq_pending-messages-if-ksof.patch | 8 +- ...cal_softirq_pending-messages-if-task.patch | 8 +- .../0128-rtmutex-trylock-is-okay-on-RT.patch | 8 +- ...-nfs-turn-rmdir_sem-into-a-semaphore.patch | 18 +- ...he-various-new-futex-race-conditions.patch | 8 +- 
...on-when-a-requeued-RT-task-times-out.patch | 8 +- ...k-unlock-symetry-versus-pi_lock-and-.patch | 8 +- .../0133-pid.h-include-atomic.h.patch | 8 +- ...arm-include-definition-for-cpumask_t.patch | 8 +- ...ure-Do-NOT-include-rwlock.h-directly.patch | 8 +- ...36-rtmutex-Add-rtmutex_lock_killable.patch | 8 +- ...0137-rtmutex-Make-lock_killable-work.patch | 8 +- ...spinlock-Split-the-lock-types-header.patch | 8 +- .../0139-rtmutex-Avoid-include-hell.patch | 8 +- ...-rbtree-don-t-include-the-rcu-header.patch | 10 +- ...tex-Provide-rt_mutex_slowlock_locked.patch | 8 +- ...ockdep-less-version-of-rt_mutex-s-lo.patch | 8 +- ...tex-add-sleeping-lock-implementation.patch | 20 +- ...utex-implementation-based-on-rtmutex.patch | 8 +- ...wsem-implementation-based-on-rtmutex.patch | 8 +- ...lock-implementation-based-on-rtmutex.patch | 8 +- ...-preserve-state-like-a-sleeping-lock.patch | 8 +- .../0148-rtmutex-wire-up-RT-s-locking.patch | 8 +- ...utex-add-ww_mutex-addon-for-mutex-rt.patch | 8 +- .../0150-kconfig-Add-PREEMPT_RT_FULL.patch | 10 +- ...-fix-deadlock-in-device-mapper-block.patch | 8 +- ...utex-Flush-block-plug-on-__down_read.patch | 8 +- ...re-init-the-wait_lock-in-rt_mutex_in.patch | 8 +- ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 14 +- ...mutex-annotate-sleeping-lock-context.patch | 16 +- ...sable-fallback-to-preempt_disable-in.patch | 18 +- ...eck-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 8 +- .../0158-rcu-Frob-softirq-test.patch | 8 +- ...59-rcu-Merge-RCU-bh-into-RCU-preempt.patch | 10 +- ...ke-ksoftirqd-do-RCU-quiescent-states.patch | 8 +- ...nate-softirq-processing-from-rcutree.patch | 8 +- ...-use-cpu_online-instead-custom-check.patch | 8 +- ...place-local_irqsave-with-a-locallock.patch | 8 +- ..._normal_after_boot-by-default-for-RT.patch | 8 +- ...erial-omap-Make-the-locking-RT-aware.patch | 8 +- ...al-pl011-Make-the-locking-work-on-RT.patch | 8 +- ...-explicitly-initialize-the-flags-var.patch | 8 +- ...mprove-the-serial-console-PASS_LIMIT.patch | 8 +- ...0-don-t-take-the-trylock-during-oops.patch | 12 +- ...wsem-Remove-preempt_disable-variants.patch | 8 +- ...ate_mm-by-preempt_-disable-enable-_r.patch | 10 +- ...back-explicit-INIT_HLIST_BL_HEAD-ini.patch | 8 +- ...e-preemption-on-i_dir_seq-s-write-si.patch | 45 +- ...e-of-local-lock-in-multi_cpu-decompr.patch | 8 +- ...rmal-Defer-thermal-wakups-to-threads.patch | 8 +- ...e-preemption-around-local_bh_disable.patch | 8 +- ...poll-Do-not-disable-preemption-on-RT.patch | 8 +- ...er-preempt-disable-region-which-suck.patch | 10 +- .../0179-block-mq-use-cpu_light.patch | 10 +- ...ock-mq-do-not-invoke-preempt_disable.patch | 12 +- ...k-mq-don-t-complete-requests-via-IPI.patch | 18 +- ...-Make-raid5_percpu-handling-RT-aware.patch | 14 +- .../0183-rt-Introduce-cpu_chill.patch | 8 +- ...rtimer-Don-t-lose-state-in-cpu_chill.patch | 8 +- ...chill-save-task-state-in-saved_state.patch | 8 +- ...e-blk_queue_usage_counter_release-in.patch | 20 +- ...-block-Use-cpu_chill-for-retry-loops.patch | 8 +- ...cache-Use-cpu_chill-in-trylock-loops.patch | 8 +- ...t-Use-cpu_chill-instead-of-cpu_relax.patch | 10 +- ...use-swait_queue-instead-of-waitqueue.patch | 24 +- .../0191-workqueue-Use-normal-rcu.patch | 8 +- ...cal-irq-lock-instead-of-irq-disable-.patch | 8 +- ...t-workqueue-versus-ata-piix-livelock.patch | 8 +- ...tangle-worker-accounting-from-rqlock.patch | 18 +- .../0195-debugobjects-Make-RT-aware.patch | 8 +- .../0196-seqlock-Prevent-rt-starvation.patch | 8 +- ...vc_xprt_do_enqueue-use-get_cpu_light.patch | 8 +- 
...0198-net-Use-skbufhead-with-raw-lock.patch | 14 +- ...recursion-to-per-task-variable-on-RT.patch | 16 +- ...y-to-delegate-processing-a-softirq-t.patch | 10 +- ...ake-qdisc-s-busylock-in-__dev_xmit_s.patch | 10 +- ...Qdisc-use-a-seqlock-instead-seqcount.patch | 20 +- ...-missing-serialization-in-ip_send_un.patch | 8 +- .../0204-net-add-a-lock-around-icmp_sk.patch | 10 +- ...schedule_irqoff-disable-interrupts-o.patch | 10 +- ...-push-most-work-into-softirq-context.patch | 12 +- .../0207-printk-Make-rt-aware.patch | 10 +- ...n-t-try-to-print-from-IRQ-NMI-region.patch | 10 +- ...intk-Drop-the-logbuf_lock-more-often.patch | 10 +- ...n-translation-section-permission-fau.patch | 10 +- ...-irq_set_irqchip_state-documentation.patch | 12 +- ...wngrade-preempt_disable-d-region-to-.patch | 16 +- ...-preemp_disable-in-addition-to-local.patch | 8 +- ...14-kgdb-serial-Short-term-workaround.patch | 12 +- ...-sysfs-Add-sys-kernel-realtime-entry.patch | 8 +- .../0216-mm-rt-kmap_atomic-scheduling.patch | 12 +- ...highmem-Add-a-already-used-pte-check.patch | 8 +- .../0218-arm-highmem-Flush-tlb-on-unmap.patch | 8 +- .../0219-arm-Enable-highmem-for-rt.patch | 8 +- .../0220-scsi-fcoe-Make-RT-aware.patch | 10 +- ...ypto-Reduce-preempt-disabled-regions.patch | 8 +- ...-preempt-disabled-regions-more-algos.patch | 8 +- ...ypto-limit-more-FPU-enabled-sections.patch | 8 +- ...-serialize-RT-percpu-scratch-buffer-.patch | 8 +- ...d-a-lock-instead-preempt_disable-loc.patch | 8 +- ...andom_bytes-for-RT_FULL-in-init_oops.patch | 12 +- ...ackprotector-Avoid-random-pool-on-rt.patch | 8 +- .../0228-random-Make-it-work-on-rt.patch | 12 +- ...29-cpu-hotplug-Implement-CPU-pinning.patch | 12 +- ...ed-user-tasks-to-be-awakened-to-the-.patch | 10 +- ...duct-tape-RT-rwlock-usage-for-non-RT.patch | 10 +- ...ove-preemption-disabling-in-netif_rx.patch | 10 +- ...r-local_irq_disable-kmalloc-headache.patch | 10 +- ...-users-of-napi_alloc_cache-against-r.patch | 10 +- ...rialize-xt_write_recseq-sections-on-.patch | 8 +- ...Add-a-mutex-around-devnet_rename_seq.patch | 10 +- ...-Only-do-hardirq-context-test-for-ra.patch | 8 +- ...-fix-warnings-due-to-missing-PREEMPT.patch | 8 +- ...ched-Add-support-for-lazy-preemption.patch | 52 +- ...40-ftrace-Fix-trace-header-alignment.patch | 10 +- ...0241-x86-Support-for-lazy-preemption.patch | 14 +- ...-properly-check-against-preempt-mask.patch | 8 +- ...-use-proper-return-label-on-32bit-x8.patch | 10 +- ...-arm-Add-support-for-lazy-preemption.patch | 18 +- ...erpc-Add-support-for-lazy-preemption.patch | 22 +- ...-arch-arm64-Add-lazy-preempt-support.patch | 18 +- ...c-Protect-send_msg-with-a-local-lock.patch | 8 +- ...am-Replace-bit-spinlocks-with-rtmute.patch | 14 +- ...-t-disable-preemption-in-zcomp_strea.patch | 16 +- ...-zcomp_stream_get-smp_processor_id-u.patch | 8 +- ...51-tpm_tis-fix-stall-after-iowrite-s.patch | 8 +- ...t-deferral-of-watchdogd-wakeup-on-RT.patch | 8 +- ...Use-preempt_disable-enable_rt-where-.patch | 8 +- ...al_lock-unlock_irq-in-intel_pipe_upd.patch | 8 +- .../0255-drm-i915-disable-tracing-on-RT.patch | 8 +- ...M_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch | 8 +- ...roups-use-simple-wait-in-css_release.patch | 18 +- ...vert-callback_lock-to-raw_spinlock_t.patch | 8 +- ...-a-locallock-instead-preempt_disable.patch | 8 +- ...rkqueue-Prevent-deadlock-stall-on-RT.patch | 14 +- ...t-tasks-to-cache-one-sigqueue-struct.patch | 14 +- ...0262-Add-localversion-for-RT-release.patch | 8 +- ...iommu-Use-a-locallock-instead-local_.patch | 8 +- .../0264-powerpc-reshuffle-TIF-bits.patch | 16 
+- ...-Convert-show_lock-to-raw_spinlock_t.patch | 8 +- ...isable-interrupts-independently-of-t.patch | 8 +- ...-Fix-a-lockup-in-wait_for_completion.patch | 8 +- ...8-kthread-add-a-global-worker-thread.patch | 10 +- ...voke-the-affinity-callback-via-a-wor.patch | 16 +- ...ssing-work_struct-in-irq_set_affinit.patch | 12 +- ...-arm-imx6-cpuidle-Use-raw_spinlock_t.patch | 8 +- ...to-change-rcu_normal_after_boot-on-R.patch | 8 +- ...chtec-fix-stream_open.cocci-warnings.patch | 8 +- ...-Drop-a-preempt_disable_rt-statement.patch | 10 +- ...notification-of-canceling-timers-on-.patch | 34 +- ...ure-lock-unlock-symetry-versus-pi_lo.patch | 8 +- ...-bug-on-when-a-requeued-RT-task-time.patch | 8 +- ...andle-the-various-new-futex-race-con.patch | 8 +- ...karound-migrate_disable-enable-in-di.patch | 8 +- ...-Make-the-futex_hash_bucket-lock-raw.patch | 8 +- ...futex-Delay-deallocation-of-pi_state.patch | 8 +- ...disable-preemption-in-zswap_frontswa.patch | 8 +- .../0283-Linux-4.19.59-rt24-REBASE.patch | 20 - debian/patches-rt/0283-revert-aio.patch | 67 + .../0284-fs-aio-simple-simple-work.patch | 72 + debian/patches-rt/0285-revert-thermal.patch | 116 ++ ...rmal-Defer-thermal-wakups-to-threads.patch | 94 + debian/patches-rt/0287-revert-block.patch | 79 + ...e-blk_queue_usage_counter_release-in.patch | 110 ++ debian/patches-rt/0289-workqueue-rework.patch | 1548 +++++++++++++++++ .../0290-Linux-4.19.82-rt30-REBASE.patch | 16 + debian/patches-rt/series | 9 +- 293 files changed, 3086 insertions(+), 2136 deletions(-) delete mode 100644 debian/patches-rt/0283-Linux-4.19.59-rt24-REBASE.patch create mode 100644 debian/patches-rt/0283-revert-aio.patch create mode 100644 debian/patches-rt/0284-fs-aio-simple-simple-work.patch create mode 100644 debian/patches-rt/0285-revert-thermal.patch create mode 100644 debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch create mode 100644 debian/patches-rt/0287-revert-block.patch create mode 100644 debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch create mode 100644 debian/patches-rt/0289-workqueue-rework.patch create mode 100644 debian/patches-rt/0290-Linux-4.19.82-rt30-REBASE.patch diff --git a/debian/changelog b/debian/changelog index 90604275b..0092ebe7e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1356,6 +1356,14 @@ linux (4.19.81-1) UNRELEASED; urgency=medium [ Ben Hutchings ] * debian/bin/genpatch-rt: Fix series generation from git + * [rt] Update to 4.19.82-rt30: + - Drop changes in "fs/dcache: disable preemption on i_dir_seq's write side" + that conflict with "Fix the locking in dcache_readdir() and friends" + - Rewrite "fs/aio: simple simple work" using kthread_work + - Rewrite "thermal: Defer thermal wakups to threads" using kthread_work + - Rewrite "block: blk-mq: move blk_queue_usage_counter_release() into + process context" using kthread_work + - workqueue: rework -- Romain Perier Wed, 28 Aug 2019 13:28:09 +0200 diff --git a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch index 703dc9701..26e1ff627 100644 --- a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch +++ b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch @@ -1,8 +1,7 @@ -From 200fca9a9f123bcca859fadc996b1e40c0384269 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:18 +0200 -Subject: [PATCH 001/283] ARM: at91: add TCB registers definitions -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 001/290] ARM: at91: add TCB registers definitions +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3b15d99032dfca622e695f10a9934c16fa8b05a0 Add registers and bits definitions for the timer counter blocks found on Atmel ARM SoCs. @@ -205,6 +204,3 @@ index 000000000000..657e234b1483 +}; + +#endif /* __SOC_ATMEL_TCB_H */ --- -2.20.1 - diff --git a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch index f332bac5d..be9ffe1a9 100644 --- a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch +++ b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch @@ -1,9 +1,8 @@ -From 13e8b97c81d4029a2c41ce13ffc84507af252845 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:19 +0200 -Subject: [PATCH 002/283] clocksource/drivers: Add a new driver for the Atmel +Subject: [PATCH 002/290] clocksource/drivers: Add a new driver for the Atmel ARM TC blocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1cc14d070ff9808e86cd76edc497abd71537b237 Add a driver for the Atmel Timer Counter Blocks. This driver provides a clocksource and two clockevent devices. @@ -480,6 +479,3 @@ index 000000000000..21fbe430f91b + bits); +} +TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init); --- -2.20.1 - diff --git a/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch b/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch index 5c4c47476..8a6b74f3c 100644 --- a/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch +++ b/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch @@ -1,9 +1,8 @@ -From 108301f18eaae6fde1bf8b864d52052bdc2a7043 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:20 +0200 -Subject: [PATCH 003/283] clocksource/drivers: timer-atmel-tcb: add clockevent +Subject: [PATCH 003/290] clocksource/drivers: timer-atmel-tcb: add clockevent device on separate channel -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=10269a72d7656134bd29f2ef8a4cbd4d166ac825 Add an other clockevent device that uses a separate TCB channel when available. 
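For readers unfamiliar with the clockevents API that the patch above builds on, here is a minimal, purely illustrative sketch of how a one-shot clockevent device is declared and registered. All names prefixed example_ and the rating/rate values are hypothetical; they are not taken from the timer-atmel-tcb driver.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *ced)
{
	/* Program the hardware timer channel to fire after 'delta' ticks. */
	return 0;
}

static struct clock_event_device example_clkevt = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 125,
	.set_next_event	= example_set_next_event,
};

static void __init example_clkevt_register(u32 rate_hz)
{
	example_clkevt.cpumask = cpu_possible_mask;
	/* Accept delays from 1 tick up to the full 32-bit counter range. */
	clockevents_config_and_register(&example_clkevt, rate_hz, 1, 0xffffffff);
}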
@@ -266,6 +265,3 @@ index 21fbe430f91b..63ce3b69338a 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch index 7dcc22f7a..bc799db50 100644 --- a/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch +++ b/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch @@ -1,8 +1,7 @@ -From e60c9d976e3462237d2f3644c18091ac1e7746c6 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:21 +0200 -Subject: [PATCH 004/283] clocksource/drivers: atmel-pit: make option silent -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 004/290] clocksource/drivers: atmel-pit: make option silent +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7573710162f30e8c45e60a62ffd20cf4027360bf To conform with the other option, make the ATMEL_PIT option silent so it can be selected from the platform @@ -31,6 +30,3 @@ index 0ab22e7037f4..34b07047b91f 100644 config ATMEL_ST bool "Atmel ST timer support" if COMPILE_TEST --- -2.20.1 - diff --git a/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch b/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch index 45f3957e0..34bb2a577 100644 --- a/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch +++ b/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch @@ -1,8 +1,7 @@ -From 8cd066d01a3bc84384ba64a7521fdc80598a3418 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:22 +0200 -Subject: [PATCH 005/283] ARM: at91: Implement clocksource selection -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 005/290] ARM: at91: Implement clocksource selection +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1c682aaa18596b92057ac104266f0d0fefa05ef0 Allow selecting and unselecting the PIT clocksource driver so it doesn't have to be compile when unused. @@ -50,6 +49,3 @@ index 903f23c309df..fa493a86e2bb 100644 config HAVE_AT91_UTMI bool --- -2.20.1 - diff --git a/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch b/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch index 46012642f..c2bffc70c 100644 --- a/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch +++ b/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch @@ -1,8 +1,7 @@ -From db6f702c9d0558505d757c28c61f4f6a567a898a Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:23 +0200 -Subject: [PATCH 006/283] ARM: configs: at91: use new TCB timer driver -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 006/290] ARM: configs: at91: use new TCB timer driver +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2100311e3ae9a532f88135fe46237e09ac77c7a3 Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to timer-atmel-tcb. 
@@ -38,6 +37,3 @@ index 2080025556b5..f2bbc6339ca6 100644 CONFIG_ATMEL_SSC=y CONFIG_EEPROM_AT24=y CONFIG_SCSI=y --- -2.20.1 - diff --git a/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch b/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch index 8315fc6b1..bcb5f9358 100644 --- a/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch +++ b/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch @@ -1,8 +1,7 @@ -From 95cda24e3882fa19a569c029275d14089e8418e9 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:24 +0200 -Subject: [PATCH 007/283] ARM: configs: at91: unselect PIT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 007/290] ARM: configs: at91: unselect PIT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=09237389b9cefeb3f5e277ad4e35c5ca3b62db42 The PIT is not required anymore to successfully boot and may actually harm in case preempt-rt is used because the PIT interrupt is shared. @@ -39,6 +38,3 @@ index f2bbc6339ca6..be92871ab155 100644 CONFIG_AEABI=y CONFIG_UACCESS_WITH_MEMCPY=y CONFIG_ZBOOT_ROM_TEXT=0x0 --- -2.20.1 - diff --git a/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch index 497781963..c96e5d407 100644 --- a/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ b/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch @@ -1,9 +1,8 @@ -From 44f074c1b1621cbfa2d9f8f44aa69231154399d9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Jul 2018 13:38:54 +0100 -Subject: [PATCH 008/283] irqchip/gic-v3-its: Move pending table allocation to +Subject: [PATCH 008/290] irqchip/gic-v3-its: Move pending table allocation to init time -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a0a08ef23f2e37ccaa18fb80939e9b43871e0667 Signed-off-by: Marc Zyngier Signed-off-by: Sebastian Andrzej Siewior @@ -13,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -index 65ab2c80529c..21681f0f85f4 100644 +index e7549a2b1482..b68650b55b9f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -179,6 +179,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); @@ -24,7 +23,7 @@ index 65ab2c80529c..21681f0f85f4 100644 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) -@@ -1631,7 +1632,7 @@ static void its_free_prop_table(struct page *prop_page) +@@ -1644,7 +1645,7 @@ static void its_free_prop_table(struct page *prop_page) get_order(LPI_PROPBASE_SZ)); } @@ -33,7 +32,7 @@ index 65ab2c80529c..21681f0f85f4 100644 { phys_addr_t paddr; -@@ -1979,30 +1980,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +@@ -1992,30 +1993,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) return val; } @@ -96,7 +95,7 @@ index 65ab2c80529c..21681f0f85f4 100644 /* set PROPBASE */ val = (page_to_phys(gic_rdists->prop_page) | GICR_PROPBASER_InnerShareable | -@@ -2078,6 +2096,10 @@ static void its_cpu_init_lpis(void) +@@ -2091,6 +2109,10 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen 
the above */ dsb(sy); @@ -107,7 +106,7 @@ index 65ab2c80529c..21681f0f85f4 100644 } static void its_cpu_init_collection(struct its_node *its) -@@ -3558,16 +3580,6 @@ static int redist_disable_lpis(void) +@@ -3570,16 +3592,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; @@ -124,7 +123,7 @@ index 65ab2c80529c..21681f0f85f4 100644 if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; -@@ -3577,7 +3589,18 @@ static int redist_disable_lpis(void) +@@ -3589,7 +3601,18 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; @@ -144,7 +143,7 @@ index 65ab2c80529c..21681f0f85f4 100644 smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); -@@ -3833,7 +3856,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, +@@ -3845,7 +3868,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, } gic_rdists = rdists; @@ -166,6 +165,3 @@ index 3188c0bef3e7..5b57501fd2e7 100644 } __percpu *rdist; struct page *prop_page; u64 flags; --- -2.20.1 - diff --git a/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch index 309720b66..a87e44f56 100644 --- a/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch +++ b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch @@ -1,8 +1,7 @@ -From cd9320a1954642117f572891a8b45b177e6b0ebf Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 009/283] kthread: convert worker lock to raw spinlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 009/290] kthread: convert worker lock to raw spinlock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=781154b56e9d694b817b093e6c4a22cbb0ad20d8 In order to enable the queuing of kthread work items from hardirq context even when PREEMPT_RT_FULL is enabled, convert the worker @@ -198,6 +197,3 @@ index 087d18d771b5..5641b55783a6 100644 out: return ret; } --- -2.20.1 - diff --git a/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch b/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch index 74bc3bf12..f76f4eb67 100644 --- a/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch +++ b/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch @@ -1,11 +1,10 @@ -From d4cc8969937e548b95b4d6f40804a4b706c9b441 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 8 Oct 2018 14:09:37 +0300 -Subject: [PATCH 010/283] crypto: caam/qi - simplify CGR allocation, freeing +Subject: [PATCH 010/290] crypto: caam/qi - simplify CGR allocation, freeing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1d10c08ca4b5279cfa1f77d05dded9377191ed3f [Upstream commit 29e83c757006fd751966bdc53392bb22d74179c6] @@ -135,6 +134,3 @@ index 357b69f57072..b6c8acc30853 100644 /** * qi_cache_alloc - Allocate buffers from CAAM-QI cache --- -2.20.1 - diff --git a/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch 
b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch index 04dfc765f..476e1a49c 100644 --- a/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch +++ b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch @@ -1,8 +1,7 @@ -From 256f2e459fd2eea3e04b6b9934f06c46e19185bb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 7 Jan 2019 13:52:31 +0100 -Subject: [PATCH 011/283] sched/fair: Robustify CFS-bandwidth timer locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 011/290] sched/fair: Robustify CFS-bandwidth timer locking +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1a1e9d89852d01e24232b177091911f075b64f65 Traditionally hrtimer callbacks were run with IRQs disabled, but with the introduction of HRTIMER_MODE_SOFT it is possible they run from @@ -30,10 +29,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 4a433608ba74..289c966f907a 100644 +index 32d2dac680a7..b40d8c71e335 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4557,7 +4557,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4613,7 +4613,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; @@ -42,7 +41,7 @@ index 4a433608ba74..289c966f907a 100644 if (!cfs_rq_throttled(cfs_rq)) goto next; -@@ -4574,7 +4574,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4633,7 +4633,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, unthrottle_cfs_rq(cfs_rq); next: @@ -51,7 +50,7 @@ index 4a433608ba74..289c966f907a 100644 if (!remaining) break; -@@ -4590,7 +4590,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +@@ -4649,7 +4649,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. 
*/ @@ -60,7 +59,7 @@ index 4a433608ba74..289c966f907a 100644 { u64 runtime, runtime_expires; int throttled; -@@ -4632,11 +4632,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) +@@ -4691,11 +4691,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; @@ -74,7 +73,7 @@ index 4a433608ba74..289c966f907a 100644 cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); -@@ -4745,17 +4745,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) +@@ -4804,17 +4804,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); @@ -96,7 +95,7 @@ index 4a433608ba74..289c966f907a 100644 return; } -@@ -4766,18 +4767,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) +@@ -4825,18 +4826,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) if (runtime) cfs_b->distribute_running = 1; @@ -118,7 +117,7 @@ index 4a433608ba74..289c966f907a 100644 } /* -@@ -4857,11 +4858,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4916,11 +4917,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); @@ -132,7 +131,7 @@ index 4a433608ba74..289c966f907a 100644 for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) -@@ -4889,11 +4891,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4948,11 +4950,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) count = 0; } @@ -146,6 +145,3 @@ index 4a433608ba74..289c966f907a 100644 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } --- -2.20.1 - diff --git a/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch b/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch index 012d5c20f..a977b48f8 100644 --- a/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch +++ b/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch @@ -1,8 +1,7 @@ -From 3c866fa6b40a3acfe50a091680cd9f51a54cd45b Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Mon, 19 Sep 2011 14:51:14 -0700 -Subject: [PATCH 012/283] arm: Convert arm boot_lock to raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 012/290] arm: Convert arm boot_lock to raw +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9ebc1e4c87e1febc77c492b70626744e2249c23f The arm boot_lock is used by the secondary processor startup code. The locking task is the idle thread, which has idle->sched_class == &idle_sched_class. @@ -427,6 +426,3 @@ index c2366510187a..6b60f582b738 100644 return pen_release != -1 ? 
-ENOSYS : 0; } --- -2.20.1 - diff --git a/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch index 8015a9079..95ac5c5f7 100644 --- a/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch +++ b/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch @@ -1,9 +1,8 @@ -From abbec8803a7e474a1e1a1b1ee105de8ffd0c8cbc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Jul 2018 18:25:31 +0200 -Subject: [PATCH 013/283] x86/ioapic: Don't let setaffinity unmask threaded EOI +Subject: [PATCH 013/290] x86/ioapic: Don't let setaffinity unmask threaded EOI interrupt too early -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=269b937b10ea6588bf79b4d789c40cc5214711cf There is an issue with threaded interrupts which are marked ONESHOT and using the fasteoi handler. @@ -35,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index ff0d14cd9e82..c2bd6e0433f8 100644 +index ab22eded61d2..91db2ec0c10a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1722,19 +1722,20 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) @@ -107,6 +106,3 @@ index ff0d14cd9e82..c2bd6e0433f8 100644 } static void ioapic_ir_ack_level(struct irq_data *irq_data) --- -2.20.1 - diff --git a/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch index e80df87ee..7c0b6c9ac 100644 --- a/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch +++ b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch @@ -1,8 +1,7 @@ -From 39150ca165ea6d7d6b5ffe76efb6170893ffdb06 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 18:19:48 +0200 -Subject: [PATCH 014/283] cgroup: use irqsave in cgroup_rstat_flush_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 014/290] cgroup: use irqsave in cgroup_rstat_flush_locked() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a46bfc03c899ec820ba9e964b1bac8ee7ffc5f2f All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock either with spin_lock_irq() or spin_lock_irqsave(). 
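The distinction the patch above depends on, sketched here with hypothetical example_* names: the _irq lock/unlock variants unconditionally re-enable interrupts on unlock, while the _irqsave/_irqrestore variants preserve whatever interrupt state the caller had, which is what a function needs when it can be reached both with and without interrupts already disabled.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

/* May be called with interrupts enabled or already disabled by the caller. */
static void example_flush(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... per-CPU flush work ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
	/*
	 * The caller's interrupt state is restored here; the _irq
	 * variants would have forced interrupts back on regardless.
	 */
}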
@@ -45,6 +44,3 @@ index bb95a35e8c2d..3266a9781b4e 100644 /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || --- -2.20.1 - diff --git a/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch index 6b3ee79c5..9309c03ee 100644 --- a/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ b/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch @@ -1,8 +1,7 @@ -From 6c83cc3183d8efc6378788160d78a3a917a5ae96 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 3 Jul 2018 13:34:30 -0500 -Subject: [PATCH 015/283] fscache: initialize cookie hash table raw spinlocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 015/290] fscache: initialize cookie hash table raw spinlocks +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=534df1b6875bc0ff92dc2bd5c5fcbe82f606443f The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The PREEMPT_RT patcheset adds a raw spinlock to this structure and so on PREEMPT_RT @@ -59,6 +58,3 @@ index 84b90a79d75a..87a9330eafa2 100644 /** * fscache_register_netfs - Register a filesystem as desiring caching services --- -2.20.1 - diff --git a/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch index 4847088d4..952713f8f 100644 --- a/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +++ b/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch @@ -1,11 +1,10 @@ -From fdfc7c94f7e160bd80c27ac31c6823fbb20330f7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 Aug 2018 21:59:04 +0200 -Subject: [PATCH 016/283] Drivers: hv: vmbus: include header for get_irq_regs() +Subject: [PATCH 016/290] Drivers: hv: vmbus: include header for get_irq_regs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=32d049f2969bf30456b5afbcf2c709e7667ee6f7 On !RT the header file get_irq_regs() gets pulled in via other header files. 
On RT it does not and the build fails: @@ -35,6 +34,3 @@ index 87d3d7da78f8..1d2d8a4b837d 100644 #include "hv_trace.h" --- -2.20.1 - diff --git a/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch b/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch index f560f06a9..db88d5a17 100644 --- a/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch +++ b/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch @@ -1,8 +1,7 @@ -From 31f7158d8389cec550de5964422b1123fc94079b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 11 Oct 2018 16:39:59 +0200 -Subject: [PATCH 017/283] percpu: include irqflags.h for raw_local_irq_save() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 017/290] percpu: include irqflags.h for raw_local_irq_save() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f917aafbaad3be951578ed66aa337f869b407357 The header percpu.h header file is using raw_local_irq_save() but does not include irqflags.h for its definition. It compiles because the @@ -28,6 +27,3 @@ index 1817a8415a5e..942d64c0476e 100644 #ifdef CONFIG_SMP --- -2.20.1 - diff --git a/debian/patches-rt/0018-efi-Allow-efi-runtime.patch b/debian/patches-rt/0018-efi-Allow-efi-runtime.patch index b44169d09..b15deaec9 100644 --- a/debian/patches-rt/0018-efi-Allow-efi-runtime.patch +++ b/debian/patches-rt/0018-efi-Allow-efi-runtime.patch @@ -1,8 +1,7 @@ -From fcb3ebea1da6aede14a10c28a06902043072f250 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:06:10 +0200 -Subject: [PATCH 018/283] efi: Allow efi=runtime -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 018/290] efi: Allow efi=runtime +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0e3cdde741d28e9a96be78eff57024bfa1807a9d In case the option "efi=noruntime" is default at built-time, the user could overwrite its sate by `efi=runtime' and allow it again. 
@@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 insertions(+) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index 2a29dd9c986d..ab668e17fd05 100644 +index d54fca902e64..5db20908aa9c 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char *str) @@ -27,6 +26,3 @@ index 2a29dd9c986d..ab668e17fd05 100644 return 0; } early_param("efi", parse_efi_cmdline); --- -2.20.1 - diff --git a/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch b/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch index b9736f4f8..a669a40b7 100644 --- a/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch +++ b/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch @@ -1,8 +1,7 @@ -From 9dda6e746277e68f244d5660b5a3f3f85b0d9be0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 24 Jul 2018 14:48:55 +0200 -Subject: [PATCH 019/283] x86/efi: drop task_lock() from efi_switch_mm() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 019/290] x86/efi: drop task_lock() from efi_switch_mm() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=50a52c8407c717fe21f31d5ae907a1fd3ec7fd32 efi_switch_mm() is a wrapper around switch_mm() which saves current's ->active_mm, sets the requests mm as ->active_mm and invokes @@ -50,6 +49,3 @@ index ee5d08f25ce4..e8da7f492970 100644 } #ifdef CONFIG_EFI_MIXED --- -2.20.1 - diff --git a/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch index e1bfa7a87..f678152cf 100644 --- a/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch @@ -1,9 +1,8 @@ -From b66a9f85a9e8ee817d0e2de1637bf95b7710127f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 09:13:42 +0200 -Subject: [PATCH 020/283] arm64: KVM: compute_layout before altenates are +Subject: [PATCH 020/290] arm64: KVM: compute_layout before altenates are applied -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7cf32c5e928501c234cefb60c14a7d2516bfdd27 compute_layout() is invoked as part of an alternative fixup under stop_machine() and needs a sleeping lock as part of get_random_long(). 
@@ -78,6 +77,3 @@ index c712a7376bc1..792da0e125de 100644 /* * Compute HYP VA by using the same computation as kern_hyp_va() */ --- -2.20.1 - diff --git a/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch index f41ae7c07..e2e07377a 100644 --- a/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch @@ -1,9 +1,8 @@ -From 6d0f5b28de481062ee69b0d62ae5ef2fc5101f9c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 31 Aug 2018 14:16:30 +0200 -Subject: [PATCH 021/283] of: allocate / free phandle cache outside of the +Subject: [PATCH 021/290] of: allocate / free phandle cache outside of the devtree_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=62698ea1b5cfaf8b88d0df4b9b1acbf8ff591dac The phandle cache code allocates memory while holding devtree_lock which is a raw_spinlock_t. Memory allocation (and free()) is not possible on @@ -98,6 +97,3 @@ index 3f21ea6a90dc..2c7cf83b200c 100644 } void __init of_core_init(void) --- -2.20.1 - diff --git a/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch index 55168f205..870cb3f1f 100644 --- a/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch +++ b/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch @@ -1,8 +1,7 @@ -From 3ec52d2ee13b6e83429a4f7a048a0005305b8033 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 18 Sep 2018 10:29:31 -0500 -Subject: [PATCH 022/283] mm/kasan: make quarantine_lock a raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 022/290] mm/kasan: make quarantine_lock a raw_spinlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=dea0e8e741a34a81211d1fc0eb65e8d48ed39059 The static lock quarantine_lock is used in quarantine.c to protect the quarantine queue datastructures. It is taken inside quarantine queue @@ -93,6 +92,3 @@ index 3a8ddf8baf7d..b209dbaefde8 100644 qlist_free_all(&to_free, cache); --- -2.20.1 - diff --git a/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch index 908660a08..0d73a3288 100644 --- a/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch +++ b/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch @@ -1,9 +1,8 @@ -From 22ddccee8b48a817b261c98dda99967345475755 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 29 Oct 2018 11:53:01 +0100 -Subject: [PATCH 023/283] EXP rcu: Revert expedited GP parallelization +Subject: [PATCH 023/290] EXP rcu: Revert expedited GP parallelization cleverness -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3a435c0a8c0607a971d75a41cf00b45699e690cc (Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu) @@ -46,6 +45,3 @@ index 0b2c2ad69629..a0486414edb4 100644 rnp->exp_need_flush = true; } --- -2.20.1 - diff --git a/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch b/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch index 8719fbb70..0f5423ab6 100644 --- a/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch +++ b/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch @@ -1,11 +1,10 @@ -From ced9290a5d8460c8a46615a475cd094bc3b0c344 Mon Sep 17 00:00:00 2001 From: He Zhe Date: Wed, 19 Dec 2018 16:30:57 +0100 -Subject: [PATCH 024/283] kmemleak: Turn kmemleak_lock to raw spinlock on RT +Subject: [PATCH 024/290] kmemleak: Turn kmemleak_lock to raw spinlock on RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d823b15c5b9f5edcf6111f3b480674e4c2e42847 kmemleak_lock, as a rwlock on RT, can possibly be held in atomic context and causes the follow BUG. @@ -79,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index 72e3fb3bb037..0ed549045074 100644 +index 5eeabece0c17..92ce99b15f2b 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -26,7 +26,7 @@ @@ -164,6 +163,3 @@ index 72e3fb3bb037..0ed549045074 100644 } /* --- -2.20.1 - diff --git a/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch index 6fb235434..1696482b1 100644 --- a/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch +++ b/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch @@ -1,8 +1,7 @@ -From e1b321401ca437984b8973749826aea3a245e15b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 28 Oct 2016 23:05:11 +0200 -Subject: [PATCH 025/283] NFSv4: replace seqcount_t with a seqlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 025/290] NFSv4: replace seqcount_t with a seqlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=23180e2794932f9ce1ba1e0ef1ebb6a04c9ff676 The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me because it maps to preempt_disable() in -RT which I can't have at this @@ -27,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c -index 75fe92eaa681..e8d05393443f 100644 +index 825a8c52165a..c14f02b41f0d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -152,11 +152,11 @@ static int nfs_delegation_claim_opens(struct inode *inode, @@ -36,7 +35,7 @@ index 75fe92eaa681..e8d05393443f 100644 mutex_lock(&sp->so_delegreturn_mutex); - seq = 
raw_seqcount_begin(&sp->so_reclaim_seqcount); + seq = read_seqbegin(&sp->so_reclaim_seqlock); - err = nfs4_open_delegation_recall(ctx, state, stateid, type); + err = nfs4_open_delegation_recall(ctx, state, stateid); if (!err) err = nfs_delegation_claim_locks(ctx, state, stateid); - if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) @@ -45,7 +44,7 @@ index 75fe92eaa681..e8d05393443f 100644 mutex_unlock(&sp->so_delegreturn_mutex); put_nfs_open_context(ctx); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h -index 63287d911c08..2ae55eaa4a1e 100644 +index 5b61520dce88..2771aafaca19 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -114,7 +114,7 @@ struct nfs4_state_owner { @@ -58,10 +57,10 @@ index 63287d911c08..2ae55eaa4a1e 100644 }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 1de855e0ae61..78c3f4359e76 100644 +index 75faef7af22d..72abcccb8177 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c -@@ -2865,7 +2865,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, +@@ -2873,7 +2873,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, unsigned int seq; int ret; @@ -70,7 +69,7 @@ index 1de855e0ae61..78c3f4359e76 100644 ret = _nfs4_proc_open(opendata, ctx); if (ret != 0) -@@ -2906,7 +2906,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, +@@ -2914,7 +2914,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); @@ -80,7 +79,7 @@ index 1de855e0ae61..78c3f4359e76 100644 } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c -index 3ba2087469ac..f10952680bd9 100644 +index b3086e99420c..c9bf1eb7e1b2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -515,7 +515,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, @@ -92,7 +91,7 @@ index 3ba2087469ac..f10952680bd9 100644 mutex_init(&sp->so_delegreturn_mutex); return sp; } -@@ -1568,8 +1568,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs +@@ -1583,8 +1583,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
*/ @@ -106,7 +105,7 @@ index 3ba2087469ac..f10952680bd9 100644 restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) -@@ -1656,14 +1660,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs +@@ -1671,14 +1675,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs spin_lock(&sp->so_lock); goto restart; } @@ -131,6 +130,3 @@ index 3ba2087469ac..f10952680bd9 100644 return status; } --- -2.20.1 - diff --git a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 3d793eb82..27bde687b 100644 --- a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -1,12 +1,11 @@ -From 02954bb06eedf19db3637fea6699d0dc1761b270 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 4 Apr 2017 12:50:16 +0200 -Subject: [PATCH 026/283] kernel: sched: Provide a pointer to the valid CPU +Subject: [PATCH 026/290] kernel: sched: Provide a pointer to the valid CPU mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6e2bd8c3352b171ecbd5aaae16f7abd30a82342c In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not @@ -210,10 +209,10 @@ index bedd5fba33b0..3f4259f11a35 100644 current->pid, current->comm, cpumask_pr_args(proc_mask)); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c -index 88e326d6cc49..b0d01ace6611 100644 +index 64ab92f8a4a2..57ec8bb829f2 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c -@@ -855,14 +855,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, +@@ -852,14 +852,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, { struct sdma_rht_node *rht_node; struct sdma_engine *sde = NULL; @@ -271,7 +270,7 @@ index 9eb99a43f849..e4d0cfebaac5 100644 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 5dc024e28397..fdb8ba398ea8 100644 +index 20f5ba262cc0..68e673278301 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -660,7 +660,8 @@ struct task_struct { @@ -284,7 +283,7 @@ index 5dc024e28397..fdb8ba398ea8 100644 #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -1390,7 +1391,7 @@ extern struct pid *cad_pid; +@@ -1398,7 +1399,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ @@ -321,7 +320,7 @@ index ff956ccbb6df..7bb129c5b412 100644 } diff --git a/kernel/fork.c b/kernel/fork.c -index 69874db3fba8..98c971cb1d36 100644 +index aef1430bdce0..173e010cba45 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -845,6 +845,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) @@ -334,7 +333,7 @@ index 69874db3fba8..98c971cb1d36 100644 /* * One for us, one for whoever does the "release_task()" (usually diff 
--git a/kernel/sched/core.c b/kernel/sched/core.c -index 6859ea1d5c04..d6f690064cce 100644 +index 78ecdfae25b6..39bb4b3eb1bf 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) @@ -372,7 +371,7 @@ index 6859ea1d5c04..d6f690064cce 100644 + if (cpumask_equal(p->cpus_ptr, new_mask)) goto out; - if (!cpumask_intersects(new_mask, cpu_valid_mask)) { + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); @@ -1237,10 +1237,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; @@ -457,7 +456,7 @@ index 6859ea1d5c04..d6f690064cce 100644 * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4275,7 +4275,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p, * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available. */ @@ -466,7 +465,7 @@ index 6859ea1d5c04..d6f690064cce 100644 rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; -@@ -4874,7 +4874,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) +@@ -4915,7 +4915,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -475,7 +474,7 @@ index 6859ea1d5c04..d6f690064cce 100644 raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: -@@ -5454,7 +5454,7 @@ int task_can_attach(struct task_struct *p, +@@ -5495,7 +5495,7 @@ int task_can_attach(struct task_struct *p, * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks @@ -484,7 +483,7 @@ index 6859ea1d5c04..d6f690064cce 100644 */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; -@@ -5481,7 +5481,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) +@@ -5522,7 +5522,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (curr_cpu == target_cpu) return 0; @@ -493,7 +492,7 @@ index 6859ea1d5c04..d6f690064cce 100644 return -EINVAL; /* TODO: This is not properly updating schedstats */ -@@ -5619,7 +5619,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) +@@ -5660,7 +5660,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) put_prev_task(rq, next); /* @@ -542,10 +541,10 @@ index daaadf939ccb..f7d2c10b4c92 100644 /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index 72c07059ef37..fb6e64417470 100644 +index ebec37cb3be9..4b13df38c069 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p +@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p * If we cannot preempt any rq, fall back to pick any * online CPU: */ @@ -554,7 +553,7 @@ index 72c07059ef37..fb6e64417470 100644 if (cpu >= nr_cpu_ids) { /* * Failed to find any suitable CPU. 
-@@ -1823,7 +1823,7 @@ static void set_curr_task_dl(struct rq *rq) +@@ -1856,7 +1856,7 @@ static void set_curr_task_dl(struct rq *rq) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -563,7 +562,7 @@ index 72c07059ef37..fb6e64417470 100644 return 1; return 0; } -@@ -1973,7 +1973,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) +@@ -2006,7 +2006,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || @@ -573,10 +572,10 @@ index 72c07059ef37..fb6e64417470 100644 !dl_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 289c966f907a..0048a32a3b4d 100644 +index b40d8c71e335..da5d60d25c27 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -1630,7 +1630,7 @@ static void task_numa_compare(struct task_numa_env *env, +@@ -1662,7 +1662,7 @@ static void task_numa_compare(struct task_numa_env *env, * be incurred if the tasks were swapped. */ /* Skip this swap candidate if cannot move to the source cpu */ @@ -585,7 +584,7 @@ index 289c966f907a..0048a32a3b4d 100644 goto unlock; /* -@@ -1727,7 +1727,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, +@@ -1760,7 +1760,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ @@ -594,7 +593,7 @@ index 289c966f907a..0048a32a3b4d 100644 continue; env->dst_cpu = cpu; -@@ -5741,7 +5741,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, +@@ -5800,7 +5800,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -603,7 +602,7 @@ index 289c966f907a..0048a32a3b4d 100644 continue; local_group = cpumask_test_cpu(this_cpu, -@@ -5873,7 +5873,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this +@@ -5932,7 +5932,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -612,7 +611,7 @@ index 289c966f907a..0048a32a3b4d 100644 if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -5913,7 +5913,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p +@@ -5972,7 +5972,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p { int new_cpu = cpu; @@ -621,7 +620,7 @@ index 289c966f907a..0048a32a3b4d 100644 return prev_cpu; /* -@@ -6030,7 +6030,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int +@@ -6089,7 +6089,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; @@ -630,7 +629,7 @@ index 289c966f907a..0048a32a3b4d 100644 for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6064,7 +6064,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t +@@ -6123,7 +6123,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -639,7 +638,7 @@ index 289c966f907a..0048a32a3b4d 100644 continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6127,7 +6127,7 @@ 
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t +@@ -6186,7 +6186,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; @@ -648,7 +647,7 @@ index 289c966f907a..0048a32a3b4d 100644 continue; if (available_idle_cpu(cpu)) break; -@@ -6164,7 +6164,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) +@@ -6223,7 +6223,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -657,7 +656,7 @@ index 289c966f907a..0048a32a3b4d 100644 /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6382,7 +6382,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f +@@ -6441,7 +6441,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) @@ -666,7 +665,7 @@ index 289c966f907a..0048a32a3b4d 100644 } rcu_read_lock(); -@@ -7121,14 +7121,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7180,14 +7180,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -683,7 +682,7 @@ index 289c966f907a..0048a32a3b4d 100644 int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7148,7 +7148,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7207,7 +7207,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -692,7 +691,7 @@ index 289c966f907a..0048a32a3b4d 100644 env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7745,7 +7745,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) +@@ -7804,7 +7804,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -701,7 +700,7 @@ index 289c966f907a..0048a32a3b4d 100644 * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 
-@@ -8360,7 +8360,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) +@@ -8419,7 +8419,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -710,7 +709,7 @@ index 289c966f907a..0048a32a3b4d 100644 */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8756,7 +8756,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, +@@ -8815,7 +8815,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest CPU can't be * moved to this_cpu: */ @@ -742,10 +741,10 @@ index b980cc96604f..b6ca4a630050 100644 !rt_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c -index 1e6db9cbe4dc..fa95139445b2 100644 +index 8030e24dbf14..862f4b0139fc 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c -@@ -277,7 +277,7 @@ static void move_to_next_cpu(void) +@@ -279,7 +279,7 @@ static void move_to_next_cpu(void) * of this thread, than stop migrating for the duration * of the current test. */ @@ -780,6 +779,3 @@ index 5522692100ba..8b4be8e1802a 100644 trace_foo_with_template_simple("HELLO", cnt); --- -2.20.1 - diff --git a/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch index 1f6634ce1..083cdfbf5 100644 --- a/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch +++ b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch @@ -1,8 +1,7 @@ -From bda814671dc9f9f74cabd99a65cad3101b68ee83 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 027/283] kernel/sched/core: add migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 027/290] kernel/sched/core: add migrate_disable() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=70a9ac89b97eb9275f3eb6d7adbba77ddd059fee --- include/linux/preempt.h | 23 +++++++ @@ -54,7 +53,7 @@ index c01813c3fbe9..3196d0e76719 100644 #ifdef MODULE diff --git a/include/linux/sched.h b/include/linux/sched.h -index fdb8ba398ea8..df39ad5916e7 100644 +index 68e673278301..038d0faaa1d5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -662,6 +662,13 @@ struct task_struct { @@ -86,7 +85,7 @@ index 9fb239e12b82..5801e516ba63 100644 * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d6f690064cce..b658f0147c3b 100644 +index 39bb4b3eb1bf..1a1ffb873d22 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma @@ -127,7 +126,7 @@ index d6f690064cce..b658f0147c3b 100644 /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -1095,9 +1117,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, } /* Can the task run on the task's current CPU? 
If so, we're done */ @@ -142,10 +141,10 @@ index d6f690064cce..b658f0147c3b 100644 + } +#endif + - dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; -@@ -7067,3 +7096,100 @@ const u32 sched_prio_to_wmult[40] = { + /* Need help from migration thread: drop lock and wait. */ +@@ -7104,3 +7133,100 @@ const u32 sched_prio_to_wmult[40] = { }; #undef CREATE_TRACE_POINTS @@ -261,6 +260,3 @@ index 78fadf0438ea..5027158d3908 100644 #undef PN_SCHEDSTAT #undef PN #undef __PN --- -2.20.1 - diff --git a/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch index c22787b20..ab135c199 100644 --- a/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch +++ b/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch @@ -1,9 +1,8 @@ -From 6fb56185df42e49e0e2d8fe12d315356a57f4bce Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 9 Oct 2018 17:34:50 +0200 -Subject: [PATCH 028/283] sched/migrate_disable: Add export_symbol_gpl for +Subject: [PATCH 028/290] sched/migrate_disable: Add export_symbol_gpl for __migrate_disabled -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ef09fdabc9d1ade69d87d4d674857172b6d172ee Jonathan reported that lttng/modules can't use __migrate_disabled(). This function is only used by sched/core itself and the tracing @@ -22,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index b658f0147c3b..7a39d56f6a6b 100644 +index 1a1ffb873d22..a33c2c18628d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1013,6 +1013,7 @@ int __migrate_disabled(struct task_struct *p) @@ -33,6 +32,3 @@ index b658f0147c3b..7a39d56f6a6b 100644 #endif static void __do_set_cpus_allowed_tail(struct task_struct *p, --- -2.20.1 - diff --git a/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch b/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch index 207df7e47..2874be8e8 100644 --- a/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch +++ b/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch @@ -1,8 +1,7 @@ -From 35e0294d43f7c53bdb7ecba19df0710037d888ec Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Mar 2016 10:51:06 +0100 -Subject: [PATCH 029/283] arm: at91: do not disable/enable clocks in a row -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 029/290] arm: at91: do not disable/enable clocks in a row +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=619e4305a3a3a2296ec9a1fc32f84fb09a12f555 Currently the driver will disable the clock and enable it one line later if it is switching from periodic mode into one shot. 
@@ -93,6 +92,3 @@ index 43f4d5c4d6fa..de6baf564dfe 100644 .set_state_periodic = tc_set_periodic, .set_state_oneshot = tc_set_oneshot, }, --- -2.20.1 - diff --git a/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch b/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch index da41f6fa5..43db58efc 100644 --- a/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch +++ b/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch @@ -1,12 +1,11 @@ -From 4d4322de76f7b52df1529acfc6e5fc46e25761f1 Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Mon, 8 Mar 2010 18:57:04 +0100 -Subject: [PATCH 030/283] clocksource: TCLIB: Allow higher clock rates for +Subject: [PATCH 030/290] clocksource: TCLIB: Allow higher clock rates for clock events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8abf3e475b2921798e61e686140e077146360f1a As default the TCLIB uses the 32KiHz base clock rate for clock events. Add a compile time selection to allow higher clock resulution. @@ -165,6 +164,3 @@ index 3726eacdf65d..0900dec7ec04 100644 config DUMMY_IRQ tristate "Dummy IRQ handler" default n --- -2.20.1 - diff --git a/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch b/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch index 639c5b019..31c49018f 100644 --- a/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch +++ b/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch @@ -1,8 +1,7 @@ -From 64f770d93319861d308ce265a4389e4ca0a6ed5f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Feb 2013 22:36:59 +0100 -Subject: [PATCH 031/283] timekeeping: Split jiffies seqlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 031/290] timekeeping: Split jiffies seqlock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=37dae8435d79cbcc247b536cb16f55db30295418 Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so it can be taken in atomic context on RT. 
@@ -136,7 +135,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644 ts->timer_expires_base = basemono; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index 443edcddac8a..0517bc42c6b6 100644 +index c2708e1f0c69..13477f8ee80e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2418,8 +2418,10 @@ EXPORT_SYMBOL(hardpps); @@ -166,6 +165,3 @@ index 141ab3ab0354..099737f6f10c 100644 #define CS_NAME_LEN 32 --- -2.20.1 - diff --git a/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch b/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch index 3bd2227dc..23b406f55 100644 --- a/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch +++ b/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch @@ -1,8 +1,7 @@ -From 8cf90f7e58f51438a7ec0e4e704918afaa450ff1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 21 Sep 2011 19:57:12 +0200 -Subject: [PATCH 032/283] signal: Revert ptrace preempt magic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 032/290] signal: Revert ptrace preempt magic +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=34b289f78753746be46c707628324cd79730dac2 Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more than a bandaid around the ptrace design trainwreck. It's not a @@ -33,6 +32,3 @@ index 0e6bc3049427..d5a9646b3538 100644 freezable_schedule(); } else { /* --- -2.20.1 - diff --git a/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch b/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch index 331af4531..eef7da4f8 100644 --- a/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch +++ b/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch @@ -1,8 +1,7 @@ -From 882c4f88db8d6179773dc733e794fa504aef75e3 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 5 Mar 2014 00:49:47 +0100 -Subject: [PATCH 033/283] net: sched: Use msleep() instead of yield() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 033/290] net: sched: Use msleep() instead of yield() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e83295f1ca6355381c6d506047d641e9e607c96f On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 (by default). If a high priority userspace process tries to shut down a busy @@ -47,10 +46,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index 77b289da7763..31b9c2b415b4 100644 +index 30e32df5f84a..0b9c494f64b0 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c -@@ -1183,7 +1183,7 @@ void dev_deactivate_many(struct list_head *head) +@@ -1192,7 +1192,7 @@ void dev_deactivate_many(struct list_head *head) /* Wait for outstanding qdisc_run calls. 
*/ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) @@ -59,6 +58,3 @@ index 77b289da7763..31b9c2b415b4 100644 /* The new qdisc is assigned at this point so we can safely * unwind stale skb lists and qdisc statistics */ --- -2.20.1 - diff --git a/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch b/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch index baef728eb..4ab98bd03 100644 --- a/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch +++ b/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch @@ -1,8 +1,7 @@ -From ea6d238547b58b5fe9ce953cd818ef8bf6cb8915 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 27 Mar 2018 16:24:15 +0200 -Subject: [PATCH 034/283] dm rq: remove BUG_ON(!irqs_disabled) check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 034/290] dm rq: remove BUG_ON(!irqs_disabled) check +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b4b547ae7869e0ca0380a53cde929e51ecbdc173 In commit 052189a2ec95 ("dm: remove superfluous irq disablement in dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a @@ -21,10 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 deletion(-) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c -index 6e547b8dd298..29736c7e5f1f 100644 +index 4d36373e1c0f..12ed08245130 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c -@@ -688,7 +688,6 @@ static void dm_old_request_fn(struct request_queue *q) +@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q) /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; kthread_queue_work(&md->kworker, &tio->work); @@ -32,6 +31,3 @@ index 6e547b8dd298..29736c7e5f1f 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch b/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch index 132ff1c3f..7b52bb9a1 100644 --- a/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch +++ b/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch @@ -1,8 +1,7 @@ -From b91ae27f3efa2e15087397591db35dd1d11f5120 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 8 Nov 2013 17:34:54 +0100 -Subject: [PATCH 035/283] usb: do no disable interrupts in giveback -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 035/290] usb: do no disable interrupts in giveback +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=086a63b5d6cc385539d96fb77b65eeb281dbd552 Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet context") the USB code disables interrupts before invoking the complete @@ -41,6 +40,3 @@ index b82a7d787add..2f3015356124 100644 usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); --- -2.20.1 - diff --git a/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch b/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch index 83ae9c0ba..1ee99a282 100644 --- a/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch +++ b/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch @@ -1,8 +1,7 @@ -From 79f8ad95ffc5f1cd2ac721ab3d01291d4ad055df Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 12:39:57 +0200 -Subject: [PATCH 036/283] rt: Provide 
PREEMPT_RT_BASE config switch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 036/290] rt: Provide PREEMPT_RT_BASE config switch +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3b6f0b1d09976635e464aa63be521f1f6f63e35d Introduce PREEMPT_RT_BASE which enables parts of PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT @@ -59,6 +58,3 @@ index cd1655122ec0..027db5976c2f 100644 - bool \ No newline at end of file + bool --- -2.20.1 - diff --git a/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch b/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch index 8c991d239..01a7fb721 100644 --- a/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch +++ b/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch @@ -1,8 +1,7 @@ -From 75df679c581581978487f6f6de91bf86a9f72e7c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 14 Dec 2011 01:03:49 +0100 -Subject: [PATCH 037/283] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 037/290] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2efd4a4df8085c42c4a8bb8d74f1a6a566adc67a There are "valid" GFP_ATOMIC allocations such as @@ -71,6 +70,3 @@ index a3928d4438b5..a50b2158f7cd 100644 help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids --- -2.20.1 - diff --git a/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch b/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch index b5a3fd2bf..0cbf71c9b 100644 --- a/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch +++ b/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch @@ -1,8 +1,7 @@ -From b5d77d6b4b4bcead77cd720e8a93f4ae78420034 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Jul 2015 17:14:48 +0200 -Subject: [PATCH 038/283] jump-label: disable if stop_machine() is used -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 038/290] jump-label: disable if stop_machine() is used +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=77b2c3f7bbf3d862529e075933f0e7a6f8613c53 Some architectures are using stop_machine() while switching the opcode which leads to latency spikes. 
@@ -25,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 51794c7fa6d5..7d11242a37d2 100644 +index 185e552f1461..84f36e47e3ab 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -51,7 +51,7 @@ config ARM @@ -37,6 +36,3 @@ index 51794c7fa6d5..7d11242a37d2 100644 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) --- -2.20.1 - diff --git a/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch b/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch index 8822287db..038c10af9 100644 --- a/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch +++ b/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch @@ -1,9 +1,8 @@ -From 94fd428643474b867a8cac432d7d911a5250c367 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 24 Jul 2011 12:11:43 +0200 -Subject: [PATCH 039/283] kconfig: Disable config options which are not RT +Subject: [PATCH 039/290] kconfig: Disable config options which are not RT compatible -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=164a36eb11cebfda8c6eb1f79beb2a31e4dd9ce2 Disable stuff which is known to have issues on RT @@ -38,6 +37,3 @@ index b457e94ae618..0dddbb2a3282 100644 select COMPACTION select RADIX_TREE_MULTIORDER help --- -2.20.1 - diff --git a/debian/patches-rt/0040-lockdep-disable-self-test.patch b/debian/patches-rt/0040-lockdep-disable-self-test.patch index 1b065a53b..0fb961240 100644 --- a/debian/patches-rt/0040-lockdep-disable-self-test.patch +++ b/debian/patches-rt/0040-lockdep-disable-self-test.patch @@ -1,11 +1,10 @@ -From 1a9e9b418236c18717a91955eeafe5bd72a00598 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 17 Oct 2017 16:36:18 +0200 -Subject: [PATCH 040/283] lockdep: disable self-test +Subject: [PATCH 040/290] lockdep: disable self-test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bff275d06e885f7b51dbac8b2e93867616852378 The self-test wasn't always 100% accurate for RT. We disabled a few tests which failed because they had a different semantic for RT. Some @@ -18,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 3dea52f7be9c..1504e6aa8418 100644 +index 46a910acce3f..38cf7f81daa7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP @@ -30,6 +29,3 @@ index 3dea52f7be9c..1504e6aa8418 100644 help Say Y here if you want the kernel to run a short self-test during bootup. 
The self-test checks whether common types of locking bugs --- -2.20.1 - diff --git a/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch b/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch index cd5af84c0..9de47c9c3 100644 --- a/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch +++ b/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch @@ -1,8 +1,7 @@ -From 75102ff5e253e5ababc30c7512e0c07f2b7dc297 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:03 -0500 -Subject: [PATCH 041/283] mm: Allow only slub on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 041/290] mm: Allow only slub on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f04e50899757b5f50979fa506c88bd78d17c49b6 Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. @@ -32,6 +31,3 @@ index 47035b5a46f6..ae9a0113a699 100644 help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but --- -2.20.1 - diff --git a/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch b/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch index c43feeebd..64ed2e16d 100644 --- a/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch +++ b/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch @@ -1,11 +1,10 @@ -From 32697a0be9afdc5c631cc3d232a298b5880ed65c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:51:45 +0200 -Subject: [PATCH 042/283] locking: Disable spin on owner for RT +Subject: [PATCH 042/290] locking: Disable spin on owner for RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fb7c18e5a29066807df152c5b7d6cd2cf66e0301 Drop spin on owner for mutex / rwsem. We are most likely not using it but… @@ -33,6 +32,3 @@ index 84d882f3e299..af27c4000812 100644 config LOCK_SPIN_ON_OWNER def_bool y --- -2.20.1 - diff --git a/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch b/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch index 99eda5975..48c7ed913 100644 --- a/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch +++ b/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch @@ -1,8 +1,7 @@ -From 708879e986c1b552ee69d6444b808a196bba0f5f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:26:09 +0000 -Subject: [PATCH 043/283] rcu: Disable RCU_FAST_NO_HZ on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 043/290] rcu: Disable RCU_FAST_NO_HZ on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c69674f542d0ccc839ba5282f2b24945882abb93 This uses a timer_list timer from the irq disabled guts of the idle code. Disable it for now to prevent wreckage. 
@@ -25,6 +24,3 @@ index 9210379c0353..644264be90f0 100644 default n help This option permits CPUs to enter dynticks-idle state even if --- -2.20.1 - diff --git a/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch index e87db8676..fc0c19c18 100644 --- a/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch +++ b/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch @@ -1,8 +1,7 @@ -From a14822b6d5fcc441064faf3edc2f91b5d461e703 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Mar 2014 20:19:05 +0100 -Subject: [PATCH 044/283] rcu: make RCU_BOOST default on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 044/290] rcu: make RCU_BOOST default on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ce85f19c79e9234d764888e74dc2e4e49b3a3da6 Since it is no longer invoked from the softirq people run into OOM more often if the priority of the RCU thread is too low. Making boosting @@ -29,6 +28,3 @@ index 644264be90f0..a243a78ff38c 100644 help This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. --- -2.20.1 - diff --git a/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch b/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch index eaad52703..a4d36ced3 100644 --- a/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch +++ b/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch @@ -1,8 +1,7 @@ -From 36c33c65b461082612dffa7be01862b7bd55270e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:03:52 +0200 -Subject: [PATCH 045/283] sched: Disable CONFIG_RT_GROUP_SCHED on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 045/290] sched: Disable CONFIG_RT_GROUP_SCHED on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=08989e0969de7b6c4727eb2a32f7c1aaa194708f Carsten reported problems when running: @@ -30,6 +29,3 @@ index ae9a0113a699..61e8b531649b 100644 default n help This feature lets you explicitly allocate real CPU bandwidth --- -2.20.1 - diff --git a/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch b/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch index 8f7d350fb..58c07b048 100644 --- a/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch +++ b/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch @@ -1,11 +1,10 @@ -From f2b7e396c43d3607ee0a0090c7470da50f833e93 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 046/283] net/core: disable NET_RX_BUSY_POLL +Subject: [PATCH 046/290] net/core: disable NET_RX_BUSY_POLL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=86b04e83f0deb1dce266ca11efb35281d2d76566 sk_busy_loop() does preempt_disable() followed by a few operations which can take sleeping locks and may get long. 
@@ -33,6 +32,3 @@ index 228dfa382eec..bc8d01996f22 100644 config BQL bool --- -2.20.1 - diff --git a/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch b/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch index 208ca8945..b7e2543cc 100644 --- a/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch +++ b/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch @@ -1,8 +1,7 @@ -From 477660c22f2036e69299438b1292307ee1dba46b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 1 Dec 2017 10:42:03 +0100 -Subject: [PATCH 047/283] arm*: disable NEON in kernel mode -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 047/290] arm*: disable NEON in kernel mode +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a0a5f99c6726ff40c5f1173b951484e2b41d527a NEON in kernel mode is used by the crypto algorithms and raid6 code. While the raid6 code looks okay, the crypto algorithms do not: NEON @@ -21,10 +20,10 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 7d11242a37d2..e122dd212ab3 100644 +index 84f36e47e3ab..c1cc28f0841f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -2162,7 +2162,7 @@ config NEON +@@ -2163,7 +2163,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" @@ -161,6 +160,3 @@ index 34b4e3d46aab..ae055cdad8cf 100644 crc32_pmull_algs[0].update = crc32_pmull_update; crc32_pmull_algs[1].update = crc32c_pmull_update; --- -2.20.1 - diff --git a/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch b/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch index 50d900ee3..52e985ead 100644 --- a/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch +++ b/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch @@ -1,8 +1,7 @@ -From 297ef639cbc4bc3aac2e5a8835090136753796fc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 048/283] powerpc: Use generic rwsem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 048/290] powerpc: Use generic rwsem on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=58d37daded0023f403aec29501462e8499eeb016 Use generic code which uses rtmutex @@ -28,6 +27,3 @@ index a80669209155..9952764db9c5 100644 config GENERIC_LOCKBREAK bool --- -2.20.1 - diff --git a/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch index 470c2ac02..f41fe98fa 100644 --- a/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ b/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch @@ -1,9 +1,8 @@ -From 3bead4e3fc7560659c1982ace99de374aa9df79c Mon Sep 17 00:00:00 2001 From: Bogdan Purcareata Date: Fri, 24 Apr 2015 15:53:13 +0000 -Subject: [PATCH 049/283] powerpc/kvm: Disable in-kernel MPIC emulation for +Subject: [PATCH 049/290] powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=98667cc1926223ba114f4e52611633d0d8f0af87 While converting the openpic emulation code to use a 
raw_spinlock_t enables guests to run on RT, there's still a performance issue. For interrupts sent in @@ -40,6 +39,3 @@ index 68a0e9d5b440..6f4d5d7615af 100644 select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING --- -2.20.1 - diff --git a/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch b/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch index 14bb749cc..fefd9bcdc 100644 --- a/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch +++ b/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch @@ -1,8 +1,7 @@ -From ae9000e3c66794249fbca61b8a71bcdf690910e0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:08:34 +0200 -Subject: [PATCH 050/283] powerpc: Disable highmem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 050/290] powerpc: Disable highmem on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=648dcdb872f08b4c6a613140a10ee3eb6df95ed6 The current highmem handling on -RT is not compatible and needs fixups. @@ -24,6 +23,3 @@ index 9952764db9c5..1563820a37e8 100644 source kernel/Kconfig.hz --- -2.20.1 - diff --git a/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch b/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch index 7887c11ef..f82c08409 100644 --- a/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch +++ b/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch @@ -1,8 +1,7 @@ -From 454e636edd0bb26495afb3850a37aa5e5214a4ed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:10:12 +0200 -Subject: [PATCH 051/283] mips: Disable highmem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 051/290] mips: Disable highmem on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=10a1793ef40f4036a0b0c4c51e9fcdc1899e64ba The current highmem handling on -RT is not compatible and needs fixups. @@ -24,6 +23,3 @@ index 201caf226b47..bd268302efa4 100644 config CPU_SUPPORTS_HIGHMEM bool --- -2.20.1 - diff --git a/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch b/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch index e4e486d99..be59e8e3c 100644 --- a/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch +++ b/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch @@ -1,8 +1,7 @@ -From 5c86aec91ae10f140d18bd33cd62783cdde0922d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2009 02:21:32 +0200 -Subject: [PATCH 052/283] x86: Use generic rwsem_spinlocks on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 052/290] x86: Use generic rwsem_spinlocks on -rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4b707497d459b106def4745d5274422c62423fa7 Simplifies the separation of anon_rw_semaphores and rw_semaphores for -rt. 
@@ -29,6 +28,3 @@ index 04a45d6d0167..1b05ae86bdde 100644 config GENERIC_CALIBRATE_DELAY def_bool y --- -2.20.1 - diff --git a/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch index 778693165..761c89c65 100644 --- a/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch +++ b/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch @@ -1,8 +1,7 @@ -From 9cd1a715d85ace3e9b1d3ae703eb16744dd3ebb6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 23 Jan 2014 14:45:59 +0100 -Subject: [PATCH 053/283] leds: trigger: disable CPU trigger on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 053/290] leds: trigger: disable CPU trigger on -RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=07924b23a92cab26f6271e8fb312ab8062dd29a6 as it triggers: |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 @@ -36,6 +35,3 @@ index 4018af769969..b4ce8c115949 100644 help This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which --- -2.20.1 - diff --git a/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch index f05d9e16b..fa93a69be 100644 --- a/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ b/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch @@ -1,8 +1,7 @@ -From f19ffb87fe48ba1e8904df670b13d52f8b9c08f1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 9 Apr 2015 15:23:01 +0200 -Subject: [PATCH 054/283] cpufreq: drop K8's driver from beeing selected -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 054/290] cpufreq: drop K8's driver from beeing selected +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c05ad13cbd22842369324d73f538911d319b97b3 Ralf posted a picture of a backtrace from @@ -34,6 +33,3 @@ index 35f71825b7f3..bb4a6160d0f7 100644 help This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. Support for K10 and newer processors is now in acpi-cpufreq. 
--- -2.20.1 - diff --git a/debian/patches-rt/0055-md-disable-bcache.patch b/debian/patches-rt/0055-md-disable-bcache.patch index 0a4f4f588..fa75e0602 100644 --- a/debian/patches-rt/0055-md-disable-bcache.patch +++ b/debian/patches-rt/0055-md-disable-bcache.patch @@ -1,11 +1,10 @@ -From 3b1c3bc41b87cd7a714ebfa5e88651d4f3326f2e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 11:48:57 +0200 -Subject: [PATCH 055/283] md: disable bcache +Subject: [PATCH 055/290] md: disable bcache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=57bf213cf62320f7d35e01f133973d9f37d06f82 It uses anon semaphores |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: @@ -36,6 +35,3 @@ index f6e0a8b3a61e..18c03d79a442 100644 select CRC64 help Allows a block device to be used as cache for other devices; uses --- -2.20.1 - diff --git a/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch b/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch index 1e0d1f1d5..a7ba932e9 100644 --- a/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch +++ b/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch @@ -1,8 +1,7 @@ -From f0e7a6e0f76d2ab27a0c5ef0f7872d971ec1dd23 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:03:16 +0200 -Subject: [PATCH 056/283] efi: Disable runtime services on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 056/290] efi: Disable runtime services on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b9d785862624bbbe2fd77ed8c7e1026b21c498f0 Based on meassurements the EFI functions get_variable / get_next_variable take up to 2us which looks okay. @@ -29,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index ab668e17fd05..f58ab9ed4ade 100644 +index 5db20908aa9c..1708505fdf5d 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -87,7 +87,7 @@ struct mm_struct efi_mm = { @@ -41,6 +40,3 @@ index ab668e17fd05..f58ab9ed4ade 100644 static int __init setup_noefi(char *arg) { disable_runtime = true; --- -2.20.1 - diff --git a/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch b/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch index 8d1630bbf..37f240364 100644 --- a/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch +++ b/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch @@ -1,8 +1,7 @@ -From d1e9e20fe16f16a1665eabaa44a0f1c2a4cebfec Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 Jul 2011 17:58:40 +0200 -Subject: [PATCH 057/283] printk: Add a printk kill switch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 057/290] printk: Add a printk kill switch +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d031b0863f46321ad8e8e7f1a8131c4703f9f006 Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that it does not dead-lock with the early printk code. 
@@ -31,7 +30,7 @@ index cf3eccfe1543..30ebf5f82a7c 100644 #ifdef CONFIG_PRINTK_NMI diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 06045abd1887..413160a93814 100644 +index d0d03223b45b..289605ff56e8 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); @@ -169,6 +168,3 @@ index 71381168dede..685443375dc0 100644 if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); --- -2.20.1 - diff --git a/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch b/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch index 0b0acd787..c68b897b8 100644 --- a/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch +++ b/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch @@ -1,9 +1,8 @@ -From 3d881bc012788bea38e0bf55b03d9996eb40b1b9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Sep 2011 14:41:29 +0200 -Subject: [PATCH 058/283] printk: Add "force_early_printk" boot param to help +Subject: [PATCH 058/290] printk: Add "force_early_printk" boot param to help with debugging -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e71d5b7d64fa1dd11f7ef2a2bd482f704d6de200 Gives me an option to screw printk and actually see what the machine says. @@ -17,7 +16,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org 1 file changed, 7 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 413160a93814..6553508ff388 100644 +index 289605ff56e8..210cdac1458d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -435,6 +435,13 @@ asmlinkage void early_printk(const char *fmt, ...) @@ -34,6 +33,3 @@ index 413160a93814..6553508ff388 100644 void printk_kill(void) { printk_killswitch = true; --- -2.20.1 - diff --git a/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch b/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch index 03806b6e9..1fb250c6b 100644 --- a/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch +++ b/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch @@ -1,8 +1,7 @@ -From 6a3ec551d9ea7e49f20d8f9d3d45fb8d9ca1b720 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2009 12:38:56 +0200 -Subject: [PATCH 059/283] preempt: Provide preempt_*_(no)rt variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 059/290] preempt: Provide preempt_*_(no)rt variants +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=05ad3e257a527d9072919b02e0cff6078055eed0 RT needs a few preempt_disable/enable points which are not necessary otherwise. Implement variants to avoid #ifdeffery. 
@@ -48,6 +47,3 @@ index 3196d0e76719..f7a17fcc3fec 100644 #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; --- -2.20.1 - diff --git a/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch index f8c4482e1..54e8f8d57 100644 --- a/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch +++ b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch @@ -1,9 +1,8 @@ -From 02487d0393920e03426a2378e40bc7547193c3aa Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Mar 2017 14:23:35 +0100 -Subject: [PATCH 060/283] futex: workaround migrate_disable/enable in different +Subject: [PATCH 060/290] futex: workaround migrate_disable/enable in different context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5182d12b0ac95cf21c3206e06142566001edacc6 migrate_disable()/migrate_enable() takes a different path in atomic() vs !atomic() context. These little hacks ensure that we don't underflow / overflow @@ -65,6 +64,3 @@ index afdc5eadce6e..304f07d08c95 100644 put_pi_state(pi_state); /* --- -2.20.1 - diff --git a/debian/patches-rt/0061-rt-Add-local-irq-locks.patch b/debian/patches-rt/0061-rt-Add-local-irq-locks.patch index 1c74837c6..08c34f73c 100644 --- a/debian/patches-rt/0061-rt-Add-local-irq-locks.patch +++ b/debian/patches-rt/0061-rt-Add-local-irq-locks.patch @@ -1,8 +1,7 @@ -From 1e4195bafdb198d778c98aece678c7b16cd035c8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 20 Jun 2011 09:03:47 +0200 -Subject: [PATCH 061/283] rt: Add local irq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 061/290] rt: Add local irq locks +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ed800ca05696e34346bf329c70ad735f9661075b Introduce locallock. For !RT this maps to preempt_disable()/ local_irq_disable() so there is not much that changes. For RT this will @@ -336,6 +335,3 @@ index 70b7123f38c7..24421bf8c4b3 100644 /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) --- -2.20.1 - diff --git a/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch b/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch index cc0c03405..f81011e2a 100644 --- a/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch +++ b/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch @@ -1,8 +1,7 @@ -From 58ee9341c0c3521cdb41239c83807a98cef97bd0 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:56 -0500 -Subject: [PATCH 062/283] locallock: provide {get,put}_locked_ptr() variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 062/290] locallock: provide {get,put}_locked_ptr() variants +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d7c12c0b218da8cddfca3155c6481e425552bd89 Provide a set of locallocked accessors for pointers to per-CPU data; this is useful for dynamically-allocated per-CPU regions, for example. 
@@ -44,6 +43,3 @@ index d658c2552601..921eab83cd34 100644 #define local_lock_cpu(lvar) get_cpu() #define local_unlock_cpu(lvar) put_cpu() --- -2.20.1 - diff --git a/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch b/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch index 2ae2bc186..f200bd652 100644 --- a/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch +++ b/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch @@ -1,8 +1,7 @@ -From a6c38f0b349a8921a1bfe4dcef5972cf1e2224a0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:34 -0500 -Subject: [PATCH 063/283] mm/scatterlist: Do not disable irqs on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 063/290] mm/scatterlist: Do not disable irqs on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=be6d4affd5215ceaca4642942e1914cb32b9f6e2 For -RT it is enough to keep pagefault disabled (which is currently handled by kmap_atomic()). @@ -13,10 +12,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/scatterlist.c b/lib/scatterlist.c -index 7c6096a71704..5c2c68962709 100644 +index 8c3036c37ba0..336162c2813f 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c -@@ -776,7 +776,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) +@@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { @@ -25,6 +24,3 @@ index 7c6096a71704..5c2c68962709 100644 kunmap_atomic(miter->addr); } else kunmap(miter->page); --- -2.20.1 - diff --git a/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch b/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch index e62f228b4..933f44ed9 100644 --- a/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch +++ b/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch @@ -1,8 +1,7 @@ -From f994c5279fb1173131e67419c540713cd25a59e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 064/283] signal/x86: Delay calling signals in atomic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 064/290] signal/x86: Delay calling signals in atomic +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=43a0a930bf46705aab6bab3928e1644575a02148 On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using @@ -80,7 +79,7 @@ index 33d3c88a7225..fb0438d06ca7 100644 typedef sigset_t compat_sigset_t; #endif diff --git a/include/linux/sched.h b/include/linux/sched.h -index df39ad5916e7..535e57775208 100644 +index 038d0faaa1d5..c87c11bfd9d9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -881,6 +881,10 @@ struct task_struct { @@ -149,6 +148,3 @@ index d5a9646b3538..56edb0580a3a 100644 /* * Nuke all other threads in the group. 
*/ --- -2.20.1 - diff --git a/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch index a7f9725a2..1f1f84156 100644 --- a/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch +++ b/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch @@ -1,8 +1,7 @@ -From 77f58646e4722365c6b6b91802d5feddd57dff34 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 10 Dec 2015 10:58:51 -0800 -Subject: [PATCH 065/283] x86/signal: delay calling signals on 32bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 065/290] x86/signal: delay calling signals on 32bit +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=dd9ded5353246ce29f11b52eb93e640de36c8ef7 When running some ptrace single step tests on x86-32 machine, the below problem is triggered: @@ -44,6 +43,3 @@ index fb0438d06ca7..c00e27af2205 100644 #define ARCH_RT_DELAYS_SIGNAL_SEND #endif --- -2.20.1 - diff --git a/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch b/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch index fb68aa55b..507ef917e 100644 --- a/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch +++ b/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch @@ -1,8 +1,7 @@ -From 86a1cbd4eddbe083ad0331f0eeec002f6fa7b322 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 09:18:52 +0100 -Subject: [PATCH 066/283] buffer_head: Replace bh_uptodate_lock for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 066/290] buffer_head: Replace bh_uptodate_lock for -rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ce9849ace4af19ce619849a3e33f1569442832c3 Wrap the bit_spin_lock calls into a separate inline and add the RT replacements with a real spinlock. @@ -192,6 +191,3 @@ index 96225a77c112..8a1bcfb145d7 100644 /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. --- -2.20.1 - diff --git a/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch b/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch index d9f13ac25..f430cabea 100644 --- a/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch +++ b/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch @@ -1,9 +1,8 @@ -From a0ac5bf9b179bff5745bd4c15d14cb2ec5c81c16 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:11:25 +0100 -Subject: [PATCH 067/283] fs: jbd/jbd2: Make state lock and journal head lock +Subject: [PATCH 067/290] fs: jbd/jbd2: Make state lock and journal head lock rt safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b440221d5e434ba95db2a0b09d00162e95b55d93 bit_spin_locks break under RT. 
@@ -45,7 +44,7 @@ index 8a1bcfb145d7..5869330d1f38 100644 } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h -index 583b82b5a1e9..57f4ad8d45a5 100644 +index 1cf1b9b8e975..2e3266736094 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) @@ -105,6 +104,3 @@ index 583b82b5a1e9..57f4ad8d45a5 100644 } #define J_ASSERT(assert) BUG_ON(!(assert)) --- -2.20.1 - diff --git a/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch b/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch index 84d1cefdd..5a43ad045 100644 --- a/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch +++ b/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch @@ -1,8 +1,7 @@ -From 575440eb3e514693de4892b3589bd02b584834ef Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 21 Jun 2013 15:07:25 -0400 -Subject: [PATCH 068/283] list_bl: Make list head locking RT safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 068/290] list_bl: Make list head locking RT safe +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7de4956b65f4b43f98eca7ecd5400c01df56ba53 As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal @@ -116,6 +115,3 @@ index 3fc2cc57ba1b..69b659259bac 100644 } static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) --- -2.20.1 - diff --git a/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch index 41894773f..02e1706b2 100644 --- a/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch +++ b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch @@ -1,8 +1,7 @@ -From 386260fdddeed151902355b8c816f9b166c1c2b8 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 31 Mar 2016 00:04:25 -0500 -Subject: [PATCH 069/283] list_bl: fixup bogus lockdep warning -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 069/290] list_bl: fixup bogus lockdep warning +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8f7a92e93b3e1260267a4fbe03c549dbe23303fe At first glance, the use of 'static inline' seems appropriate for INIT_HLIST_BL_HEAD(). 
@@ -99,6 +98,3 @@ index 69b659259bac..0b5de7d9ffcf 100644 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { --- -2.20.1 - diff --git a/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch b/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch index 39aa3701e..92f5fd91f 100644 --- a/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch +++ b/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch @@ -1,8 +1,7 @@ -From d884d2bff2d643468c5e37727aa29e8f5c88b3be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:57 -0500 -Subject: [PATCH 070/283] genirq: Disable irqpoll on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 070/290] genirq: Disable irqpoll on -rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=470aeaaf1d6efae4d4a409a9d8143c408474f8b7 Creates long latencies for no value @@ -38,6 +37,3 @@ index d867d6ddafdd..cd12ee86c01e 100644 irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); --- -2.20.1 - diff --git a/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch b/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch index 1d763cc84..ce77700ad 100644 --- a/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch +++ b/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch @@ -1,8 +1,7 @@ -From 91f768aed73cc93826112811b4e622dce0c1915f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 3 Apr 2011 11:57:29 +0200 -Subject: [PATCH 071/283] genirq: Force interrupt thread on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 071/290] genirq: Force interrupt thread on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e83f2ae9e4c5b28fb9364ca7ee72d3f721f7f4ef Force threaded_irqs and optimize the code (force_irqthreads) in regard to this. 
@@ -13,9 +12,11 @@ Signed-off-by: Thomas Gleixner kernel/irq/manage.c | 2 ++ 2 files changed, 6 insertions(+) +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index eeceac3376fc..315f852b4981 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -427,7 +427,11 @@ extern int irq_set_irqchip_state(unsigne +@@ -427,7 +427,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING @@ -27,6 +28,8 @@ Signed-off-by: Thomas Gleixner #else #define force_irqthreads (0) #endif +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 23bcfa71077f..3c26d0708709 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -24,6 +24,7 @@ @@ -37,7 +40,7 @@ Signed-off-by: Thomas Gleixner __read_mostly bool force_irqthreads; EXPORT_SYMBOL_GPL(force_irqthreads); -@@ -33,6 +34,7 @@ static int __init setup_forced_irqthread +@@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg) return 0; } early_param("threadirqs", setup_forced_irqthreads); diff --git a/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index ce1379ae3..815e4ebf8 100644 --- a/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,9 +1,8 @@ -From 6ec0e8d1526370de73bd18c096f7f96827594308 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:20 +0200 -Subject: [PATCH 072/283] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 072/290] Split IRQ-off and zone->lock while freeing pages from PCP list #1 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6fdb74e76cbdf28ba0fc148ff245ebe35ffcb059 Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. @@ -168,6 +167,3 @@ index 2d04bd2e1ced..332b48f38d1e 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index c560a2ac3..6d8312d5d 100644 --- a/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,9 +1,8 @@ -From f9efb76f365f15eaca8f29ee7f2648de90925a76 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:21 +0200 -Subject: [PATCH 073/283] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 073/290] Split IRQ-off and zone->lock while freeing pages from PCP list #2 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4804f6aa4033e6fc037ec95843c4f4a7768b9d27 Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. 
@@ -167,6 +166,3 @@ index 332b48f38d1e..55cee9a17a36 100644 } /* --- -2.20.1 - diff --git a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch index 30fe582de..4d069e205 100644 --- a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch @@ -1,8 +1,7 @@ -From 302fcfd8e9527e8f7b6ec9d733a5a3a760af64ef Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 28 May 2018 15:24:22 +0200 -Subject: [PATCH 074/283] mm/SLxB: change list_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 074/290] mm/SLxB: change list_lock to raw_spinlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b21ef7083e9bc886ce555c1b63633dd1ab69e039 The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t otherwise the interrupts won't be disabled on -RT. The locking rules remain @@ -399,7 +398,7 @@ index 9632772e14be..d6b01d61f768 100644 #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/mm/slub.c b/mm/slub.c -index 09c0e24a06d8..9450fb6da89f 100644 +index 9c3937c5ce38..ba20c68a9cfd 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1167,7 +1167,7 @@ static noinline int free_debug_processing( @@ -614,6 +613,3 @@ index 09c0e24a06d8..9450fb6da89f 100644 } for (i = 0; i < t.count; i++) { --- -2.20.1 - diff --git a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch index c6312d034..87b41f922 100644 --- a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch +++ b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch @@ -1,9 +1,8 @@ -From 9da82885e5b9187857b5fdc2eaa482752e814fbc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jun 2018 17:29:19 +0200 -Subject: [PATCH 075/283] mm/SLUB: delay giving back empty slubs to IRQ enabled +Subject: [PATCH 075/290] mm/SLUB: delay giving back empty slubs to IRQ enabled regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=89376b48dd9f6ddbfa127c973136e8308ea2f47d __free_slab() is invoked with disabled interrupts which increases the irq-off time while __free_pages() is doing the work. 
@@ -18,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 69 insertions(+), 5 deletions(-) diff --git a/mm/slub.c b/mm/slub.c -index 9450fb6da89f..7fd47a914f61 100644 +index ba20c68a9cfd..224663e20772 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, @@ -218,6 +217,3 @@ index 9450fb6da89f..7fd47a914f61 100644 if (debug_guardpage_minorder()) slub_max_order = 0; --- -2.20.1 - diff --git a/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch index f09e17263..5321932b6 100644 --- a/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -1,8 +1,7 @@ -From 4cd1dede47de27525631161fdc6cdfc9d8608c31 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:37 -0500 -Subject: [PATCH 076/283] mm: page_alloc: rt-friendly per-cpu pages -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 076/290] mm: page_alloc: rt-friendly per-cpu pages +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8d07a483196aff57e77d03c97a32ee47cb7af0d1 rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. @@ -234,6 +233,3 @@ index 55cee9a17a36..99b3861b1ef6 100644 } #ifdef CONFIG_MEMORY_HOTREMOVE --- -2.20.1 - diff --git a/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch index 937b154b7..e045eea3f 100644 --- a/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch +++ b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch @@ -1,8 +1,7 @@ -From 98c01e9756e741d807b1198eb885a26e0998fcde Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:51 -0500 -Subject: [PATCH 077/283] mm/swap: Convert to percpu locked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 077/290] mm/swap: Convert to percpu locked +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=22720b62b26a1291196199a2ac4f37ba1970d076 Replace global locks (get_cpu + local_irq_save) with "local_locks()". Currently there is one of for "rotate" and one for "swap". 
@@ -37,10 +36,10 @@ index 7bd0a6f2ac2b..e643672fa802 100644 extern void lru_cache_add_anon(struct page *page); extern void lru_cache_add_file(struct page *page); diff --git a/mm/compaction.c b/mm/compaction.c -index faca45ebe62d..f8ccb9d9daa3 100644 +index 5079ddbec8f9..c40d3a13cbbd 100644 --- a/mm/compaction.c +++ b/mm/compaction.c -@@ -1657,10 +1657,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro +@@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro block_start_pfn(cc->migrate_pfn, cc->order); if (cc->last_migrated_pfn < current_block_start) { @@ -71,7 +70,7 @@ index 99b3861b1ef6..1679f5883307 100644 /* diff --git a/mm/swap.c b/mm/swap.c -index a3fc028e338e..4bac22ec1328 100644 +index 45fdbfb6b2a6..92f994b962f0 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -33,6 +33,7 @@ @@ -206,6 +205,3 @@ index a3fc028e338e..4bac22ec1328 100644 } #ifdef CONFIG_SMP --- -2.20.1 - diff --git a/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch index 18e923e2b..c3f020536 100644 --- a/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch +++ b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch @@ -1,8 +1,7 @@ -From f4f53c9fdf55676d783a4fbad5049f39401a0542 Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Fri, 27 May 2016 15:03:28 +0200 -Subject: [PATCH 078/283] mm: perform lru_add_drain_all() remotely -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 078/290] mm: perform lru_add_drain_all() remotely +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=35d05460786e3b7a320e89aae8a90ae5ee7c758f lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run on all CPUs that have non-empty LRU pagevecs and then waiting for @@ -25,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/mm/swap.c b/mm/swap.c -index 4bac22ec1328..0457927d3f0c 100644 +index 92f994b962f0..3885645a45ce 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu) @@ -104,6 +103,3 @@ index 4bac22ec1328..0457927d3f0c 100644 mutex_unlock(&lock); } --- -2.20.1 - diff --git a/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch index 4e558cd2b..d2fb41128 100644 --- a/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch +++ b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch @@ -1,9 +1,8 @@ -From 3e1b4a0068b41c1782264376379985fb992bd41e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:13 -0500 -Subject: [PATCH 079/283] mm/vmstat: Protect per cpu variables with preempt +Subject: [PATCH 079/290] mm/vmstat: Protect per cpu variables with preempt disable on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1b7191af05332f5fa21d4da8c3c4fb7c35bdb0fe Disable preemption on -RT for the vmstat code. On vanila the code runs in IRQ-off regions while on -RT it is not. 
"preempt_disable" ensures that the @@ -140,6 +139,3 @@ index 4a387937f9f5..0cd11c5e3999 100644 } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) --- -2.20.1 - diff --git a/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch b/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch index 8ac3f617b..25760b1e9 100644 --- a/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch +++ b/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch @@ -1,9 +1,8 @@ -From fb089e89b26bc5653a90d9983021813e15fa04d9 Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Sat, 1 Oct 2011 18:58:13 -0700 -Subject: [PATCH 080/283] ARM: Initialize split page table locks for vector +Subject: [PATCH 080/290] ARM: Initialize split page table locks for vector page -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f092992f9980fcf99ec12820e7350851848fef8a Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if PREEMPT_RT_FULL=y because vectors_user_mapping() creates a @@ -71,6 +70,3 @@ index 82ab015bf42b..8d3c7ce34c24 100644 #ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the --- -2.20.1 - diff --git a/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch b/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch index 3354b0ff9..f68d1081e 100644 --- a/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch +++ b/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch @@ -1,8 +1,7 @@ -From b01d03c695bcba2149713f4425c806b5b5e3410d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 Oct 2012 10:32:35 +0100 -Subject: [PATCH 081/283] mm: Enable SLUB for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 081/290] mm: Enable SLUB for RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6ee23601786b2fb75dfb6aaed6da438321511261 Avoid the memory allocation in IRQ section @@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 6 insertions(+) diff --git a/mm/slub.c b/mm/slub.c -index 7fd47a914f61..efd441e79e6f 100644 +index 224663e20772..cbe47408c6eb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3680,6 +3680,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, @@ -37,6 +36,3 @@ index 7fd47a914f61..efd441e79e6f 100644 } /* --- -2.20.1 - diff --git a/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch b/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch index c21175faa..57f0a405f 100644 --- a/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch +++ b/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch @@ -1,8 +1,7 @@ -From 14471a3281f661b8b8bccdb64820879a699fb2ad Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 12:08:15 +0100 -Subject: [PATCH 082/283] slub: Enable irqs for __GFP_WAIT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 082/290] slub: Enable irqs for __GFP_WAIT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0d2a7aef26031e05ab0f7d78fc15f838459aa348 SYSTEM_RUNNING might be too late for enabling interrupts. Allocations with GFP_WAIT can happen before that. So use this as an indicator. 
@@ -13,7 +12,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/mm/slub.c b/mm/slub.c -index efd441e79e6f..2240b51a0549 100644 +index cbe47408c6eb..81c32ceab228 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1570,10 +1570,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -43,6 +42,3 @@ index efd441e79e6f..2240b51a0549 100644 local_irq_disable(); if (!page) return NULL; --- -2.20.1 - diff --git a/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch b/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch index fe5f645f7..814950d65 100644 --- a/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch +++ b/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch @@ -1,8 +1,7 @@ -From fb6bfe69057a4177f5f5b273cace7ea5cbb5f649 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Apr 2015 19:00:47 +0200 -Subject: [PATCH 083/283] slub: Disable SLUB_CPU_PARTIAL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 083/290] slub: Disable SLUB_CPU_PARTIAL +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=31c858cb39a8bcd260d166bc3963e3dfe201c547 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7 @@ -49,6 +48,3 @@ index 61e8b531649b..b4e88fb19c26 100644 bool "SLUB per cpu partial cache" help Per cpu partial caches accellerate objects allocation and freeing --- -2.20.1 - diff --git a/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch index 859b0f723..23e5d4d45 100644 --- a/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ b/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -1,9 +1,8 @@ -From b64de8d2bb376abf6af01c84a94e1a201aecc6ec Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 30 Oct 2013 11:48:33 -0700 -Subject: [PATCH 084/283] mm/memcontrol: Don't call schedule_work_on in +Subject: [PATCH 084/290] mm/memcontrol: Don't call schedule_work_on in preemption disabled context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c5685864e74efa64dff382c12b2b36c2b7d27c2f The following trace is triggered when running ltp oom test cases: @@ -49,10 +48,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 7e7cc0cd89fe..174329de4779 100644 +index 65da189a433b..cc5172096d2d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -2063,7 +2063,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2082,7 +2082,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. 
*/ @@ -61,7 +60,7 @@ index 7e7cc0cd89fe..174329de4779 100644 for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -2083,7 +2083,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2102,7 +2102,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) } css_put(&memcg->css); } @@ -70,6 +69,3 @@ index 7e7cc0cd89fe..174329de4779 100644 mutex_unlock(&percpu_charge_mutex); } --- -2.20.1 - diff --git a/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch index 789ae3431..1a368f4ce 100644 --- a/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch +++ b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch @@ -1,9 +1,8 @@ -From 3cb7dde3b41a847eefeac79763e46ce167c8521f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 28 Jan 2015 17:14:16 +0100 -Subject: [PATCH 085/283] mm/memcontrol: Replace local_irq_disable with local +Subject: [PATCH 085/290] mm/memcontrol: Replace local_irq_disable with local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=37c842416ddb1f501d2a36a2504945b5d79e4a5d There are a few local_irq_disable() which then take sleeping locks. This patch converts them local locks. @@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 174329de4779..d0f245d80f93 100644 +index cc5172096d2d..90e67c468e76 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -69,6 +69,7 @@ @@ -34,7 +33,7 @@ index 174329de4779..d0f245d80f93 100644 /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -4884,12 +4887,12 @@ static int mem_cgroup_move_account(struct page *page, +@@ -4913,12 +4916,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; @@ -49,7 +48,7 @@ index 174329de4779..d0f245d80f93 100644 out_unlock: unlock_page(page); out: -@@ -6008,10 +6011,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, +@@ -6037,10 +6040,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, commit_charge(page, memcg, lrucare); @@ -62,7 +61,7 @@ index 174329de4779..d0f245d80f93 100644 if (do_memsw_account() && PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; -@@ -6080,7 +6083,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) +@@ -6109,7 +6112,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) memcg_oom_recover(ug->memcg); } @@ -71,7 +70,7 @@ index 174329de4779..d0f245d80f93 100644 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); -@@ -6088,7 +6091,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) +@@ -6117,7 +6120,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); memcg_check_events(ug->memcg, ug->dummy_page); @@ -80,7 +79,7 @@ index 174329de4779..d0f245d80f93 100644 if (!mem_cgroup_is_root(ug->memcg)) css_put_many(&ug->memcg->css, nr_pages); -@@ -6251,10 +6254,10 @@ void 
mem_cgroup_migrate(struct page *oldpage, struct page *newpage) +@@ -6280,10 +6283,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) commit_charge(newpage, memcg, false); @@ -93,7 +92,7 @@ index 174329de4779..d0f245d80f93 100644 } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -6446,6 +6449,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -6475,6 +6478,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -101,7 +100,7 @@ index 174329de4779..d0f245d80f93 100644 VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -6491,13 +6495,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -6520,13 +6524,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. */ @@ -119,6 +118,3 @@ index 174329de4779..d0f245d80f93 100644 } /** --- -2.20.1 - diff --git a/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch index d0976ea4c..c2b593c0f 100644 --- a/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch +++ b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch @@ -1,8 +1,7 @@ -From 50eae40f0475c039a273e2f5441f4ecda84d104e Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 22 Mar 2016 11:16:09 +0100 -Subject: [PATCH 086/283] mm/zsmalloc: copy with get_cpu_var() and locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 086/290] mm/zsmalloc: copy with get_cpu_var() and locking +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c50e41d890c9520ee9c04a91becc29f12bdfca6c get_cpu_var() disables preemption and triggers a might_sleep() splat later. This is replaced with get_locked_var(). @@ -18,18 +17,18 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 74 insertions(+), 6 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c -index 9da65552e7ca..63c193c1ff96 100644 +index 4b9063d12b93..1a2f6c13acbd 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c -@@ -55,6 +55,7 @@ - #include +@@ -56,6 +56,7 @@ + #include #include #include +#include #define ZSPAGE_MAGIC 0x58 -@@ -72,9 +73,22 @@ +@@ -73,9 +74,22 @@ */ #define ZS_MAX_ZSPAGE_ORDER 2 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) @@ -53,7 +52,7 @@ index 9da65552e7ca..63c193c1ff96 100644 /* * Object location (, ) is encoded as * as single (unsigned long) handle value. 
-@@ -320,7 +334,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} +@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} static int create_cache(struct zs_pool *pool) { @@ -62,7 +61,7 @@ index 9da65552e7ca..63c193c1ff96 100644 0, 0, NULL); if (!pool->handle_cachep) return 1; -@@ -344,10 +358,27 @@ static void destroy_cache(struct zs_pool *pool) +@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool) static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { @@ -92,7 +91,7 @@ index 9da65552e7ca..63c193c1ff96 100644 static void cache_free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); -@@ -366,12 +397,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) +@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) static void record_obj(unsigned long handle, unsigned long obj) { @@ -111,7 +110,7 @@ index 9da65552e7ca..63c193c1ff96 100644 } /* zpool driver */ -@@ -453,6 +490,7 @@ MODULE_ALIAS("zpool-zsmalloc"); +@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc"); /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ static DEFINE_PER_CPU(struct mapping_area, zs_map_area); @@ -119,7 +118,7 @@ index 9da65552e7ca..63c193c1ff96 100644 static bool is_zspage_isolated(struct zspage *zspage) { -@@ -882,7 +920,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) +@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) static unsigned long handle_to_obj(unsigned long handle) { @@ -133,7 +132,7 @@ index 9da65552e7ca..63c193c1ff96 100644 } static unsigned long obj_to_head(struct page *page, void *obj) -@@ -896,22 +940,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) +@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) static inline int testpin_tag(unsigned long handle) { @@ -180,7 +179,7 @@ index 9da65552e7ca..63c193c1ff96 100644 } static void reset_page(struct page *page) -@@ -1337,7 +1405,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, +@@ -1342,7 +1410,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; @@ -189,7 +188,7 @@ index 9da65552e7ca..63c193c1ff96 100644 area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ -@@ -1391,7 +1459,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) +@@ -1396,7 +1464,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) __zs_unmap_object(area, pages, off, class->size); } @@ -198,6 +197,3 @@ index 9da65552e7ca..63c193c1ff96 100644 migrate_read_unlock(zspage); unpin_tag(handle); --- -2.20.1 - diff --git a/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch b/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch index 253b1fd5f..bdb3e38c7 100644 --- a/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch +++ b/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch @@ -1,9 +1,8 @@ -From 3d625e1fb1f5adff8191330efe6d47017b0806bd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 21:53:43 +0100 -Subject: [PATCH 087/283] x86/mm/pat: disable preemption 
__split_large_page() +Subject: [PATCH 087/290] x86/mm/pat: disable preemption __split_large_page() after spin_lock() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bfec83b70be39a512d8519c2bb34f4cb620cb6b3 Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a warning if __flush_tlb_all() is invoked in preemptible context. On !RT @@ -57,6 +56,3 @@ index e2d4b25c7aa4..9626ebb9e3c8 100644 spin_unlock(&pgd_lock); return 0; --- -2.20.1 - diff --git a/debian/patches-rt/0088-radix-tree-use-local-locks.patch b/debian/patches-rt/0088-radix-tree-use-local-locks.patch index 5d2d5bc70..9d0ea2e73 100644 --- a/debian/patches-rt/0088-radix-tree-use-local-locks.patch +++ b/debian/patches-rt/0088-radix-tree-use-local-locks.patch @@ -1,8 +1,7 @@ -From 0a7a65a5055b7a5a94c57ee2dc8404116cff804b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jan 2017 16:34:27 +0100 -Subject: [PATCH 088/283] radix-tree: use local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 088/290] radix-tree: use local locks +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b762851e97a0699769d8cac2a637edd30c800d1f The preload functionality uses per-CPU variables and preempt-disable to ensure that it does not switch CPUs during its usage. This patch adds @@ -171,6 +170,3 @@ index bc03ecc4dfd2..44257463f683 100644 if (!this_cpu_read(ida_bitmap)) { struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); --- -2.20.1 - diff --git a/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch index 6d5ecb02b..c02d2e1e1 100644 --- a/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch +++ b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch @@ -1,8 +1,7 @@ -From 5bbf9de052f34cd8d685120f60da34937f2b0772 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 089/283] timers: Prepare for full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 089/290] timers: Prepare for full preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a8b88eb0c9c0171975f889b404f3f0d5e8713e82 When softirqs can be preempted we need to make sure that cancelling the timer from the active thread can not deadlock vs. 
a running timer @@ -30,7 +29,7 @@ index 7b066fd38248..54627d046b3a 100644 #else # define del_timer_sync(t) del_timer(t) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 7a39d56f6a6b..5de80f29ef57 100644 +index a33c2c18628d..520640973942 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -498,11 +498,14 @@ void resched_cpu(int cpu) @@ -60,7 +59,7 @@ index 7a39d56f6a6b..5de80f29ef57 100644 } diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index fa49cd753dea..bbe24e241643 100644 +index ae64cb819a9a..9019c9caf146 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -44,6 +44,7 @@ @@ -151,7 +150,7 @@ index fa49cd753dea..bbe24e241643 100644 raw_spin_lock_irq(&base->lock); } } -@@ -1681,8 +1715,8 @@ static inline void __run_timers(struct timer_base *base) +@@ -1683,8 +1717,8 @@ static inline void __run_timers(struct timer_base *base) while (levels--) expire_timers(base, heads + levels); } @@ -161,7 +160,7 @@ index fa49cd753dea..bbe24e241643 100644 } /* -@@ -1927,6 +1961,9 @@ static void __init init_timer_cpu(int cpu) +@@ -1929,6 +1963,9 @@ static void __init init_timer_cpu(int cpu) base->cpu = cpu; raw_spin_lock_init(&base->lock); base->clk = jiffies; @@ -171,6 +170,3 @@ index fa49cd753dea..bbe24e241643 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch b/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch index 2c3363e31..2201f0233 100644 --- a/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch +++ b/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch @@ -1,8 +1,7 @@ -From 49f95baf1667e4853406b63d30062b94afff4a25 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 6 Nov 2011 12:26:18 +0100 -Subject: [PATCH 090/283] x86: kvm Require const tsc for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 090/290] x86: kvm Require const tsc for RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=61807edd278df58c12c72f58fd6aeae90ef41013 Non constant TSC is a nightmare on bare metal already, but with virtualization it becomes a complete disaster because the workarounds @@ -15,10 +14,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index cea6568667c4..c90545667fd6 100644 +index 6ae8a013af31..c936e1848f28 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -6756,6 +6756,13 @@ int kvm_arch_init(void *opaque) +@@ -6787,6 +6787,13 @@ int kvm_arch_init(void *opaque) goto out; } @@ -32,6 +31,3 @@ index cea6568667c4..c90545667fd6 100644 r = kvm_mmu_module_init(); if (r) goto out_free_percpu; --- -2.20.1 - diff --git a/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch index ad379fa42..a81d3fc95 100644 --- a/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch +++ b/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch @@ -1,8 +1,7 @@ -From 99fc3867798d14c5cff8c71c3872af84605d572d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 4 Oct 2017 10:24:23 +0200 -Subject: [PATCH 091/283] pci/switchtec: Don't use completion's wait queue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 091/290] pci/switchtec: Don't use completion's wait queue +Origin: 
https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=10b9bce593adcb0971345750199a4c428b95be36 The poll callback is using completion's wait_queue_head_t member and puts it in poll_wait() so the poll() caller gets a wakeup after command @@ -110,6 +109,3 @@ index 72db2e0ebced..77d4fb86d05b 100644 list_del_init(&stuser->list); stuser_put(stuser); } --- -2.20.1 - diff --git a/debian/patches-rt/0092-wait.h-include-atomic.h.patch b/debian/patches-rt/0092-wait.h-include-atomic.h.patch index 250b359a0..964ef289f 100644 --- a/debian/patches-rt/0092-wait.h-include-atomic.h.patch +++ b/debian/patches-rt/0092-wait.h-include-atomic.h.patch @@ -1,11 +1,10 @@ -From 88037fc07062d469557427c97507d3f95d7ca3a6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 12:19:57 +0100 -Subject: [PATCH 092/283] wait.h: include atomic.h +Subject: [PATCH 092/290] wait.h: include atomic.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6a750424efdceb04843eb5c9be84b0e070985534 | CC init/main.o |In file included from include/linux/mmzone.h:9:0, @@ -37,6 +36,3 @@ index ed7c122cb31f..2b5ef8e94d19 100644 typedef struct wait_queue_entry wait_queue_entry_t; --- -2.20.1 - diff --git a/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch index 3fcf292e6..305609095 100644 --- a/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch +++ b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch @@ -1,8 +1,7 @@ -From 67478d9c6704de32600fd4363f3853bcdffcf391 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Fri, 11 Jul 2014 15:26:11 +0200 -Subject: [PATCH 093/283] work-simple: Simple work queue implemenation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 093/290] work-simple: Simple work queue implemenation +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b23368647741af52610dd7e6e8955ef23eab50ea Provides a framework for enqueuing callbacks from irq context PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. 
@@ -241,6 +240,3 @@ index 000000000000..a5b89fdacf19 + mutex_unlock(&worker_mutex); +} +EXPORT_SYMBOL_GPL(swork_put); --- -2.20.1 - diff --git a/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch b/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch index 70ab96e0e..4fd53f134 100644 --- a/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch +++ b/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch @@ -1,9 +1,8 @@ -From 147a7822bfe8f027b88fa0ca82ae0d210e57bf34 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 10 Sep 2018 18:00:31 +0200 -Subject: [PATCH 094/283] work-simple: drop a shit statement in +Subject: [PATCH 094/290] work-simple: drop a shit statement in SWORK_EVENT_PENDING -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4333ab9ff36a8ec80e8b72a146602c8bf41acf24 Dan Carpenter reported | smatch warnings: @@ -32,6 +31,3 @@ index a5b89fdacf19..c90d14b9b126 100644 static DEFINE_MUTEX(worker_mutex); static struct sworker *glob_worker; --- -2.20.1 - diff --git a/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch b/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch index e50733a81..861221176 100644 --- a/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch +++ b/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch @@ -1,8 +1,7 @@ -From 334dc78522991ee04c4704a53d24f0009be71172 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 11 Jan 2013 11:23:51 +0100 -Subject: [PATCH 095/283] completion: Use simple wait queues -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 095/290] completion: Use simple wait queues +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=98054900c031519e7190d077f9875ef459fcc8e7 Completions have no long lasting callbacks and therefor do not need the complex waitqueue variant. 
Use simple waitqueues which reduces the @@ -55,10 +54,10 @@ index 94ad6fe29e69..52a49f0bbc19 100644 break; default: diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c -index aa15593a3ac4..5e9269cd14fa 100644 +index 2050993fb58b..e2ca75a6e241 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c -@@ -1624,7 +1624,7 @@ static void ffs_data_put(struct ffs_data *ffs) +@@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs) pr_info("%s(): freeing\n", __func__); ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || @@ -320,10 +319,10 @@ index a1ad5b7d5521..755a58084978 100644 } EXPORT_SYMBOL(completion_done); diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 5de80f29ef57..337cc72e6a6a 100644 +index 520640973942..ba6d51c7df61 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7116,7 +7116,10 @@ void migrate_disable(void) +@@ -7153,7 +7153,10 @@ void migrate_disable(void) return; } #ifdef CONFIG_SCHED_DEBUG @@ -335,7 +334,7 @@ index 5de80f29ef57..337cc72e6a6a 100644 #endif if (p->migrate_disable) { -@@ -7146,7 +7149,10 @@ void migrate_enable(void) +@@ -7183,7 +7186,10 @@ void migrate_enable(void) } #ifdef CONFIG_SCHED_DEBUG @@ -386,6 +385,3 @@ index 66b59ac77c22..c7cb30cdd1b7 100644 { wait->task = current; if (list_empty(&wait->task_list)) --- -2.20.1 - diff --git a/debian/patches-rt/0096-fs-aio-simple-simple-work.patch b/debian/patches-rt/0096-fs-aio-simple-simple-work.patch index c7f86e40b..22c1c485e 100644 --- a/debian/patches-rt/0096-fs-aio-simple-simple-work.patch +++ b/debian/patches-rt/0096-fs-aio-simple-simple-work.patch @@ -1,8 +1,7 @@ -From b576efb7cedb58ffa58242d7b0df24d14063ba0e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 096/283] fs/aio: simple simple work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 096/290] fs/aio: simple simple work +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=824fb4cfaff29a2b6013b74084736a71b3c3647a |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 @@ -84,6 +83,3 @@ index 911e23087dfb..16dcf8521c2c 100644 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) { unsigned i, new_nr; --- -2.20.1 - diff --git a/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch index 4b9aa6abd..e1dbbb004 100644 --- a/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,9 +1,8 @@ -From bac483c38a96edeadc43fa8dcf03c3e57c41cc62 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 097/283] genirq: Do not invoke the affinity callback via a +Subject: [PATCH 097/290] genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=59cce101e50e9b0f9d6b978848521f8dc629b260 Joe Korty reported, that __irq_set_affinity_locked() schedules a workqueue while holding a rawlock which results in a might_sleep() @@ -49,10 +48,10 
@@ index 315f852b4981..a943c07b54ba 100644 void (*release)(struct kref *ref); }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index c3b9f6dacd8f..af2a8757abfb 100644 +index 3c26d0708709..eadcbfbd434a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -259,7 +259,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, +@@ -285,7 +285,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); @@ -65,7 +64,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644 } irqd_set(data, IRQD_AFFINITY_SET); -@@ -297,10 +302,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) +@@ -323,10 +328,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); @@ -77,7 +76,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644 struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; -@@ -322,6 +325,35 @@ static void irq_affinity_notify(struct work_struct *work) +@@ -348,6 +351,35 @@ static void irq_affinity_notify(struct work_struct *work) kref_put(¬ify->kref, notify->release); } @@ -113,7 +112,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644 /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification -@@ -350,7 +382,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +@@ -376,7 +408,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) if (notify) { notify->irq = irq; kref_init(¬ify->kref); @@ -126,7 +125,7 @@ index c3b9f6dacd8f..af2a8757abfb 100644 } raw_spin_lock_irqsave(&desc->lock, flags); -@@ -359,7 +396,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +@@ -385,7 +422,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) raw_spin_unlock_irqrestore(&desc->lock, flags); if (old_notify) { @@ -137,6 +136,3 @@ index c3b9f6dacd8f..af2a8757abfb 100644 kref_put(&old_notify->kref, old_notify->release); } --- -2.20.1 - diff --git a/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch index b5396c63c..505705dae 100644 --- a/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch +++ b/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch @@ -1,9 +1,8 @@ -From 7ada38687fe4d4f0ff8b7390d1588f7fed28a28d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Nov 2017 17:29:51 +0100 -Subject: [PATCH 098/283] time/hrtimer: avoid schedule_work() with interrupts +Subject: [PATCH 098/290] time/hrtimer: avoid schedule_work() with interrupts disabled -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=60f0f171fd7d0f16aef78b604efb7156d5d1ad68 The NOHZ code tries to schedule a workqueue with interrupts disabled. Since this does not work -RT I am switching it to swork instead. 
@@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index bbe24e241643..696e7583137c 100644 +index 9019c9caf146..3fab1c50bf1b 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -217,8 +217,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); @@ -55,6 +54,3 @@ index bbe24e241643..696e7583137c 100644 int timer_migration_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) --- -2.20.1 - diff --git a/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch index a3501d406..5541fe68c 100644 --- a/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ b/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch @@ -1,9 +1,8 @@ -From 2decd81945344204be663182b0eac46997f297b2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 11:25:41 +0200 -Subject: [PATCH 099/283] hrtimer: consolidate hrtimer_init() + +Subject: [PATCH 099/290] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper() calls -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=956094850303bfde3d1c8d990ce9a247053d2c63 hrtimer_init_sleeper() calls require a prior initialisation of the hrtimer object with hrtimer_init(). Lets make the initialisation of @@ -28,7 +27,7 @@ Signed-off-by: Anna-Maria Gleixner 7 files changed, 67 insertions(+), 34 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c -index 70d839b9c3b0..e3e7a88e03a6 100644 +index 684acaa96db7..4aa3284874f6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3128,10 +3128,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, @@ -285,6 +284,3 @@ index 092fa3d75b32..9d472d626aaa 100644 do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); --- -2.20.1 - diff --git a/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch b/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch index 452ab3844..11a4f4f6f 100644 --- a/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch +++ b/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch @@ -1,8 +1,7 @@ -From 15ee476637495474369f2b444a8ae5c041e59ed4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 100/283] hrtimers: Prepare full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 100/290] hrtimers: Prepare full preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=59ed0327f0e08449f5d06eb111e02484791163e8 Make cancellation of a running callback in softirq context safe against preemption. 
@@ -95,7 +94,7 @@ index ee7e987ea1b4..0571b498db73 100644 void run_posix_cpu_timers(struct task_struct *task); diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index fdeb9bc6affb..966708e8ce14 100644 +index f4255a65c44b..1d1f077cffb3 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm) @@ -285,6 +284,3 @@ index 5a01c4fdbfef..a5ec421e3437 100644 goto retry_delete; } list_del(&timer->list); --- -2.20.1 - diff --git a/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch b/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch index 5632d0d40..57c874a15 100644 --- a/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch +++ b/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch @@ -1,9 +1,8 @@ -From da6822e02b92c82126e1e2ef3846912c9874b024 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 08:44:31 -0500 -Subject: [PATCH 101/283] hrtimer: by timers by default into the softirq +Subject: [PATCH 101/290] hrtimer: by timers by default into the softirq context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7c5afdf61b2f178567e499bc4aec5050c12a82ce We can't have hrtimers callbacks running in hardirq context on RT. Therefore the timers are deferred to the softirq context by default. @@ -29,10 +28,10 @@ Signed-off-by: Sebastian Andrzej Siewior 11 files changed, 37 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 031bd7f91f98..4b2a399f1df5 100644 +index 262e49301cae..c2f51b6e8974 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c -@@ -2252,7 +2252,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) +@@ -2257,7 +2257,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, @@ -66,7 +65,7 @@ index 73ad7309436a..2bdb047c7656 100644 /* diff --git a/kernel/events/core.c b/kernel/events/core.c -index 171b83ebed4a..a7807c609c22 100644 +index 625ba462e5bb..9e17b38fdb98 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1102,7 +1102,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) @@ -78,7 +77,7 @@ index 171b83ebed4a..a7807c609c22 100644 timer->function = perf_mux_hrtimer_handler; } -@@ -9216,7 +9216,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) +@@ -9239,7 +9239,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) if (!is_sampling_event(event)) return; @@ -88,7 +87,7 @@ index 171b83ebed4a..a7807c609c22 100644 /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 337cc72e6a6a..1f997ceec454 100644 +index ba6d51c7df61..86839f848805 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq) @@ -101,10 +100,10 @@ index 337cc72e6a6a..1f997ceec454 100644 } #else /* CONFIG_SCHED_HRTICK */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index fb6e64417470..1794e152d888 100644 +index 4b13df38c069..974a8f9b615a 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) +@@ -1086,7 +1086,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) { struct hrtimer *timer = 
&dl_se->dl_timer; @@ -114,10 +113,10 @@ index fb6e64417470..1794e152d888 100644 } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 0048a32a3b4d..4022ad749d85 100644 +index da5d60d25c27..5bbc77b948e7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4908,9 +4908,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4967,9 +4967,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -194,10 +193,10 @@ index 923a650e5c35..abf24e60b6e8 100644 sl->timer.function = hrtimer_wakeup; sl->task = task; diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c -index a59641fb88b6..52649fdea3b5 100644 +index a836efd34589..c50e8f3262de 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c -@@ -106,7 +106,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) +@@ -107,7 +107,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) void tick_setup_hrtimer_broadcast(void) { @@ -232,6 +231,3 @@ index bbc4940f21af..defd493ba967 100644 hrtimer->function = watchdog_timer_fn; hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED); --- -2.20.1 - diff --git a/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch b/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch index fb794ac54..05591e773 100644 --- a/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch +++ b/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch @@ -1,8 +1,7 @@ -From bccc05d43b6e3443288909080b555413b80fe36a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 8 Jan 2019 12:31:06 +0100 -Subject: [PATCH 102/283] sched/fair: Make the hrtimers non-hard again -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 102/290] sched/fair: Make the hrtimers non-hard again +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=02fb0a5ce45172d0c7f8f6ad175c42016a7764fe Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both hrtimer can run in softirq context because now interrupts are disabled @@ -14,10 +13,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 4022ad749d85..0048a32a3b4d 100644 +index 5bbc77b948e7..da5d60d25c27 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4908,9 +4908,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4967,9 +4967,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -29,6 +28,3 @@ index 4022ad749d85..0048a32a3b4d 100644 cfs_b->slack_timer.function = sched_cfs_slack_timer; cfs_b->distribute_running = 0; } --- -2.20.1 - diff --git a/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch index ecc3eb95c..4e0452c64 100644 --- a/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ b/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch @@ -1,8 +1,7 @@ -From 73842c09ad741a2814a0df56dccd630cbd503cf9 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Mon, 16 Sep 2013 14:09:19 -0700 -Subject: [PATCH 103/283] hrtimer: Move 
schedule_work call to helper thread -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 103/290] hrtimer: Move schedule_work call to helper thread +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d5e0a0fe2ae5fd7d7eec849bd5aaf49d254da370 When run ltp leapsec_timer test, the following call trace is caught: @@ -93,6 +92,3 @@ index abf24e60b6e8..c72eb8bfc471 100644 #else --- -2.20.1 - diff --git a/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch index 145d4846d..4b54a1c2a 100644 --- a/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch +++ b/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch @@ -1,9 +1,8 @@ -From f9cb7c14dad693481c5b5dfea9223e1904a63a09 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 10:15:13 +0100 -Subject: [PATCH 104/283] hrtimer: move state change before hrtimer_cancel in +Subject: [PATCH 104/290] hrtimer: move state change before hrtimer_cancel in do_nanosleep() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=92732a32e51eff3a6dfb87b0c63295af9e5c69f3 There is a small window between setting t->task to NULL and waking the task up (which would set TASK_RUNNING). So the timer would fire, run and @@ -46,6 +45,3 @@ index c72eb8bfc471..cfa3599fa789 100644 if (!t->task) return 0; --- -2.20.1 - diff --git a/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch b/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch index 587af328b..9da9537c6 100644 --- a/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch +++ b/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch @@ -1,8 +1,7 @@ -From f7d44ae1ff53ff9277b2207c7987b8ffa0a65738 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: [PATCH 105/283] posix-timers: Thread posix-cpu-timers on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 105/290] posix-timers: Thread posix-cpu-timers on -rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e2963af9e43c937bbd8ade2d14c9031bc7fe64b3 posix-cpu-timer code takes non -rt safe locks in hard irq context. Move it to a thread. 
@@ -19,7 +18,7 @@ Signed-off-by: Thomas Gleixner 4 files changed, 164 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 535e57775208..c2dfe6939773 100644 +index c87c11bfd9d9..ba37d39d5c6b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -832,6 +832,9 @@ struct task_struct { @@ -58,7 +57,7 @@ index 0b49b9cf5571..9e3362748214 100644 .thread_group = LIST_HEAD_INIT(init_task.thread_group), .thread_node = LIST_HEAD_INIT(init_signals.thread_head), diff --git a/kernel/fork.c b/kernel/fork.c -index 98c971cb1d36..492bc898b09a 100644 +index 173e010cba45..c940fee1426c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1585,6 +1585,9 @@ static void rt_mutex_init_task(struct task_struct *p) @@ -72,7 +71,7 @@ index 98c971cb1d36..492bc898b09a 100644 tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index 76801b9b481e..baeeaef3b721 100644 +index d62d7ae5201c..8d95e8de98b2 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -3,8 +3,10 @@ @@ -94,7 +93,7 @@ index 76801b9b481e..baeeaef3b721 100644 #include "posix-timers.h" -@@ -1136,14 +1139,12 @@ static inline int fastpath_timer_check(struct task_struct *tsk) +@@ -1140,14 +1143,12 @@ static inline int fastpath_timer_check(struct task_struct *tsk) * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. */ @@ -110,7 +109,7 @@ index 76801b9b481e..baeeaef3b721 100644 /* * The fast path checks that there are no expired thread or thread * group timers. If that's so, just return. -@@ -1196,6 +1197,153 @@ void run_posix_cpu_timers(struct task_struct *tsk) +@@ -1200,6 +1201,153 @@ void run_posix_cpu_timers(struct task_struct *tsk) } } @@ -264,6 +263,3 @@ index 76801b9b481e..baeeaef3b721 100644 /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. --- -2.20.1 - diff --git a/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch b/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch index e062c4ad4..91b86241b 100644 --- a/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch +++ b/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch @@ -1,8 +1,7 @@ -From bc6dc2730e93dec5ead183a44781b5c8bf47b3d7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 May 2011 16:59:16 +0200 -Subject: [PATCH 106/283] sched: Move task_struct cleanup to RCU -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 106/290] sched: Move task_struct cleanup to RCU +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c69af1bd9be59d1e008a9cabae469116cffd44b6 __put_task_struct() does quite some expensive work. We don't want to burden random tasks with that. 
@@ -15,10 +14,10 @@ Signed-off-by: Thomas Gleixner 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index c2dfe6939773..a6f2f76b1162 100644 +index ba37d39d5c6b..a1ef00db6baa 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1186,6 +1186,9 @@ struct task_struct { +@@ -1194,6 +1194,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif @@ -58,7 +57,7 @@ index 108ede99e533..bb98c5b43f81 100644 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT diff --git a/kernel/fork.c b/kernel/fork.c -index 492bc898b09a..cba3cade3d5b 100644 +index c940fee1426c..b14cb6876ac8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -671,7 +671,9 @@ static inline void put_signal_struct(struct signal_struct *sig) @@ -91,6 +90,3 @@ index 492bc898b09a..cba3cade3d5b 100644 void __init __weak arch_task_cache_init(void) { } --- -2.20.1 - diff --git a/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch b/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch index 9303ad899..3bbe92ffb 100644 --- a/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch +++ b/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch @@ -1,8 +1,7 @@ -From 5e09842f9bc61a1babc80804a7c2e003b56989c3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:12:51 +0200 -Subject: [PATCH 107/283] sched: Limit the number of task migrations per batch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 107/290] sched: Limit the number of task migrations per batch +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c6286af6b5a433a8f97f2875f2cdcc7fb9614a9d Put an upper limit on the number of tasks which are migrated per batch to avoid large latencies. @@ -13,7 +12,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 4 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 1f997ceec454..88a886c751ca 100644 +index 86839f848805..de93941bad5a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_features = @@ -28,6 +27,3 @@ index 1f997ceec454..88a886c751ca 100644 /* * period over which we measure -rt task CPU usage in us. --- -2.20.1 - diff --git a/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch b/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch index 915e7c10a..eed532753 100644 --- a/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch +++ b/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch @@ -1,8 +1,7 @@ -From a07363220fb1b9eb6ed3a28d5eef11e4b95f6170 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:20:33 +0200 -Subject: [PATCH 108/283] sched: Move mmdrop to RCU on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 108/290] sched: Move mmdrop to RCU on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=26e860afda01037b95eca42b1e3863949b30b52c Takes sleeping locks and calls into the memory allocator, so nothing we want to do in task switch and oder atomic contexts. 
@@ -38,7 +37,7 @@ index 5ed8f6292a53..f430cf0a377e 100644 atomic_long_t hugetlb_usage; #endif diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h -index 0d10b7ce0da7..b6758c6fffbf 100644 +index e9d4e389aed9..fb59f96fdd2e 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm) @@ -60,7 +59,7 @@ index 0d10b7ce0da7..b6758c6fffbf 100644 * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_sem for writing before modifying the diff --git a/kernel/fork.c b/kernel/fork.c -index cba3cade3d5b..098130002cda 100644 +index b14cb6876ac8..5adb1349c001 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -637,6 +637,19 @@ void __mmdrop(struct mm_struct *mm) @@ -84,7 +83,7 @@ index cba3cade3d5b..098130002cda 100644 { struct mm_struct *mm; diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 88a886c751ca..0c916e7010a2 100644 +index de93941bad5a..3c51cd04d7b4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2729,9 +2729,13 @@ static struct rq *finish_task_switch(struct task_struct *prev) @@ -102,7 +101,7 @@ index 88a886c751ca..0c916e7010a2 100644 } if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) -@@ -5559,6 +5563,8 @@ void sched_setnuma(struct task_struct *p, int nid) +@@ -5600,6 +5604,8 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -111,7 +110,7 @@ index 88a886c751ca..0c916e7010a2 100644 /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. -@@ -5574,7 +5580,11 @@ void idle_task_exit(void) +@@ -5615,7 +5621,11 @@ void idle_task_exit(void) current->active_mm = &init_mm; finish_arch_post_lock_switch(); } @@ -124,7 +123,7 @@ index 88a886c751ca..0c916e7010a2 100644 } /* -@@ -5886,6 +5896,10 @@ int sched_cpu_dying(unsigned int cpu) +@@ -5927,6 +5937,10 @@ int sched_cpu_dying(unsigned int cpu) update_max_interval(); nohz_balance_exit_idle(rq); hrtick_clear(rq); @@ -135,6 +134,3 @@ index 88a886c751ca..0c916e7010a2 100644 return 0; } #endif --- -2.20.1 - diff --git a/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index 315c1a856..63da5114f 100644 --- a/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -1,9 +1,8 @@ -From fcd92c128f39749e17ad0c95cf7154f14f3b575a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 21 Nov 2016 19:31:08 +0100 -Subject: [PATCH 109/283] kernel/sched: move stack + kprobe clean up to +Subject: [PATCH 109/290] kernel/sched: move stack + kprobe clean up to __put_task_struct() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e3ab21f020101536a57f21b778c83b41630914f7 There is no need to free the stack before the task struct (except for reasons mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if @@ -17,6 +16,8 @@ Signed-off-by: Sebastian Andrzej Siewior kernel/sched/core.c | 9 --------- 2 files changed, 10 insertions(+), 9 deletions(-) +diff --git a/kernel/fork.c b/kernel/fork.c +index 5adb1349c001..ff7e5983a21c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -40,6 +40,7 @@ @@ -27,7 +28,7 @@ Signed-off-by: 
Sebastian Andrzej Siewior #include #include #include -@@ -698,6 +699,15 @@ void __put_task_struct(struct task_struc +@@ -693,6 +694,15 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); @@ -43,9 +44,11 @@ Signed-off-by: Sebastian Andrzej Siewior cgroup_free(tsk); task_numa_free(tsk, true); security_task_free(tsk); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 3c51cd04d7b4..a3ba8e9227a3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2741,15 +2741,6 @@ static struct rq *finish_task_switch(str +@@ -2741,15 +2741,6 @@ static struct rq *finish_task_switch(struct task_struct *prev) if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); diff --git a/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch index f329c8016..595a66a19 100644 --- a/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch +++ b/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch @@ -1,9 +1,8 @@ -From 1b7fcb7ee765eafbf39fa9f1427f38370c00eff3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 25 Jun 2011 09:21:04 +0200 -Subject: [PATCH 110/283] sched: Add saved_state for tasks blocked on sleeping +Subject: [PATCH 110/290] sched: Add saved_state for tasks blocked on sleeping locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7c5f9e4e697af083543c1eb4fe2c185bd18134fd Spinlocks are state preserving in !RT. RT changes the state when a task gets blocked on a lock. So we need to remember the state before @@ -19,7 +18,7 @@ Signed-off-by: Thomas Gleixner 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index a6f2f76b1162..ad44849fba2e 100644 +index a1ef00db6baa..c073a3273beb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -600,6 +600,8 @@ struct task_struct { @@ -31,7 +30,7 @@ index a6f2f76b1162..ad44849fba2e 100644 /* * This begins the randomizable portion of task_struct. 
Only -@@ -1613,6 +1615,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); +@@ -1621,6 +1623,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -40,7 +39,7 @@ index a6f2f76b1162..ad44849fba2e 100644 #ifdef CONFIG_SMP diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 31d8e5828ece..5734699b0812 100644 +index a3ba8e9227a3..592e8c0acb8b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1999,8 +1999,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) @@ -103,6 +102,3 @@ index 9a7c3d08b39f..49ae30da28ee 100644 /* * To aid in avoiding the subversion of "niceness" due to uneven distribution --- -2.20.1 - diff --git a/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch b/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch index f56f6249b..4ee34e919 100644 --- a/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch +++ b/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch @@ -1,9 +1,8 @@ -From 833fc1aa92cf522d59c1a02d410198e8ba4d1832 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jun 2011 09:19:06 +0200 -Subject: [PATCH 111/283] sched: Do not account rcu_preempt_depth on RT in +Subject: [PATCH 111/290] sched: Do not account rcu_preempt_depth on RT in might_sleep() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=77016b51aec298c4b547aa2d8a30e001a8bc39d6 RT changes the rcu_preempt_depth semantics, so we cannot check for it in might_sleep(). 
@@ -15,7 +14,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index e102c5bccbb9..87eafcb3312f 100644 +index 68cbe111420b..027c58cdbb6e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -73,6 +73,11 @@ void synchronize_rcu(void); @@ -40,10 +39,10 @@ index e102c5bccbb9..87eafcb3312f 100644 /* Internal to kernel */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 5734699b0812..0be1cc1120db 100644 +index 592e8c0acb8b..a183036a437f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6156,7 +6156,7 @@ void __init sched_init(void) +@@ -6197,7 +6197,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -52,6 +51,3 @@ index 5734699b0812..0be1cc1120db 100644 return (nested == preempt_offset); } --- -2.20.1 - diff --git a/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch b/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch index 0b1338fbb..89d568c0e 100644 --- a/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch +++ b/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch @@ -1,8 +1,7 @@ -From be9eeaeabb9966cc740ca075b3bbffe54b2506a4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 22:51:33 +0200 -Subject: [PATCH 112/283] sched: Use the proper LOCK_OFFSET for cond_resched() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 112/290] sched: Use the proper LOCK_OFFSET for cond_resched() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=04e2f43d1deac4ff13f0e108fc89306198a2f762 RT does not increment preempt count when a 'sleeping' spinlock is locked. Update PREEMPT_LOCK_OFFSET for that case. @@ -28,6 +27,3 @@ index f7a17fcc3fec..b7fe717eb1f4 100644 /* * The preempt_count offset needed for things like: --- -2.20.1 - diff --git a/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch b/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch index 2744a2aef..5fc2f6029 100644 --- a/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch +++ b/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch @@ -1,8 +1,7 @@ -From a6ae12a40e837ece98c723e624820e41421b70a0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 13 Sep 2011 16:42:35 +0200 -Subject: [PATCH 113/283] sched: Disable TTWU_QUEUE on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 113/290] sched: Disable TTWU_QUEUE on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f1730e401e00fbd36906e5083279d065661f685a The queued remote wakeup mechanism can introduce rather large latencies if the number of migrated tasks is high. Disable it for RT. @@ -33,6 +32,3 @@ index 85ae8488039c..68de18405857 100644 /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 
--- -2.20.1 - diff --git a/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index 3175b2a1d..3202b69df 100644 --- a/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -1,9 +1,8 @@ -From 4ad0a976dcfe34e35355f20f464dd7eb08dd38e5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 18 Mar 2013 15:12:49 -0400 -Subject: [PATCH 114/283] sched/workqueue: Only wake up idle workers if not +Subject: [PATCH 114/290] sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=dfa983ba257c10e427ff07cac06d6b270f5ee643 In -rt, most spin_locks() turn into mutexes. One of these spin_lock conversions is performed on the workqueue gcwq->lock. When the idle @@ -25,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 0be1cc1120db..5ca8f53ba4fd 100644 +index a183036a437f..a1cedac8b707 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3498,8 +3498,10 @@ static void __sched notrace __schedule(bool preempt) +@@ -3539,8 +3539,10 @@ static void __sched notrace __schedule(bool preempt) * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. @@ -40,6 +39,3 @@ index 0be1cc1120db..5ca8f53ba4fd 100644 struct task_struct *to_wakeup; to_wakeup = wq_worker_sleeping(prev); --- -2.20.1 - diff --git a/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index 59f82e9da..808d9a671 100644 --- a/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -1,9 +1,8 @@ -From 807cb05d929f39b81b980b673d69ea33c6473b96 Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 26 Jun 2017 17:07:15 +0200 -Subject: [PATCH 115/283] rt: Increase/decrease the nr of migratory tasks when +Subject: [PATCH 115/290] rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6ef7d4c7da82dc7df51211d0fe07912d3d67dc69 There is a problem in the migrate_disable()/enable() implementation regarding the number of migratory tasks in the rt/dl RQs. 
The problem @@ -82,10 +81,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 5ca8f53ba4fd..434fd8946629 100644 +index a1cedac8b707..9ba05b3f69e1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7147,6 +7147,47 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7184,6 +7184,47 @@ const u32 sched_prio_to_wmult[40] = { #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) @@ -133,7 +132,7 @@ index 5ca8f53ba4fd..434fd8946629 100644 void migrate_disable(void) { struct task_struct *p = current; -@@ -7170,10 +7211,9 @@ void migrate_disable(void) +@@ -7207,10 +7248,9 @@ void migrate_disable(void) } preempt_disable(); @@ -146,7 +145,7 @@ index 5ca8f53ba4fd..434fd8946629 100644 preempt_enable(); } -@@ -7205,9 +7245,8 @@ void migrate_enable(void) +@@ -7242,9 +7282,8 @@ void migrate_enable(void) preempt_disable(); @@ -157,6 +156,3 @@ index 5ca8f53ba4fd..434fd8946629 100644 if (p->migrate_disable_update) { struct rq *rq; --- -2.20.1 - diff --git a/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch b/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch index e96427823..3f8bfa40c 100644 --- a/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch +++ b/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch @@ -1,8 +1,7 @@ -From 6c5886f66d8a616f3294c2fd8ca26b4c638e430b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 15 Jun 2011 12:36:06 +0200 -Subject: [PATCH 116/283] hotplug: Lightweight get online cpus -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 116/290] hotplug: Lightweight get online cpus +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b3427c6b64cd30370b25801db8a88491596c1d4c get_online_cpus() is a heavy weight function which involves a global mutex. 
migrate_disable() wants a simpler construct which prevents only @@ -43,7 +42,7 @@ index 006f69f9277b..d45ea5c98cdd 100644 /* Wrappers which go away once all code is converted */ diff --git a/kernel/cpu.c b/kernel/cpu.c -index 46aefe5c0e35..9001e1779325 100644 +index d9f855cb9f6f..02e05a7e463c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -281,6 +281,21 @@ static int cpu_hotplug_disabled; @@ -69,10 +68,10 @@ index 46aefe5c0e35..9001e1779325 100644 void cpus_read_lock(void) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 434fd8946629..a8e9283f018c 100644 +index 9ba05b3f69e1..5723a8966865 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7211,6 +7211,7 @@ void migrate_disable(void) +@@ -7248,6 +7248,7 @@ void migrate_disable(void) } preempt_disable(); @@ -80,7 +79,7 @@ index 434fd8946629..a8e9283f018c 100644 migrate_disable_update_cpus_allowed(p); p->migrate_disable = 1; -@@ -7276,12 +7277,15 @@ void migrate_enable(void) +@@ -7313,12 +7314,15 @@ void migrate_enable(void) arg.task = p; arg.dest_cpu = dest_cpu; @@ -96,6 +95,3 @@ index 434fd8946629..a8e9283f018c 100644 preempt_enable(); } EXPORT_SYMBOL(migrate_enable); --- -2.20.1 - diff --git a/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch b/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch index 8fe3a5b4d..adb82a021 100644 --- a/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch +++ b/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch @@ -1,8 +1,7 @@ -From cea179e4bc61a9548573e2f5ece3e78b976c35eb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:56:42 +0200 -Subject: [PATCH 117/283] trace: Add migrate-disabled counter to tracing output -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 117/290] trace: Add migrate-disabled counter to tracing output +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1beef7a4cb4f8020766425cd50172f1c8edeaf5d Signed-off-by: Thomas Gleixner --- @@ -26,7 +25,7 @@ index 78a010e19ed4..0403d9696944 100644 #define TRACE_EVENT_TYPE_MAX \ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 3b0de19b9ed7..496c5d358010 100644 +index bdd7f3d78724..befe5978a832 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2149,6 +2149,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, @@ -81,6 +80,3 @@ index 6e6cc64faa38..46c96744f09d 100644 return !trace_seq_has_overflowed(s); } --- -2.20.1 - diff --git a/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch b/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch index b4f11ee47..1aee8e4ef 100644 --- a/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch +++ b/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch @@ -1,8 +1,7 @@ -From d2810fd8e650b96863a15b748cf385698ecde121 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 18:51:23 +0200 -Subject: [PATCH 118/283] lockdep: Make it RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 118/290] lockdep: Make it RT aware +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7effdc8f2c197c37436e86a7b4ab376cf38b498f teach lockdep that we don't really do softirqs on -RT. 
@@ -54,10 +53,10 @@ index 21619c92c377..b20eeb25e9fa 100644 defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c -index 26b57e24476f..6daeb369f691 100644 +index 1e272f6a01e7..1938b4bfb098 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c -@@ -3823,6 +3823,7 @@ static void check_flags(unsigned long flags) +@@ -3826,6 +3826,7 @@ static void check_flags(unsigned long flags) } } @@ -65,7 +64,7 @@ index 26b57e24476f..6daeb369f691 100644 /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only -@@ -3837,6 +3838,7 @@ static void check_flags(unsigned long flags) +@@ -3840,6 +3841,7 @@ static void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } @@ -73,6 +72,3 @@ index 26b57e24476f..6daeb369f691 100644 if (!debug_locks) print_irqtrace_events(current); --- -2.20.1 - diff --git a/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch b/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch index 33a3f725b..606fbe576 100644 --- a/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch +++ b/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch @@ -1,9 +1,8 @@ -From bb78f12d34959257b4413ff817644ce7477a93cd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 29 Nov 2011 20:18:22 -0500 -Subject: [PATCH 119/283] tasklet: Prevent tasklets from going into infinite +Subject: [PATCH 119/290] tasklet: Prevent tasklets from going into infinite spin in RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=eef329f6c046880d5fa82df96ecff7a6c6582e7f When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, and spinlocks turn are mutexes. 
But this can cause issues with @@ -298,6 +297,3 @@ index 6f584861d329..1d3a482246cc 100644 static int ksoftirqd_should_run(unsigned int cpu) { return local_softirq_pending(); --- -2.20.1 - diff --git a/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch b/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch index dd2ac2daa..3c9b401fa 100644 --- a/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch +++ b/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch @@ -1,8 +1,7 @@ -From 573f3d4984a2aa2239e4e28c0df1dc28517397d2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 13 Nov 2011 17:17:09 +0100 -Subject: [PATCH 120/283] softirq: Check preemption after reenabling interrupts -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 120/290] softirq: Check preemption after reenabling interrupts +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9d428d20b2a3fd2b59f17e811b7f51972a8a0d4a raise_softirq_irqoff() disables interrupts and wakes the softirq daemon, but after reenabling interrupts there is no preemption check, @@ -117,7 +116,7 @@ index 86a709954f5a..9c069ef83d6d 100644 return 0; } diff --git a/net/core/dev.c b/net/core/dev.c -index 138951d28643..48c4dc728d1b 100644 +index ddd8aab20adf..92e4d559ec93 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2726,6 +2726,7 @@ static void __netif_reschedule(struct Qdisc *q) @@ -167,7 +166,7 @@ index 138951d28643..48c4dc728d1b 100644 } EXPORT_SYMBOL(__napi_schedule); -@@ -9305,6 +9311,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -9307,6 +9313,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); @@ -175,6 +174,3 @@ index 138951d28643..48c4dc728d1b 100644 #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; --- -2.20.1 - diff --git a/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch b/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch index de34e0405..d076a5fee 100644 --- a/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch +++ b/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch @@ -1,8 +1,7 @@ -From ac1ed27a8471e837c8ea8811ca8322f47f47add2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 13:59:17 +0200 -Subject: [PATCH 121/283] softirq: Disable softirq stacks for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 121/290] softirq: Disable softirq stacks for RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7e2dac7d12e5f5068d09042b543ca3001c0eb674 Disable extra stacks for softirqs. We want to preempt softirqs and having them on special IRQ-stack does not make this easier. 
@@ -60,7 +59,7 @@ index 695b24a2d954..032ada21b7bd 100644 /* * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S -index 262ba9481781..4935ef9a142e 100644 +index 1bf6aaefd26a..52c7298f583a 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -32,6 +32,7 @@ @@ -120,10 +119,10 @@ index 713670e6d13d..5dfc715343f9 100644 #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index c90e00db5c13..7b29f2c10d01 100644 +index ccb5e3486aee..7ffd83c57ef2 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -1059,6 +1059,7 @@ bad_gs: +@@ -1083,6 +1083,7 @@ EXPORT_SYMBOL(native_load_gs_index) jmp 2b .previous @@ -131,7 +130,7 @@ index c90e00db5c13..7b29f2c10d01 100644 /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp -@@ -1069,6 +1070,7 @@ ENTRY(do_softirq_own_stack) +@@ -1093,6 +1094,7 @@ ENTRY(do_softirq_own_stack) leaveq ret ENDPROC(do_softirq_own_stack) @@ -172,6 +171,3 @@ index e74936c7be48..cb2d1384cb0d 100644 void do_softirq_own_stack(void); #else static inline void do_softirq_own_stack(void) --- -2.20.1 - diff --git a/debian/patches-rt/0122-softirq-Split-softirq-locks.patch b/debian/patches-rt/0122-softirq-Split-softirq-locks.patch index 958219614..43b8435f3 100644 --- a/debian/patches-rt/0122-softirq-Split-softirq-locks.patch +++ b/debian/patches-rt/0122-softirq-Split-softirq-locks.patch @@ -1,8 +1,7 @@ -From 92ea7fb3bb1aa31dbfaa1aa02e6a83b3d5e2e476 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 14:20:47 +0100 -Subject: [PATCH 122/283] softirq: Split softirq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 122/290] softirq: Split softirq locks +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9708867fdc9d911c92c3b3f7b7d5ff8e08d387a3 The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which @@ -178,10 +177,10 @@ index 9984f2b75b73..27c3176d88d2 100644 #define in_task() (!(preempt_count() & \ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) diff --git a/include/linux/sched.h b/include/linux/sched.h -index ad44849fba2e..7ecccccbd358 100644 +index c073a3273beb..baa5fceea0ff 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1190,6 +1190,8 @@ struct task_struct { +@@ -1198,6 +1198,8 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; @@ -190,7 +189,7 @@ index ad44849fba2e..7ecccccbd358 100644 #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; -@@ -1387,6 +1389,7 @@ extern struct pid *cad_pid; +@@ -1395,6 +1397,7 @@ extern struct pid *cad_pid; /* * Per process flags */ @@ -827,6 +826,3 @@ index c217af74dddf..6482945f8ae8 100644 return false; } --- -2.20.1 - diff --git a/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch b/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch index 0ba701a49..373b27112 100644 --- a/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ b/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch @@ -1,8 +1,7 @@ -From 05b2969dc9be8c70c6bac548cf956a1d14d905f5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 16 Jun 2017 19:03:16 +0200 -Subject: [PATCH 
123/283] net/core: use local_bh_disable() in netif_rx_ni() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 123/290] net/core: use local_bh_disable() in netif_rx_ni() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ae0f813a827a24ec0fa98781902906f5cc5dbda6 In 2004 netif_rx_ni() gained a preempt_disable() section around netif_rx() and its do_softirq() + testing for it. The do_softirq() part @@ -19,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index 48c4dc728d1b..abaf8a73403b 100644 +index 92e4d559ec93..defca5df6baa 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4526,11 +4526,9 @@ int netif_rx_ni(struct sk_buff *skb) @@ -36,6 +35,3 @@ index 48c4dc728d1b..abaf8a73403b 100644 return err; } --- -2.20.1 - diff --git a/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch b/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch index 262b0c22e..1041ada24 100644 --- a/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch +++ b/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch @@ -1,9 +1,8 @@ -From c1e610ee580a46e958b904e4b2ec3c0d701e08d1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2012 13:01:27 +0100 -Subject: [PATCH 124/283] genirq: Allow disabling of softirq processing in irq +Subject: [PATCH 124/290] genirq: Allow disabling of softirq processing in irq thread context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e12715d5618f4eaeff0a52a9635bb2d772ef359d The processing of softirqs in irq thread context is a performance gain for the non-rt workloads of a system, but it's counterproductive for @@ -69,10 +68,10 @@ index c9bffda04a45..73d3146db74d 100644 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index af2a8757abfb..69b4bfd4654c 100644 +index eadcbfbd434a..3858ac895777 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -978,7 +978,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) +@@ -1004,7 +1004,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) atomic_inc(&desc->threads_handled); irq_finalize_oneshot(desc, action); @@ -89,7 +88,7 @@ index af2a8757abfb..69b4bfd4654c 100644 return ret; } -@@ -1488,6 +1496,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) +@@ -1514,6 +1522,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } @@ -156,6 +155,3 @@ index fd89f8ab85ac..3e9333d148ad 100644 int in_serving_softirq(void) { return current->flags & PF_IN_SOFTIRQ; --- -2.20.1 - diff --git a/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch index c2b0b2aa6..2cad3a691 100644 --- a/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ b/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch @@ -1,8 +1,7 @@ -From 9d4b8d35c23cd7cece2662fa17dea28d39f73e82 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 16:34:17 +0100 
-Subject: [PATCH 125/283] softirq: split timer softirqs out of ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 125/290] softirq: split timer softirqs out of ksoftirqd +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=99ec74d1868440b1f19c612415c08f5b940d8090 The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with timer wakeup which can not happen in hardirq context. The prio has been @@ -210,6 +209,3 @@ index 3e9333d148ad..fe4e59c80a08 100644 return 0; } early_initcall(spawn_ksoftirqd); --- -2.20.1 - diff --git a/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch b/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch index 5ccc63ea6..ef90286be 100644 --- a/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch +++ b/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch @@ -1,9 +1,8 @@ -From 75bef693f46a9a3fd05cda70554c004df275c92b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 13:19:59 +0100 -Subject: [PATCH 126/283] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 126/290] softirq: Avoid "local_softirq_pending" messages if ksoftirqd is blocked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=692deaede6b1b2b424faa2d9701e1b6b23fbc381 If the ksoftirqd thread has a softirq pending and is blocked on the `local_softirq_locks' lock then softirq_check_pending_idle() won't @@ -107,6 +106,3 @@ index fe4e59c80a08..1920985eeb09 100644 } if (warnpending) { --- -2.20.1 - diff --git a/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch b/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch index 5158e8a43..7bedfea8b 100644 --- a/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch +++ b/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch @@ -1,9 +1,8 @@ -From fffcebd7dd6c2220fd06db35bffa957b6d4f3de1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:49:29 +0100 -Subject: [PATCH 127/283] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 127/290] softirq: Avoid "local_softirq_pending" messages if task is in cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=35dcb7fcc7ec0b10e0b649f1e1ea2a63a965dac2 If the softirq thread enters cpu_chill() then ->state is UNINTERRUPTIBLE and has no ->pi_blocked_on set and so its mask is not taken into account. 
@@ -36,6 +35,3 @@ index 1920985eeb09..27a4bb2303d0 100644 /* Clear all bits pending in that task */ *pending &= ~(tsk->softirqs_raised); ret = true; --- -2.20.1 - diff --git a/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch b/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch index eb4c01b2e..4d32d9b45 100644 --- a/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch +++ b/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch @@ -1,8 +1,7 @@ -From e35f309a9eb6078f3d09d042b9ebcff7612d9398 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 2 Dec 2015 11:34:07 +0100 -Subject: [PATCH 128/283] rtmutex: trylock is okay on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 128/290] rtmutex: trylock is okay on -RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6844789b0db950fc38370175d40fd017331e8fb7 non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On -RT we don't run softirqs in IRQ context but in thread context so it is @@ -29,6 +28,3 @@ index 9562aaa2afdc..72abe7c121fa 100644 return 0; ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); --- -2.20.1 - diff --git a/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch index c75eb6174..9a8c3679f 100644 --- a/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ b/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch @@ -1,8 +1,7 @@ -From 2755280eb692ac3db15304cb7874d039103cf43f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 15 Sep 2016 10:51:27 +0200 -Subject: [PATCH 129/283] fs/nfs: turn rmdir_sem into a semaphore -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 129/290] fs/nfs: turn rmdir_sem into a semaphore +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=40419bc23d3cd7c730310c4a6461dd40f579010a The RW semaphore had a reader side which used the _non_owner version because it most likely took the reader lock in one thread and released it @@ -22,10 +21,10 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 43 insertions(+), 4 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index 8bfaa658b2c1..62afe8ca1e36 100644 +index b8d686087952..17f8a9259971 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c -@@ -1786,7 +1786,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) +@@ -1815,7 +1815,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { @@ -37,7 +36,7 @@ index 8bfaa658b2c1..62afe8ca1e36 100644 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { -@@ -1796,7 +1800,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) +@@ -1825,7 +1829,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) case -ENOENT: nfs_dentry_handle_enoent(dentry); } @@ -50,10 +49,10 @@ index 8bfaa658b2c1..62afe8ca1e36 100644 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); trace_nfs_rmdir_exit(dir, dentry, error); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c -index b65aee481d13..110ee6f78c31 100644 +index e4cd3a2fe698..6f22c8d65760 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c -@@ -2103,7 +2103,11 @@ static void init_once(void *foo) +@@ -2104,7 +2104,11 @@ static void init_once(void 
*foo) atomic_long_set(&nfsi->nrequests, 0); atomic_long_set(&nfsi->commit_info.ncommit, 0); atomic_set(&nfsi->commit_info.rpcs_out, 0); @@ -146,6 +145,3 @@ index a0831e9d19c9..94b6fefd90b0 100644 struct mutex commit_mutex; #if IS_ENABLED(CONFIG_NFS_V4) --- -2.20.1 - diff --git a/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch b/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch index a4c54e174..4138d0ac8 100644 --- a/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch +++ b/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch @@ -1,8 +1,7 @@ -From 7130157f0703b4fe45ca427befd9ef0a7f88612f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:04:15 +0200 -Subject: [PATCH 130/283] rtmutex: Handle the various new futex race conditions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 130/290] rtmutex: Handle the various new futex race conditions +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3a9436ea4bd1ca34b63976b763bab4fe93044700 RT opens a few new interesting race conditions in the rtmutex/futex combo due to futex hash bucket lock being a 'sleeping' spinlock and @@ -250,6 +249,3 @@ index d1d62f942be2..f4b6596d224a 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); --- -2.20.1 - diff --git a/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch index 7e5dd04d3..c38ce3891 100644 --- a/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch +++ b/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch @@ -1,8 +1,7 @@ -From 7e148d447a1c2abcfba79f0d7cefeeb24350ca70 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 131/283] futex: Fix bug on when a requeued RT task times out -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 131/290] futex: Fix bug on when a requeued RT task times out +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=09e670527717c63889ba34375c51c55f408402b2 Requeue with timeout causes a bug with PREEMPT_RT_FULL. 
@@ -116,6 +115,3 @@ index f4b6596d224a..461527f3f7af 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, --- -2.20.1 - diff --git a/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch index 634b9e044..987324b41 100644 --- a/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ b/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -1,9 +1,8 @@ -From aee00b9108ae7ac1108f577b3bbdfd0b34709904 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2013 11:17:42 +0100 -Subject: [PATCH 132/283] futex: Ensure lock/unlock symetry versus pi_lock and +Subject: [PATCH 132/290] futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=653ee648a2ba5f857ec351405fc64f9cf8867b84 In exit_pi_state_list() we have the following locking construct: @@ -45,6 +44,3 @@ index b2a90c66d8f4..4d6501d689b5 100644 put_pi_state(pi_state); continue; } --- -2.20.1 - diff --git a/debian/patches-rt/0133-pid.h-include-atomic.h.patch b/debian/patches-rt/0133-pid.h-include-atomic.h.patch index 7a8b04cf3..9b4b52f2d 100644 --- a/debian/patches-rt/0133-pid.h-include-atomic.h.patch +++ b/debian/patches-rt/0133-pid.h-include-atomic.h.patch @@ -1,8 +1,7 @@ -From 6a4de7464f4d9165d37b0649a2b3e15f4cb64385 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jul 2015 19:43:56 +0300 -Subject: [PATCH 133/283] pid.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 133/290] pid.h: include atomic.h +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6c3b20a2664b9baa69bba26f6743559038199793 This patch fixes build error: CC kernel/pid_namespace.o @@ -38,6 +37,3 @@ index 14a9a39da9c7..a9026a5da196 100644 enum pid_type { --- -2.20.1 - diff --git a/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch b/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch index 1b034b8ca..75c7752f1 100644 --- a/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch +++ b/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch @@ -1,8 +1,7 @@ -From 7a6eaa6b88680cbb1309983cc198a08c1bccacfa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 22 Dec 2016 17:28:33 +0100 -Subject: [PATCH 134/283] arm: include definition for cpumask_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 134/290] arm: include definition for cpumask_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=11ec5b0cebd8ba85819ad3cddcf33d643f3513a0 This definition gets pulled in by other files. With the (later) split of RCU and spinlock.h it won't compile anymore. 
@@ -26,6 +25,3 @@ index 46d41140df27..c421b5b81946 100644 struct irqaction; struct pt_regs; --- -2.20.1 - diff --git a/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch index 7cfcb3560..911b329ae 100644 --- a/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ b/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch @@ -1,9 +1,8 @@ -From 9043ba063f09071ec009efc5a340864f46b5d660 Mon Sep 17 00:00:00 2001 From: "Wolfgang M. Reimer" Date: Tue, 21 Jul 2015 16:20:07 +0200 -Subject: [PATCH 135/283] locking: locktorture: Do NOT include rwlock.h +Subject: [PATCH 135/290] locking: locktorture: Do NOT include rwlock.h directly -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=95a2cfde3e99e4cbf100ec5da1e4897618359af0 Including rwlock.h directly will cause kernel builds to fail if CONFIG_PREEMPT_RT_FULL is defined. The correct header file @@ -29,6 +28,3 @@ index 7d0b0ed74404..a81e6ef33a04 100644 #include #include #include --- -2.20.1 - diff --git a/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch b/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch index 00ae29b57..383ce7b0e 100644 --- a/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch +++ b/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch @@ -1,8 +1,7 @@ -From 471f3ddc1d39e94a3bb7b3de877630a7ba33e9ed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jun 2011 11:43:52 +0200 -Subject: [PATCH 136/283] rtmutex: Add rtmutex_lock_killable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 136/290] rtmutex: Add rtmutex_lock_killable() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d10ccdda6815a479f62a22fdbe86b733a8f0fe79 Add "killable" type to rtmutex. We need this since rtmutex are used as "normal" mutexes which do use this type. @@ -55,6 +54,3 @@ index 1c3f56d3d9b6..a4b2af7718f8 100644 /** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided --- -2.20.1 - diff --git a/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch b/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch index c3a8a643c..c25eefaba 100644 --- a/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch +++ b/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch @@ -1,8 +1,7 @@ -From f8590dc009d4339ab04d322fbc87a267824eb97f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 1 Apr 2017 12:50:59 +0200 -Subject: [PATCH 137/283] rtmutex: Make lock_killable work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 137/290] rtmutex: Make lock_killable work +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=330b8f36a58113eb3839e507483c81dce194fd83 Locking an rt mutex killable does not work because signal handling is restricted to TASK_INTERRUPTIBLE. 
@@ -46,6 +45,3 @@ index a4b2af7718f8..f058bb976212 100644 } raw_spin_unlock_irq(&lock->wait_lock); --- -2.20.1 - diff --git a/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch b/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch index 60860d0e0..cf0032ddd 100644 --- a/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch +++ b/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch @@ -1,8 +1,7 @@ -From 2211e897694e1560a1b5b5e552c2321e131dacc3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:34:01 +0200 -Subject: [PATCH 138/283] spinlock: Split the lock types header -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 138/290] spinlock: Split the lock types header +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=dd9cbf1744910285047662eeb1ed3b5e1455da1c Split raw_spinlock into its own file and the remaining spinlock_t into its own non-RT header. The non-RT header will be replaced later by sleeping @@ -216,6 +215,3 @@ index 000000000000..822bf64a61d3 +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif --- -2.20.1 - diff --git a/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch b/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch index 784721eea..cd2337652 100644 --- a/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch +++ b/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch @@ -1,8 +1,7 @@ -From eac9084f86148510c6c67ed209d33fcb15fb8b94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:06:39 +0200 -Subject: [PATCH 139/283] rtmutex: Avoid include hell -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 139/290] rtmutex: Avoid include hell +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7fc473caca7282b1300e989d27056cfc2d573a30 Include only the required raw types. This avoids pulling in the complete spinlock header which in turn requires rtmutex.h at some point. 
@@ -25,6 +24,3 @@ index 81ece6a8291a..a355289b1fa1 100644 extern int max_lock_depth; /* for sysctl */ --- -2.20.1 - diff --git a/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch b/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch index 5fd2e6254..eb7bbabe5 100644 --- a/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch +++ b/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch @@ -1,11 +1,10 @@ -From ac40e3b4806fe59befb8c9387e7bb317a560cd9a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 16:56:02 +0100 -Subject: [PATCH 140/283] rbtree: don't include the rcu header +Subject: [PATCH 140/290] rbtree: don't include the rcu header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3c9aecc775f1a9c741feded9f8ce1e3c933f16a5 The RCU header pulls in spinlock.h and fails due not yet defined types: @@ -104,7 +103,7 @@ index 000000000000..7066962a4379 + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 87eafcb3312f..b73715c3c3c2 100644 +index 027c58cdbb6e..e6733d7911e9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -42,6 +42,7 @@ @@ -170,6 +169,3 @@ index 87eafcb3312f..b73715c3c3c2 100644 /** * rcu_swap_protected() - swap an RCU and a regular pointer * @rcu_ptr: RCU pointer --- -2.20.1 - diff --git a/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch index 1745d19cb..f279a6031 100644 --- a/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch +++ b/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch @@ -1,8 +1,7 @@ -From 701e93b4c6964c05cec92d9611c50dc83a6b6b9b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:14:22 +0200 -Subject: [PATCH 141/283] rtmutex: Provide rt_mutex_slowlock_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 141/290] rtmutex: Provide rt_mutex_slowlock_locked() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=aff3c4fbe35a000662340f80be322f1c594854cf This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt. 
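The split follows the common outer/inner refactoring: the outer function keeps the wait_lock handling, the inner function does the blocking loop, so a caller that already holds wait_lock (here the RT rwsem) can use the inner part directly. A much-simplified sketch of that shape; the real functions take further arguments (timeout, chainwalk, ww_ctx) omitted here:

  /* called and returns with lock->wait_lock held */
  int rt_mutex_slowlock_locked(struct rt_mutex *lock, int state)
  {
          /* enqueue the waiter, block, retry taking the lock ... */
          return 0;
  }

  static int rt_mutex_slowlock(struct rt_mutex *lock, int state)
  {
          unsigned long flags;
          int ret;

          raw_spin_lock_irqsave(&lock->wait_lock, flags);
          ret = rt_mutex_slowlock_locked(lock, state);
          raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

          return ret;
  }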
@@ -140,6 +139,3 @@ index 461527f3f7af..cb9815f0c766 100644 #ifdef CONFIG_DEBUG_RT_MUTEXES # include "rtmutex-debug.h" --- -2.20.1 - diff --git a/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch index 840bf9938..8fa239fe0 100644 --- a/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch +++ b/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch @@ -1,9 +1,8 @@ -From 8bd2090cb448193446dee9a6a7fb661e89263f46 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:36:39 +0200 -Subject: [PATCH 142/283] rtmutex: export lockdep-less version of rt_mutex's +Subject: [PATCH 142/290] rtmutex: export lockdep-less version of rt_mutex's lock, trylock and unlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6ada9a3ace4091645571a46308a2fdd39270497f Required for lock implementation ontop of rtmutex. @@ -148,6 +147,3 @@ index cb9815f0c766..5955ad2aa2a8 100644 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk, --- -2.20.1 - diff --git a/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch b/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch index 6cf19578b..11ead9b77 100644 --- a/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch +++ b/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch @@ -1,8 +1,7 @@ -From 966d9bcd54442c2ae5dd15f89a908539d6ad7137 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:11:19 +0200 -Subject: [PATCH 143/283] rtmutex: add sleeping lock implementation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 143/290] rtmutex: add sleeping lock implementation +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7f302e683aa4acd6aa30ca84762059087161208d Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -23,10 +22,10 @@ Signed-off-by: Sebastian Andrzej Siewior create mode 100644 include/linux/spinlock_types_rt.h diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index 3d83ebb302cf..d81a153df451 100644 +index f6f94e54ab96..78f30d553037 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -259,6 +259,9 @@ extern int _cond_resched(void); +@@ -260,6 +260,9 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) @@ -36,7 +35,7 @@ index 3d83ebb302cf..d81a153df451 100644 # define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void ___might_sleep(const char *file, int line, -@@ -266,6 +269,7 @@ extern int _cond_resched(void); +@@ -267,6 +270,7 @@ extern int _cond_resched(void); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) @@ -103,7 +102,7 @@ index a355289b1fa1..138bd1e183e0 100644 * rt_mutex_is_locked - is the mutex locked * @lock: the mutex to be queried diff --git a/include/linux/sched.h b/include/linux/sched.h -index 7ecccccbd358..1797fd3c8cbb 100644 +index baa5fceea0ff..f25c9566ee92 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h 
@@ -134,6 +134,9 @@ struct task_group; @@ -396,7 +395,7 @@ index 000000000000..3e3d8c5f7a9a + +#endif diff --git a/kernel/fork.c b/kernel/fork.c -index 247b08eb66c8..96297e71019c 100644 +index ff7e5983a21c..652986ccb41c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -895,6 +895,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) @@ -1144,7 +1143,7 @@ index 5955ad2aa2a8..6fcf0a3e180d 100644 #ifdef CONFIG_DEBUG_RT_MUTEXES # include "rtmutex-debug.h" diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a8e9283f018c..868d3395c3cf 100644 +index 5723a8966865..654e2ec54f9d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct task_struct *p) @@ -1203,6 +1202,3 @@ index a8e9283f018c..868d3395c3cf 100644 put_task_struct(task); } } --- -2.20.1 - diff --git a/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch index 8f95d58bb..eff654e01 100644 --- a/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch @@ -1,8 +1,7 @@ -From b589ab9fd5ba3e54255590659660a6c0beecdc4b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:17:03 +0200 -Subject: [PATCH 144/283] rtmutex: add mutex implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 144/290] rtmutex: add mutex implementation based on rtmutex +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bab10e0b6d5f774e9d4b8745a51b7d9cc64f9eb0 Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -378,6 +377,3 @@ index 000000000000..4f81595c0f52 + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); --- -2.20.1 - diff --git a/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch index b99050a48..3958ce9ea 100644 --- a/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch @@ -1,8 +1,7 @@ -From 5b491c8328f76502a2285229fd224ec7116dfed7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:28:34 +0200 -Subject: [PATCH 145/283] rtmutex: add rwsem implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 145/290] rtmutex: add rwsem implementation based on rtmutex +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3056e6baa0fe46629d72199996326a75d58409c6 The RT specific R/W semaphore implementation restricts the number of readers to one because a writer cannot block on multiple readers and inherit its @@ -422,6 +421,3 @@ index 000000000000..7d3c5cf3d23d + /* Release it and account current as reader */ + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} --- -2.20.1 - diff --git a/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch index 6414f8c98..22bb73993 100644 --- a/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch +++ 
b/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch @@ -1,8 +1,7 @@ -From 0ac4d3a8d1b2e6effaea2997c80bb3583d3f6a12 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:18:06 +0200 -Subject: [PATCH 146/283] rtmutex: add rwlock implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 146/290] rtmutex: add rwlock implementation based on rtmutex +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fd1933abf31e4d8b16f98fda611f5ec1cb9b1161 The implementation is bias-based, similar to the rwsem implementation. @@ -577,6 +576,3 @@ index 000000000000..aebb7ce25bc6 + do_rwlock_rt_init(rwlock, name, key); +} +EXPORT_SYMBOL(__rt_rwlock_init); --- -2.20.1 - diff --git a/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch b/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch index 212ce900d..5b4adc2a7 100644 --- a/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch +++ b/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch @@ -1,8 +1,7 @@ -From fa17e8511d2f284ca4d1bebe5744ec1e41ce1803 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Jan 2019 21:16:31 +0100 -Subject: [PATCH 147/283] rtmutex/rwlock: preserve state like a sleeping lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 147/290] rtmutex/rwlock: preserve state like a sleeping lock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e181b2520df8a6c8ec1db3b847bf2036dd54fbca The rwlock is spinning while acquiring a lock. 
Therefore it must become a sleeping lock on RT and preserve its task state while sleeping and @@ -28,6 +27,3 @@ index aebb7ce25bc6..8f90afe111ce 100644 rt_spin_lock_slowlock_locked(m, &waiter, flags); /* * The slowlock() above is guaranteed to return with the rtmutex is --- -2.20.1 - diff --git a/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch index 92329cb0e..4ca016e0e 100644 --- a/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch +++ b/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch @@ -1,8 +1,7 @@ -From c3de22b64c9e40a638fd521cbfb87285e475d2ee Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:31:14 +0200 -Subject: [PATCH 148/283] rtmutex: wire up RT's locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 148/290] rtmutex: wire up RT's locking +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=124dab5838407b56b59161de8a01d98181dfe729 Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -266,6 +265,3 @@ index 9aa0fccd5d43..76d0b40d9193 100644 } + +#endif --- -2.20.1 - diff --git a/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch index d0b52e0a8..4f2bbd319 100644 --- a/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ b/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch @@ -1,8 +1,7 @@ -From a0132c114d821b81d4e4104b797f76f0dd8fbcba Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 17:34:38 +0200 -Subject: [PATCH 149/283] rtmutex: add ww_mutex addon for mutex-rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 149/290] rtmutex: add ww_mutex addon for mutex-rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b5727916bc6ad502e7bdfa2073c4d24b1a29d482 Signed-off-by: Sebastian Andrzej Siewior --- @@ -440,6 +439,3 @@ index 7d3c5cf3d23d..660e22caf709 100644 /* * The slowlock() above is guaranteed to return with the rtmutex (for * ret = 0) is now held, so there can't be a writer active. Increment --- -2.20.1 - diff --git a/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch b/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch index 6a0d52fce..4569d2094 100644 --- a/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch +++ b/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch @@ -1,8 +1,7 @@ -From b0a0152ae5fe9de88c2b10c9e213ab1d10459876 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 14:58:57 +0200 -Subject: [PATCH 150/283] kconfig: Add PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 150/290] kconfig: Add PREEMPT_RT_FULL +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9cf79c789681c52a2ec521c420c9e50e9b6a2a19 Introduce the final symbol for PREEMPT_RT_FULL. 
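The new symbol is consumed from C code through the usual CONFIG_ conditionals; later patches in the series (for instance the preempt_*_rt helpers used by the activate_mm change further down) hang off exactly this switch. The definitions below are an assumed sketch of that consumer pattern, not a quote from the series:

  #ifdef CONFIG_PREEMPT_RT_FULL
  # define preempt_disable_rt()         preempt_disable()
  # define preempt_enable_rt()          preempt_enable()
  #else
  # define preempt_disable_rt()         barrier()
  # define preempt_enable_rt()          barrier()
  #endif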
@@ -17,7 +16,7 @@ diff --git a/init/Makefile b/init/Makefile index a3e5ce2bcf08..7779232563ae 100644 --- a/init/Makefile +++ b/init/Makefile -@@ -34,4 +34,4 @@ silent_chk_compile.h = : +@@ -34,4 +34,4 @@ $(obj)/version.o: include/generated/compile.h include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ @@ -64,6 +63,3 @@ index 87f1fc9801d7..f67b15236936 100755 UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length --- -2.20.1 - diff --git a/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch index 1d1503f65..86b842cf9 100644 --- a/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch +++ b/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch @@ -1,9 +1,8 @@ -From bb6f7fcdf67a3a410b786714d43f24dde243ff4e Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 13 Nov 2017 12:56:53 -0500 -Subject: [PATCH 151/283] locking/rt-mutex: fix deadlock in device mapper / +Subject: [PATCH 151/290] locking/rt-mutex: fix deadlock in device mapper / block-IO -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=982f936d6c159820d6df7fee5e09b0c072472bc3 When some block device driver creates a bio and submits it to another block device driver, the bio is added to current->bio_list (in order to @@ -76,6 +75,3 @@ index 1f2dc2dfe2e7..b38c3a92dce8 100644 return slowfn(lock, state, timeout, chwalk, ww_ctx); } --- -2.20.1 - diff --git a/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch index 056cac181..8d9920b40 100644 --- a/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch +++ b/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch @@ -1,8 +1,7 @@ -From 8b1428c9761e45fabdb70309947582526e3bbfbc Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 4 Jan 2019 15:33:21 -0500 -Subject: [PATCH 152/283] locking/rt-mutex: Flush block plug on __down_read() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 152/290] locking/rt-mutex: Flush block plug on __down_read() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5d45bce4eea516a2d4626f2611c57c7f5cdd89cc __down_read() bypasses the rtmutex frontend to call rt_mutex_slowlock_locked() directly, and thus it needs to call @@ -41,6 +40,3 @@ index 660e22caf709..f518495bd6cc 100644 might_sleep(); raw_spin_lock_irq(&m->wait_lock); --- -2.20.1 - diff --git a/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch index 7ba88ce85..21fe21930 100644 --- a/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ b/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch @@ -1,9 +1,8 @@ -From 232cc4794d35bf36a49924022ebd18edb13c8a5d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 16 Nov 2017 16:48:48 +0100 -Subject: [PATCH 153/283] locking/rtmutex: re-init the wait_lock in +Subject: [PATCH 153/290] locking/rtmutex: re-init the wait_lock in 
rt_mutex_init_proxy_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=361d72a145de18a02d4072fead2642a386755146 We could provide a key-class for the lockdep (and fixup all callers) or move the init to all callers (like it was) in order to avoid lockdep @@ -34,6 +33,3 @@ index b38c3a92dce8..94788662b2f2 100644 debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } --- -2.20.1 - diff --git a/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch index 44f772d75..e5a62779a 100644 --- a/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -1,8 +1,7 @@ -From b031672801c6d24d255cc3d458e9a546899c919b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 18:21:04 +0200 -Subject: [PATCH 154/283] ptrace: fix ptrace vs tasklist_lock race -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 154/290] ptrace: fix ptrace vs tasklist_lock race +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=dc2b806b1905e1f7f8538a75cb22fbef4d319f7f As explained by Alexander Fyodorov : @@ -31,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 1797fd3c8cbb..25e9a40f9576 100644 +index f25c9566ee92..32a9ca5dff59 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,12 +101,8 @@ struct task_group; @@ -47,7 +46,7 @@ index 1797fd3c8cbb..25e9a40f9576 100644 #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ (task->state & TASK_NOLOAD) == 0) -@@ -1709,6 +1705,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) +@@ -1717,6 +1713,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -120,7 +119,7 @@ index fed682a01a75..ace2839323de 100644 } spin_unlock_irq(&task->sighand->siglock); diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 868d3395c3cf..b2149a7ed3cd 100644 +index 654e2ec54f9d..fc2826788d16 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1349,6 +1349,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, @@ -161,6 +160,3 @@ index 868d3395c3cf..b2149a7ed3cd 100644 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); --- -2.20.1 - diff --git a/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch b/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch index 415bb9e6d..2d4e134ea 100644 --- a/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch +++ b/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch @@ -1,8 +1,7 @@ -From f9ae4924361c91598ce03771aaa5f99c4bfa8100 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Sep 2017 14:25:13 +0200 -Subject: [PATCH 155/283] rtmutex: annotate sleeping lock context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 155/290] rtmutex: annotate sleeping lock context +Origin: 
https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1dc416b3e49814a54ed09b9248b6ec3183db9f8e The RCU code complains on schedule() within a rcu_readlock() section. The valid scenario on -RT is if a sleeping is held. In order to suppress @@ -54,7 +53,7 @@ index 27c3176d88d2..9eafc34898b4 100644 #define migrate_disable() barrier() #define migrate_enable() barrier() diff --git a/include/linux/sched.h b/include/linux/sched.h -index 25e9a40f9576..8f0bb5f6d39e 100644 +index 32a9ca5dff59..c5f050afff18 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -673,6 +673,15 @@ struct task_struct { @@ -73,7 +72,7 @@ index 25e9a40f9576..8f0bb5f6d39e 100644 #endif #ifdef CONFIG_PREEMPT_RCU -@@ -1802,6 +1811,23 @@ static __always_inline bool need_resched(void) +@@ -1810,6 +1819,23 @@ static __always_inline bool need_resched(void) return unlikely(tif_need_resched()); } @@ -250,10 +249,10 @@ index a97c20ea9bce..564e3927e7b0 100644 !t->rcu_read_unlock_special.b.blocked) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index b2149a7ed3cd..ce1cb23cb78f 100644 +index fc2826788d16..5fa7e1b544d2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7316,4 +7316,49 @@ void migrate_enable(void) +@@ -7353,4 +7353,49 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); @@ -303,6 +302,3 @@ index b2149a7ed3cd..ce1cb23cb78f 100644 +} +EXPORT_SYMBOL(migrate_enable); #endif --- -2.20.1 - diff --git a/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch index 23739a68a..62ed08195 100644 --- a/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ b/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch @@ -1,9 +1,8 @@ -From 55652e517f87d321623cda451d7bfbcdc55528d9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 5 Jul 2018 14:44:51 +0200 -Subject: [PATCH 156/283] sched/migrate_disable: fallback to preempt_disable() +Subject: [PATCH 156/290] sched/migrate_disable: fallback to preempt_disable() instead barrier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a4408a9fddd06393e70db9e3e61ae63973d1d257 On SMP + !RT migrate_disable() is still around. It is not part of spin_lock() anymore so it has almost no users. 
However the futex code has a workaround for @@ -71,7 +70,7 @@ index 9eafc34898b4..ed8413e7140f 100644 { return 0; diff --git a/include/linux/sched.h b/include/linux/sched.h -index 8f0bb5f6d39e..a023e1ba5d8f 100644 +index c5f050afff18..53d92153700b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -667,7 +667,7 @@ struct task_struct { @@ -94,7 +93,7 @@ index 8f0bb5f6d39e..a023e1ba5d8f 100644 # endif #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index ce1cb23cb78f..36f791ff52bc 100644 +index 5fa7e1b544d2..ed44ed8215dd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1031,7 +1031,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma @@ -115,7 +114,7 @@ index ce1cb23cb78f..36f791ff52bc 100644 if (__migrate_disabled(p)) { lockdep_assert_held(&p->pi_lock); -@@ -1144,7 +1144,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -1145,7 +1145,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) goto out; @@ -124,7 +123,7 @@ index ce1cb23cb78f..36f791ff52bc 100644 if (__migrate_disabled(p)) { p->migrate_disable_update = 1; goto out; -@@ -7172,7 +7172,7 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7209,7 +7209,7 @@ const u32 sched_prio_to_wmult[40] = { #undef CREATE_TRACE_POINTS @@ -133,7 +132,7 @@ index ce1cb23cb78f..36f791ff52bc 100644 static inline void update_nr_migratory(struct task_struct *p, long delta) -@@ -7320,45 +7320,44 @@ EXPORT_SYMBOL(migrate_enable); +@@ -7357,45 +7357,44 @@ EXPORT_SYMBOL(migrate_enable); #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) void migrate_disable(void) { @@ -199,6 +198,3 @@ index 5027158d3908..dd6c364d6f01 100644 P(migrate_disable); #endif P(nr_cpus_allowed); --- -2.20.1 - diff --git a/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch index a52981500..03a952366 100644 --- a/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ b/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch @@ -1,9 +1,8 @@ -From 86ad5f1aff0d9992886c176a2f136ef494754c55 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 17:40:42 +0200 -Subject: [PATCH 157/283] locking: don't check for __LINUX_SPINLOCK_TYPES_H on +Subject: [PATCH 157/290] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT archs -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ca5186cf254ac598302e902572f9ec6e5208101e Upstream uses arch_spinlock_t within spinlock_t and requests that spinlock_types.h header file is included first. 
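The check being relaxed is the include-guard error that the low-level type headers carry in mainline, reproduced below; with RT's split lock-type headers they can legitimately be included on their own, so the guard has to get out of the way on -RT (whether it is dropped entirely or merely made conditional is not visible in the hunk that follows):

  /* mainline guard at the top of spinlock_types_up.h and the arch
   * spinlock_types.h headers:
   */
  #ifndef __LINUX_SPINLOCK_TYPES_H
  # error "please don't include this file directly"
  #endif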
@@ -177,6 +176,3 @@ index c09b6407ae1b..b0243ba07fb7 100644 /* * include/linux/spinlock_types_up.h - spinlock type definitions for UP * --- -2.20.1 - diff --git a/debian/patches-rt/0158-rcu-Frob-softirq-test.patch b/debian/patches-rt/0158-rcu-Frob-softirq-test.patch index d984dfbbc..cfbeb0436 100644 --- a/debian/patches-rt/0158-rcu-Frob-softirq-test.patch +++ b/debian/patches-rt/0158-rcu-Frob-softirq-test.patch @@ -1,8 +1,7 @@ -From 3b403e90bb2186f5908b365157b777ed870de348 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 13 Aug 2011 00:23:17 +0200 -Subject: [PATCH 158/283] rcu: Frob softirq test -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 158/290] rcu: Frob softirq test +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8a9338244bc09a7c6c2f6397d445fd1fac3f73ad With RT_FULL we get the below wreckage: @@ -168,6 +167,3 @@ index 564e3927e7b0..429a2f144e19 100644 lockdep_rcu_suspicious(__FILE__, __LINE__, "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", --- -2.20.1 - diff --git a/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch b/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch index 753b98bb4..6bcd61696 100644 --- a/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch +++ b/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch @@ -1,8 +1,7 @@ -From 324e697209ed33e734f64df4cf0ddf60658de5ff Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Oct 2011 11:59:38 -0700 -Subject: [PATCH 159/283] rcu: Merge RCU-bh into RCU-preempt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 159/290] rcu: Merge RCU-bh into RCU-preempt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0cde6b9c8eb03c12c6386be0e528677d3fea03ad The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, @@ -35,7 +34,7 @@ Signed-off-by: Thomas Gleixner 7 files changed, 73 insertions(+), 2 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index b73715c3c3c2..241a4a9577a0 100644 +index e6733d7911e9..08d64e5713fc 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -56,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); @@ -346,6 +345,3 @@ index 81688a133552..6ffafb1b1584 100644 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ --- -2.20.1 - diff --git a/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch b/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch index 5eaa323c3..cf9aabc6d 100644 --- a/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch +++ b/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch @@ -1,8 +1,7 @@ -From 00cbdab312799cf3648a917434e3178644f44f07 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 5 Oct 2011 11:45:18 -0700 -Subject: [PATCH 160/283] rcu: Make ksoftirqd do RCU quiescent states -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 160/290] rcu: Make ksoftirqd do RCU quiescent states +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c601f950fa972271a42584bc6a375e57b9de7ced Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore @@ -112,6 +111,3 @@ index 429a2f144e19..bee9bffeb0ce 100644 /* * Prepare a CPU for idle from an RCU perspective. The first major task --- -2.20.1 - diff --git a/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch index 2a739d26c..c5b910185 100644 --- a/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch +++ b/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -1,8 +1,7 @@ -From e270d045cc4c717e9781c7e4f0d351b63e61858a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 4 Nov 2013 13:21:10 -0800 -Subject: [PATCH 161/283] rcu: Eliminate softirq processing from rcutree -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 161/290] rcu: Eliminate softirq processing from rcutree +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3849b64ca740e816ba3e9c8c188225a500d906ce Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, @@ -418,6 +417,3 @@ index bee9bffeb0ce..2e8737f1010f 100644 static bool rcu_is_callbacks_kthread(void) { return false; --- -2.20.1 - diff --git a/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch b/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch index b5fe9a7c0..c41baf6d0 100644 --- a/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch +++ b/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch @@ -1,8 +1,7 @@ -From ceae4b480cbafa469d9bfb43e2916b8cd4092ec6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 14:43:41 +0200 -Subject: [PATCH 162/283] srcu: use cpu_online() instead custom check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 162/290] srcu: use cpu_online() instead custom check +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f8e14a2b6827eff60c356df5338a5742202fa52e The current check via srcu_online is slightly racy because after looking at srcu_online there could be an interrupt that interrupted us long @@ -91,6 +90,3 @@ index ae716ca783bc..f162a4f54b05 100644 return 0; } --- -2.20.1 - diff --git a/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch b/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch index 9b2c6c7a3..8d9dcf045 100644 --- a/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch +++ b/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch @@ -1,8 +1,7 @@ -From c5179e3d05fb9c649a24846aec42fd142baaba46 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 18:37:12 +0200 -Subject: [PATCH 163/283] srcu: replace local_irqsave() with a locallock -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 163/290] srcu: replace local_irqsave() with a locallock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4223fcff7c7a145c27b6e917d8113a340b4f0fe7 There are two instances which disable interrupts in order to become a stable this_cpu_ptr() pointer. The restore part is coupled with @@ -72,6 +71,3 @@ index df0375453ba1..0f09a1a9e17c 100644 if (needgp) srcu_funnel_gp_start(sp, sdp, s, do_norm); else if (needexp) --- -2.20.1 - diff --git a/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch index 328708bd7..a7c6e9329 100644 --- a/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch +++ b/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch @@ -1,8 +1,7 @@ -From 279a404efccb555b0b9cb897445eb06bab8efe14 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Wed, 12 Oct 2016 11:21:14 -0500 -Subject: [PATCH 164/283] rcu: enable rcu_normal_after_boot by default for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 164/290] rcu: enable rcu_normal_after_boot by default for RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=eb935a53cab85b5354d0dea339bdc8c6aacff1ea The forcing of an expedited grace period is an expensive and very RT-application unfriendly operation, as it forcibly preempts all running @@ -32,6 +31,3 @@ index 6ffafb1b1584..16d8dba23329 100644 module_param(rcu_normal_after_boot, int, 0); #endif /* #ifndef CONFIG_TINY_RCU */ --- -2.20.1 - diff --git a/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch b/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch index d627b6dd6..d0f26d53c 100644 --- a/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch +++ b/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch @@ -1,8 +1,7 @@ -From e88697f5f31d546848145184386ec739cc7aabfd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 28 Jul 2011 13:32:57 +0200 -Subject: [PATCH 165/283] tty/serial/omap: Make the locking RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 165/290] tty/serial/omap: Make the locking RT aware +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c1aa166b3cda7c4fa842730ba6380b6aefde743f The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. 
Redo it to make it work on -RT and @@ -44,6 +43,3 @@ index 6420ae581a80..0f4f41ed9ffa 100644 } static int __init --- -2.20.1 - diff --git a/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch b/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch index 8ac8135a2..e8c33d261 100644 --- a/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch +++ b/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch @@ -1,8 +1,7 @@ -From 5d5a72172d1b6447861b7359fb2fa8e4ea1e707c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 8 Jan 2013 21:36:51 +0100 -Subject: [PATCH 166/283] tty/serial/pl011: Make the locking work on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 166/290] tty/serial/pl011: Make the locking work on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8439d05feecf238ad757c9a011a3dc5efcc313a1 The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. Redo it to make it work on -RT and non-RT. @@ -49,6 +48,3 @@ index 89ade213a1a9..6be86f8c7e6a 100644 clk_disable(uap->clk); } --- -2.20.1 - diff --git a/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch b/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch index 909cc7309..6b5f6d19b 100644 --- a/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch +++ b/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch @@ -1,12 +1,11 @@ -From 1d9fb6ef0391f5e2ee11e6ae76fbdad192da7f6a Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 24 Sep 2018 10:29:01 +0200 -Subject: [PATCH 167/283] tty: serial: pl011: explicitly initialize the flags +Subject: [PATCH 167/290] tty: serial: pl011: explicitly initialize the flags variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f62463ebdcecc357660298ab74a11df27df0adc3 Silence the following gcc warning: @@ -40,6 +39,3 @@ index 6be86f8c7e6a..59b4ab7b50bf 100644 int locked = 1; clk_enable(uap->clk); --- -2.20.1 - diff --git a/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch b/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch index 6a0d274d5..829081a51 100644 --- a/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch +++ b/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch @@ -1,11 +1,10 @@ -From 1e7abe5e3aee5c132a0d65d39713c6f4cb67be9c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Dec 2011 13:05:54 +0100 -Subject: [PATCH 168/283] rt: Improve the serial console PASS_LIMIT +Subject: [PATCH 168/290] rt: Improve the serial console PASS_LIMIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6cb957cb6dd7593e888563c6d3dc8ddfc96d1179 Beyond the warning: @@ -42,6 +41,3 @@ index 8fe3d0ed229e..a2baac4c8b63 100644 #include /* --- -2.20.1 - diff --git a/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch 
b/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch index 2b22a2184..0fca93a21 100644 --- a/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch +++ b/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch @@ -1,8 +1,7 @@ -From 872de798df0e839bfdfdaebdde0eae1b50de4614 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Apr 2016 16:55:02 +0200 -Subject: [PATCH 169/283] tty: serial: 8250: don't take the trylock during oops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 169/290] tty: serial: 8250: don't take the trylock during oops +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ab14019d06506af1681a118b437e278ede26523e An oops with irqs off (panic() from irqsafe hrtimer like the watchdog timer) will lead to a lockdep warning on each invocation and as such @@ -15,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index e26d87b6ffc5..8d85448975d3 100644 +index aa4de6907f77..6b1d46c1df3b 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -3238,10 +3238,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3239,10 +3239,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, serial8250_rpm_get(up); @@ -30,6 +29,3 @@ index e26d87b6ffc5..8d85448975d3 100644 else spin_lock_irqsave(&port->lock, flags); --- -2.20.1 - diff --git a/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch b/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch index 1a94ff29c..36016f47f 100644 --- a/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch +++ b/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch @@ -1,8 +1,7 @@ -From e1d3a8c231f929b0069ddc2dc3059bab193e9d00 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Nov 2016 16:29:32 +0100 -Subject: [PATCH 170/283] locking/percpu-rwsem: Remove preempt_disable variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 170/290] locking/percpu-rwsem: Remove preempt_disable variants +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bd2407192fdb6b4ab9958ae8e7df76b9b0b1ca3f Effective revert commit: @@ -221,6 +220,3 @@ index 79b99d653e03..fb44e237316d 100644 extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); --- -2.20.1 - diff --git a/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch b/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch index 52fb63eaa..fe7150ccd 100644 --- a/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch +++ b/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch @@ -1,9 +1,8 @@ -From 3fe311d6ea2109a5390bffc6870dafee03cab931 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Tue, 15 May 2012 13:53:56 +0800 -Subject: [PATCH 171/283] mm: Protect activate_mm() by +Subject: [PATCH 171/290] mm: Protect activate_mm() by preempt_[disable&enable]_rt() -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=92aa811a04477ea039cc3410d3995550d7701de0 User preempt_*_rt instead of local_irq_*_rt or otherwise there will be warning on ARM like below: @@ -38,7 +37,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 4 insertions(+) diff --git a/fs/exec.c b/fs/exec.c -index 433b1257694a..352c1a6fa6a9 100644 +index 561ea64829ec..0d95c6349fb1 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1028,12 +1028,14 @@ static int exec_mmap(struct mm_struct *mm) @@ -76,6 +75,3 @@ index 3e612ae748e9..d0ccc070979f 100644 task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); --- -2.20.1 - diff --git a/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch b/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch index ef4f2178f..2621dcb06 100644 --- a/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch +++ b/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch @@ -1,9 +1,8 @@ -From a82c4acdfe13556ab98571ef02f3a3eb9d29dd6e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 12:32:34 +0200 -Subject: [PATCH 172/283] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD +Subject: [PATCH 172/290] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e3be20e797ac5235e6dea292095b4965758e5fae Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. However @@ -56,6 +55,3 @@ index 6e0022326afe..10225a9135fb 100644 d_hash_shift = 32 - d_hash_shift; } --- -2.20.1 - diff --git a/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index 7e414a08c..7a3990617 100644 --- a/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -1,9 +1,8 @@ -From 09acbcc860c548ba124fb4402beed59790f49218 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 20 Oct 2017 11:29:53 +0200 -Subject: [PATCH 173/283] fs/dcache: disable preemption on i_dir_seq's write +Subject: [PATCH 173/290] fs/dcache: disable preemption on i_dir_seq's write side -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b76c5592f74b41c5b7b2bca53c5276466618cc91 i_dir_seq is an opencoded seqcounter. 
Based on the code it looks like we could have two writers in parallel despite the fact that the d_lock is @@ -20,9 +19,8 @@ Signed-off-by: Sebastian Andrzej Siewior --- fs/dcache.c | 12 +++++++----- fs/inode.c | 2 +- - fs/libfs.c | 6 ++++-- include/linux/fs.h | 2 +- - 4 files changed, 13 insertions(+), 9 deletions(-) + 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 10225a9135fb..dcde8ffe384c 100644 @@ -82,40 +80,8 @@ index 5c63693326bb..c3e17dcbb558 100644 inode->i_rdev = 0; inode->dirtied_when = 0; -diff --git a/fs/libfs.c b/fs/libfs.c -index 0fb590d79f30..cd95874a1952 100644 ---- a/fs/libfs.c -+++ b/fs/libfs.c -@@ -90,7 +90,7 @@ static struct dentry *next_positive(struct dentry *parent, - struct list_head *from, - int count) - { -- unsigned *seq = &parent->d_inode->i_dir_seq, n; -+ unsigned *seq = &parent->d_inode->__i_dir_seq, n; - struct dentry *res; - struct list_head *p; - bool skipped; -@@ -123,8 +123,9 @@ static struct dentry *next_positive(struct dentry *parent, - static void move_cursor(struct dentry *cursor, struct list_head *after) - { - struct dentry *parent = cursor->d_parent; -- unsigned n, *seq = &parent->d_inode->i_dir_seq; -+ unsigned n, *seq = &parent->d_inode->__i_dir_seq; - spin_lock(&parent->d_lock); -+ preempt_disable_rt(); - for (;;) { - n = *seq; - if (!(n & 1) && cmpxchg(seq, n, n + 1) == n) -@@ -137,6 +138,7 @@ static void move_cursor(struct dentry *cursor, struct list_head *after) - else - list_add_tail(&cursor->d_child, &parent->d_subdirs); - smp_store_release(seq, n + 2); -+ preempt_enable_rt(); - spin_unlock(&parent->d_lock); - } - diff --git a/include/linux/fs.h b/include/linux/fs.h -index d4e1b43a53c3..72749feed0e3 100644 +index 92420009b9bc..9b2b707e9112 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -678,7 +678,7 @@ struct inode { @@ -127,6 +93,3 @@ index d4e1b43a53c3..72749feed0e3 100644 }; __u32 i_generation; --- -2.20.1 - diff --git a/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch index 3c1673364..58b74a056 100644 --- a/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch +++ b/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch @@ -1,9 +1,8 @@ -From d1843e5a615442fd804b363f5593ddbf58bc6688 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:57 -0500 -Subject: [PATCH 174/283] squashfs: make use of local lock in multi_cpu +Subject: [PATCH 174/290] squashfs: make use of local lock in multi_cpu decompressor -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d33cd90abebb1208dcdad47bfaae55dfe6210384 Currently, the squashfs multi_cpu decompressor makes use of get_cpu_ptr()/put_cpu_ptr(), which unconditionally disable preemption @@ -67,6 +66,3 @@ index 23a9c28ad8ea..6a73c4fa88e7 100644 if (res < 0) ERROR("%s decompression failed, data probably corrupt\n", --- -2.20.1 - diff --git a/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch index afc218fb8..3f386f94a 100644 --- a/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch +++ b/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch @@ -1,8 +1,7 @@ -From 4f73daf6f6c5ecfca2198293c10701d2d777a451 Mon Sep 
17 00:00:00 2001 From: Daniel Wagner Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 175/283] thermal: Defer thermal wakups to threads -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 175/290] thermal: Defer thermal wakups to threads +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d72bb8e181f96a5083272f0f0a81398734754a1e On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will call schedule while we run in irq context. @@ -132,6 +131,3 @@ index 1ef937d799e4..a5991cbb408f 100644 } module_exit(pkg_temp_thermal_exit) --- -2.20.1 - diff --git a/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch b/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch index a5f584cec..740840542 100644 --- a/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch +++ b/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch @@ -1,8 +1,7 @@ -From c3a958f7c3d63e98096d5c662e8346b5fc0fafec Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 15:10:33 +0100 -Subject: [PATCH 176/283] x86/fpu: Disable preemption around local_bh_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 176/290] x86/fpu: Disable preemption around local_bh_disable() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b7106c9d220d744e3fe1c149b187b48876a9e5f4 __fpu__restore_sig() restores the content of the FPU state in the CPUs and in order to avoid concurency it disbles BH. On !RT it also disables @@ -33,6 +32,3 @@ index d99a8ee9e185..5e0274a94133 100644 return err; } else { --- -2.20.1 - diff --git a/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch b/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch index 5e4fb4487..1fc9dcfba 100644 --- a/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch +++ b/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch @@ -1,8 +1,7 @@ -From 2bc1814482c92194daf20b98841321870709e4dd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 16:35:35 +0200 -Subject: [PATCH 177/283] fs/epoll: Do not disable preemption on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 177/290] fs/epoll: Do not disable preemption on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=723ebe37abb09e4f0d3f367217307266d12f04da ep_call_nested() takes a sleeping lock so we can't disable preemption. 
The light version is enough since ep_call_nested() doesn't mind beeing @@ -32,6 +31,3 @@ index 58f48ea0db23..a41120a34e6d 100644 } #else --- -2.20.1 - diff --git a/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch index bfd85a176..76bbc5dc4 100644 --- a/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch +++ b/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch @@ -1,9 +1,8 @@ -From 974d73cf26c5d957984cd6078582894a25a0600a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 11:39:36 +0200 -Subject: [PATCH 178/283] mm/vmalloc: Another preempt disable region which +Subject: [PATCH 178/290] mm/vmalloc: Another preempt disable region which sucks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=984f0aea9084e80c662e605b8683389dd685232d Avoid the preempt disable version of get_cpu_var(). The inner-lock should provide enough serialisation. @@ -14,7 +13,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index a46ec261a44e..5c6939cc28b7 100644 +index d8e877365f9f..9b7cf993cada 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -852,7 +852,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) @@ -68,6 +67,3 @@ index a46ec261a44e..5c6939cc28b7 100644 rcu_read_unlock(); /* Allocate new block if nothing was found */ --- -2.20.1 - diff --git a/debian/patches-rt/0179-block-mq-use-cpu_light.patch b/debian/patches-rt/0179-block-mq-use-cpu_light.patch index e6b440faa..f788e80c3 100644 --- a/debian/patches-rt/0179-block-mq-use-cpu_light.patch +++ b/debian/patches-rt/0179-block-mq-use-cpu_light.patch @@ -1,8 +1,7 @@ -From a8a7839dcded1152098641824de0792247d0af82 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 10:37:23 +0200 -Subject: [PATCH 179/283] block: mq: use cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 179/290] block: mq: use cpu_light() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=89d6f7956e2046a5b49e51aa3f200b64023fb8aa there is a might sleep splat because get_cpu() disables preemption and later we grab a lock. As a workaround for this we use get_cpu_light(). 
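The helpers swapped in by the hunk below keep the task on its current CPU without disabling preemption, so the sleeping spinlock taken afterwards is legal on RT. Their assumed shape, taken from the migrate_disable() groundwork earlier in the series:

  #define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
  #define put_cpu_light()  migrate_enable()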
@@ -13,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.h b/block/blk-mq.h -index 9497b47e2526..e55c8599b90b 100644 +index 5ad9251627f8..5a96c97991b6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, @@ -31,6 +30,3 @@ index 9497b47e2526..e55c8599b90b 100644 } struct blk_mq_alloc_data { --- -2.20.1 - diff --git a/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch b/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch index 957822be2..e6023a0a7 100644 --- a/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch +++ b/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch @@ -1,8 +1,7 @@ -From f981eb8d8253e9496a5551600aa89d6ac82f153d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 180/283] block/mq: do not invoke preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 180/290] block/mq: do not invoke preempt_disable() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ac4e6d6f8a4fc0bb512302adf00e280a5a4ec275 preempt_disable() and get_cpu() don't play well together with the sleeping locks it tries to allocate later. @@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c -index e3e7a88e03a6..b07332cab8ff 100644 +index 4aa3284874f6..376fb90de054 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -570,7 +570,7 @@ static void __blk_mq_complete_request(struct request *rq) @@ -35,7 +34,7 @@ index e3e7a88e03a6..b07332cab8ff 100644 } static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) -@@ -1368,14 +1368,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, +@@ -1371,14 +1371,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { @@ -53,6 +52,3 @@ index e3e7a88e03a6..b07332cab8ff 100644 } kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, --- -2.20.1 - diff --git a/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch index 07a8ba94e..da708db20 100644 --- a/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch +++ b/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch @@ -1,8 +1,7 @@ -From 806f56991b1c799b77919980a4331ebd857eca6e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Jan 2015 15:10:08 +0100 -Subject: [PATCH 181/283] block/mq: don't complete requests via IPI -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 181/290] block/mq: don't complete requests via IPI +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ba611c8f81858be6e3e56e1e556ee058d65df3c7 The IPI runs in hardirq context and there are sleeping locks. This patch moves the completion into a workqueue. 
@@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c -index 682bc561b77b..b6010a1c3aad 100644 +index 074ae9376189..0edb346263b8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq) @@ -30,7 +29,7 @@ index 682bc561b77b..b6010a1c3aad 100644 rq->q = q; rq->__sector = (sector_t) -1; diff --git a/block/blk-mq.c b/block/blk-mq.c -index b07332cab8ff..a01b6aba61fa 100644 +index 376fb90de054..5808446e4758 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, @@ -88,10 +87,10 @@ index b07332cab8ff..a01b6aba61fa 100644 rq->q->softirq_done_fn(rq); } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h -index 1da59c16f637..04c15b5ca76c 100644 +index 2885dce1ad49..8dbb9ecf9993 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h -@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +@@ -256,7 +256,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } @@ -101,7 +100,7 @@ index 1da59c16f637..04c15b5ca76c 100644 void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 6980014357d4..f93ae914abda 100644 +index d51e10f50e75..f1960add94df 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -149,6 +149,9 @@ enum mq_rq_state { @@ -114,6 +113,3 @@ index 6980014357d4..f93ae914abda 100644 struct blk_mq_ctx *mq_ctx; int cpu; --- -2.20.1 - diff --git a/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch index bab51162f..ca8d4e762 100644 --- a/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch +++ b/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch @@ -1,8 +1,7 @@ -From 149113bd6da78fc421b65edab96f402b3fdfc0f8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 6 Apr 2010 16:51:31 +0200 -Subject: [PATCH 182/283] md: raid5: Make raid5_percpu handling RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 182/290] md: raid5: Make raid5_percpu handling RT aware +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ca0dd19a307514cdb9330e8439eb01949dba8bfb __raid_run_ops() disables preemption with get_cpu() around the access to the raid5_percpu variables. 
That causes scheduling while atomic @@ -20,7 +19,7 @@ Tested-by: Udo van den Heuvel 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index f237d6f30752..adec2947c3e1 100644 +index 4a5aad26ded7..7245222787f7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -44,7 +43,7 @@ index f237d6f30752..adec2947c3e1 100644 } static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) -@@ -6811,6 +6813,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) +@@ -6815,6 +6817,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) __func__, cpu); return -ENOMEM; } @@ -52,7 +51,7 @@ index f237d6f30752..adec2947c3e1 100644 return 0; } -@@ -6821,7 +6824,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) +@@ -6825,7 +6828,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; @@ -72,6 +71,3 @@ index 8474c224127b..a3bf907ab2af 100644 struct page *spare_page; /* Used when checking P/Q in raid6 */ struct flex_array *scribble; /* space for constructing buffer * lists and performing address --- -2.20.1 - diff --git a/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch b/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch index f9cf763e1..954158b27 100644 --- a/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch +++ b/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch @@ -1,8 +1,7 @@ -From e4e5f1cc93c38f1bc914494fc1a4a0c4388e42e1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 20:51:03 +0100 -Subject: [PATCH 183/283] rt: Introduce cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 183/290] rt: Introduce cpu_chill() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=659e4f914a6993cf0589d66323ab73e8f339b97b Retry loops on RT might loop forever when the modifying side was preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() @@ -108,6 +107,3 @@ index cfa3599fa789..851b2134e77f 100644 /* * Functions related to boot-time initialization: */ --- -2.20.1 - diff --git a/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch b/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch index f2623ae2c..acdf9da89 100644 --- a/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch +++ b/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch @@ -1,8 +1,7 @@ -From 33f5a28483e30eb660e9ebc75308de9618a279ff Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:59:15 +0100 -Subject: [PATCH 184/283] hrtimer: Don't lose state in cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 184/290] hrtimer: Don't lose state in cpu_chill() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=90daf635e3dc26a4457ccb0d7c6833364ace0588 In cpu_chill() the state is set to TASK_UNINTERRUPTIBLE and a timer is programmed. 
On return the state is always TASK_RUNNING which means we @@ -43,6 +42,3 @@ index 851b2134e77f..6f2736ec4b8e 100644 } EXPORT_SYMBOL(cpu_chill); #endif --- -2.20.1 - diff --git a/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch b/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch index 956475285..5edad5943 100644 --- a/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch +++ b/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch @@ -1,9 +1,8 @@ -From 65b275b3fee3444107af46dd38c427834861ea37 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 12:31:10 +0100 -Subject: [PATCH 185/283] hrtimer: cpu_chill(): save task state in +Subject: [PATCH 185/290] hrtimer: cpu_chill(): save task state in ->saved_state() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=257cbf0f7494e27b80ba0f94daaabb3198e02305 In the previous change I saved the current task state on stack. This was bad because while the task is scheduled-out it might receive a wake-up. @@ -58,6 +57,3 @@ index 6f2736ec4b8e..e1040b80362c 100644 } EXPORT_SYMBOL(cpu_chill); #endif --- -2.20.1 - diff --git a/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch index b12f53318..77780c4dc 100644 --- a/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -1,9 +1,8 @@ -From a0f3744dd80367c9d401ebb5764b426fa7634006 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 186/283] block: blk-mq: move blk_queue_usage_counter_release() +Subject: [PATCH 186/290] block: blk-mq: move blk_queue_usage_counter_release() into process context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c42562ee54e99ab855db63ca0f224c76515fe664 | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 @@ -52,10 +51,10 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c -index b6010a1c3aad..38b1bd493165 100644 +index 0edb346263b8..06fcd081696e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c -@@ -967,12 +967,21 @@ void blk_queue_exit(struct request_queue *q) +@@ -970,12 +970,21 @@ void blk_queue_exit(struct request_queue *q) percpu_ref_put(&q->q_usage_counter); } @@ -78,7 +77,7 @@ index b6010a1c3aad..38b1bd493165 100644 } static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1069,6 +1078,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, +@@ -1072,6 +1081,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); @@ -86,7 +85,7 @@ index b6010a1c3aad..38b1bd493165 100644 /* * Init percpu_ref in atomic mode so that it's faster to shutdown. 
-@@ -3958,6 +3968,8 @@ int __init blk_dev_init(void) +@@ -3961,6 +3971,8 @@ int __init blk_dev_init(void) if (!kblockd_workqueue) panic("Failed to create kblockd\n"); @@ -96,7 +95,7 @@ index b6010a1c3aad..38b1bd493165 100644 sizeof(struct request), 0, SLAB_PANIC, NULL); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index f93ae914abda..940c794042ae 100644 +index f1960add94df..7b7c0bc6a514 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,7 @@ @@ -107,7 +106,7 @@ index f93ae914abda..940c794042ae 100644 struct module; struct scsi_ioctl_command; -@@ -649,6 +650,7 @@ struct request_queue { +@@ -655,6 +656,7 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; @@ -115,6 +114,3 @@ index f93ae914abda..940c794042ae 100644 struct percpu_ref q_usage_counter; struct list_head all_q_node; --- -2.20.1 - diff --git a/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch b/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch index 10488914a..b2c13e73b 100644 --- a/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch +++ b/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch @@ -1,8 +1,7 @@ -From 21b09ece0276ba2e189265b580648e8505039067 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 18:28:26 +0100 -Subject: [PATCH 187/283] block: Use cpu_chill() for retry loops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 187/290] block: Use cpu_chill() for retry loops +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9cf22ca96dd1665b30609658a308c09ecb55c1c1 Retry loops on RT might loop forever when the modifying side was preempted. Steven also observed a live lock when there was a @@ -46,6 +45,3 @@ index 01580f88fcb3..98d87e52ccdc 100644 goto retry; } } --- -2.20.1 - diff --git a/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch b/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch index 5bc96a53c..acd2f3522 100644 --- a/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch +++ b/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch @@ -1,8 +1,7 @@ -From 0a339e66574cbd8cf1cb2b146d2b960db46a2803 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:00:34 +0100 -Subject: [PATCH 188/283] fs: dcache: Use cpu_chill() in trylock loops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 188/290] fs: dcache: Use cpu_chill() in trylock loops +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2daf2bfc05c4859befc797ba426cc6d41fa0c72d Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system @@ -61,6 +60,3 @@ index 1fce41ba3535..5dc970027e30 100644 /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. 
So we must not load that until --- -2.20.1 - diff --git a/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch b/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch index c00ba794e..45c00c1a6 100644 --- a/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch +++ b/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch @@ -1,8 +1,7 @@ -From 3e88f3c941795af8750994fd1a26f7031b691be6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:10:04 +0100 -Subject: [PATCH 189/283] net: Use cpu_chill() instead of cpu_relax() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 189/290] net: Use cpu_chill() instead of cpu_relax() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4e739662f4d14956803c5fa64a1ed3b08275bba8 Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system @@ -15,7 +14,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index 93b5a4200585..47009dddb740 100644 +index 7204e7bbebb0..2c22fa8cf9bf 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -63,6 +63,7 @@ @@ -65,6 +64,3 @@ index 0b347f46b2f4..f395f06031bc 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch index 899c29af3..8af63bf75 100644 --- a/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -1,8 +1,7 @@ -From 4625df6903b8b9c8ebbcddea1660e7df3e4f9efa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 14:35:49 +0200 -Subject: [PATCH 190/283] fs/dcache: use swait_queue instead of waitqueue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 190/290] fs/dcache: use swait_queue instead of waitqueue +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=92858e88d7c3572ca27dfe00a007a9971d4c720d __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock() which disables preemption. As a workaround convert it to swait. 
@@ -85,7 +84,7 @@ index dcde8ffe384c..b2a00f3ff7df 100644 hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c -index 82a13221775e..58324a93e3c0 100644 +index e7a2a988533d..4691605ea4aa 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1203,7 +1203,7 @@ static int fuse_direntplus_link(struct file *file, @@ -120,7 +119,7 @@ index 914178cdbe94..2a8c41bc227f 100644 if (unlikely(IS_DEADDIR(dir_inode))) return -ENOENT; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index 62afe8ca1e36..9818a5dfb472 100644 +index 17f8a9259971..6642f0c321ef 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -445,7 +445,7 @@ static @@ -132,7 +131,7 @@ index 62afe8ca1e36..9818a5dfb472 100644 struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); -@@ -1459,7 +1459,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, +@@ -1495,7 +1495,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned open_flags, umode_t mode) { @@ -164,10 +163,10 @@ index ce9100b5604d..839bfa76f41e 100644 status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/proc/base.c b/fs/proc/base.c -index f999e8bd3771..bf9476600c73 100644 +index 3b9b726b1a6c..a45d4d640f01 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -1872,7 +1872,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, +@@ -1890,7 +1890,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { @@ -177,10 +176,10 @@ index f999e8bd3771..bf9476600c73 100644 if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index 7325baa8f9d4..31f25ff3999f 100644 +index c95f32b83a94..75f500cb7e74 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c -@@ -677,7 +677,7 @@ static bool proc_sys_fill_cache(struct file *file, +@@ -681,7 +681,7 @@ static bool proc_sys_fill_cache(struct file *file, child = d_lookup(dir, &qname); if (!child) { @@ -212,7 +211,7 @@ index 0880baefd85f..8b4d6c8c1f7f 100644 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h -index bd1c889a9ed9..1fc27eb1f021 100644 +index cab24a127feb..73b0d19ef0d9 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1549,7 +1549,7 @@ struct nfs_unlinkdata { @@ -236,6 +235,3 @@ index c7cb30cdd1b7..119a56d7f739 100644 raw_spin_lock_irq(&q->lock); list_splice_init(&q->task_list, &tmp); while (!list_empty(&tmp)) { --- -2.20.1 - diff --git a/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch b/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch index 208c26557..74c9bf638 100644 --- a/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch +++ b/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch @@ -1,8 +1,7 @@ -From 4018ce6d082178ff8281c68418cb65803b58482e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Jul 2013 15:26:54 +0200 -Subject: [PATCH 191/283] workqueue: Use normal rcu -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 191/290] workqueue: Use normal rcu +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=99a059bc619abcf186aa94c0358a8ac0dff692fb There is no need for sched_rcu. 
The undocumented reason why sched_rcu is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by @@ -355,6 +354,3 @@ index cd8b61bded78..88d7db5e0105 100644 return written; } --- -2.20.1 - diff --git a/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch index 2d5b38bd8..b4e88c4a0 100644 --- a/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch +++ b/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch @@ -1,9 +1,8 @@ -From c6764ccb97710b9c7026328c5403d2fac671b693 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:42:26 +0200 -Subject: [PATCH 192/283] workqueue: Use local irq lock instead of irq disable +Subject: [PATCH 192/290] workqueue: Use local irq lock instead of irq disable regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e81c3e61f9acdd14e03dca56c2755c8ccb34e7a7 Use a local_irq_lock as a replacement for irq off regions. We keep the semantic of irq-off in regard to the pool->lock and remain preemptible. @@ -181,6 +180,3 @@ index 88d7db5e0105..d168a5581c7f 100644 return ret; } --- -2.20.1 - diff --git a/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch b/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch index 0856d4f67..e1af99412 100644 --- a/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch +++ b/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch @@ -1,8 +1,7 @@ -From 95e529176f4d5f1da0194d25b6a7434f5dc66590 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 1 Jul 2013 11:02:42 +0200 -Subject: [PATCH 193/283] workqueue: Prevent workqueue versus ata-piix livelock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 193/290] workqueue: Prevent workqueue versus ata-piix livelock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8361189669ab897311b896e43b3e9827d5cb460d An Intel i7 system regularly detected rcu_preempt stalls after the kernel was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no @@ -134,6 +133,3 @@ index d168a5581c7f..0a11d2f64424 100644 return -EAGAIN; } --- -2.20.1 - diff --git a/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch b/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch index 5a2485843..e3ce82ee3 100644 --- a/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch +++ b/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch @@ -1,8 +1,7 @@ -From 2a67f032ed0f4dc51e3cfbba38a8b4bedbf3f9a0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jun 2011 19:47:03 +0200 -Subject: [PATCH 194/283] sched: Distangle worker accounting from rqlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 194/290] sched: Distangle worker accounting from rqlock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e6b7d9334dd365409ae319c4ae71c6a69a3cf9d8 The worker accounting for cpu bound workers is plugged into the core scheduler code and the wakeup code. 
This is not a hard requirement and @@ -34,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 47 insertions(+), 100 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 36f791ff52bc..08052198031a 100644 +index ed44ed8215dd..7a0061839d77 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1704,10 +1704,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl @@ -105,7 +104,7 @@ index 36f791ff52bc..08052198031a 100644 /** * wake_up_process - Wake up a specific process * @p: The process to be woken up. -@@ -3520,21 +3466,6 @@ static void __sched notrace __schedule(bool preempt) +@@ -3561,21 +3507,6 @@ static void __sched notrace __schedule(bool preempt) atomic_inc(&rq->nr_iowait); delayacct_blkio_start(); } @@ -127,7 +126,7 @@ index 36f791ff52bc..08052198031a 100644 } switch_count = &prev->nvcsw; } -@@ -3594,6 +3525,20 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -3635,6 +3566,20 @@ static inline void sched_submit_work(struct task_struct *tsk) { if (!tsk->state || tsk_is_pi_blocked(tsk)) return; @@ -148,7 +147,7 @@ index 36f791ff52bc..08052198031a 100644 /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. -@@ -3602,6 +3547,12 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -3643,6 +3588,12 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } @@ -161,7 +160,7 @@ index 36f791ff52bc..08052198031a 100644 asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; -@@ -3612,6 +3563,7 @@ asmlinkage __visible void __sched schedule(void) +@@ -3653,6 +3604,7 @@ asmlinkage __visible void __sched schedule(void) __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); @@ -286,6 +285,3 @@ index 66fbb5a9e633..30cfed226b39 100644 +void wq_worker_sleeping(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ --- -2.20.1 - diff --git a/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch b/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch index f05ab0ddb..bed049652 100644 --- a/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch +++ b/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch @@ -1,8 +1,7 @@ -From a51785a98d1a72ead35a11a9fc57eaaf37789736 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:41:35 +0200 -Subject: [PATCH 195/283] debugobjects: Make RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 195/290] debugobjects: Make RT aware +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=51f4d617b46bda60b9220ca45de034aab6c76877 Avoid filling the pool / allocating memory with irqs off(). 
@@ -27,6 +26,3 @@ index 14afeeb7d6ef..e28481c402ae 100644 db = get_bucket((unsigned long) addr); --- -2.20.1 - diff --git a/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch b/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch index 5d7b32ce2..737286fd4 100644 --- a/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch +++ b/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch @@ -1,8 +1,7 @@ -From 57407205aa8a3eacf4fb95e6ae48e68850c42cf4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Feb 2012 12:03:30 +0100 -Subject: [PATCH 196/283] seqlock: Prevent rt starvation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 196/290] seqlock: Prevent rt starvation +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=417244b09dae189643c08ec5fa4b8e328cc85ab3 If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. @@ -190,6 +189,3 @@ index beeeed126872..6dd1765e22ec 100644 const struct net_device *dev) { unsigned int seq; --- -2.20.1 - diff --git a/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch index 5f7a3b86d..bd525744b 100644 --- a/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ b/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch @@ -1,9 +1,8 @@ -From 69eb884bf0625cc689fe528f261b51906bf389d1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 18 Feb 2015 16:05:28 +0100 -Subject: [PATCH 197/283] sunrpc: Make svc_xprt_do_enqueue() use +Subject: [PATCH 197/290] sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1449fd695efd50e04504ff47612f7ae5fc5f9687 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd @@ -56,6 +55,3 @@ index 6cf0fd37cbf0..48c0a0b90946 100644 trace_svc_xprt_do_enqueue(xprt, rqstp); } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); --- -2.20.1 - diff --git a/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch b/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch index 24ded5272..d3f12830b 100644 --- a/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch +++ b/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch @@ -1,8 +1,7 @@ -From 21c221625426b72acf5aad55261378a107daab99 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 15:38:34 +0200 -Subject: [PATCH 198/283] net: Use skbufhead with raw lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 198/290] net: Use skbufhead with raw lock +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5cb81c78a721cc47b52a8eba0ae24d8d44c8e349 Use the rps lock as rawlock so we can keep irq-off regions. It looks low latency. 
However we can't kfree() from this context therefore we defer this @@ -53,7 +52,7 @@ index 28baccb1efd5..b4412944db54 100644 struct lock_class_key *class) { diff --git a/net/core/dev.c b/net/core/dev.c -index abaf8a73403b..616429a4715c 100644 +index defca5df6baa..794c64b0a6ce 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -217,14 +217,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) @@ -141,7 +140,7 @@ index abaf8a73403b..616429a4715c 100644 for (;;) { struct napi_struct *n; -@@ -9323,10 +9336,13 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -9325,10 +9338,13 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -156,7 +155,7 @@ index abaf8a73403b..616429a4715c 100644 return 0; } -@@ -9635,8 +9651,9 @@ static int __init net_dev_init(void) +@@ -9639,8 +9655,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); @@ -168,6 +167,3 @@ index abaf8a73403b..616429a4715c 100644 #ifdef CONFIG_XFRM_OFFLOAD skb_queue_head_init(&sd->xfrm_backlog); #endif --- -2.20.1 - diff --git a/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch index 678fdcb76..937986420 100644 --- a/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ b/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -1,8 +1,7 @@ -From 2cb581744045c7aea758c05bbc5c84389753b1a5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jan 2016 15:55:02 +0100 -Subject: [PATCH 199/283] net: move xmit_recursion to per-task variable on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 199/290] net: move xmit_recursion to per-task variable on -RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e3b9f93dd4b63400c35e346f0647e8b27be7ff22 A softirq on -RT can be preempted. 
That means one task is in __dev_queue_xmit(), gets preempted and another task may enter @@ -184,10 +183,10 @@ index 384c63ecb9ae..b6a75296eb46 100644 } diff --git a/include/linux/sched.h b/include/linux/sched.h -index a023e1ba5d8f..a9a5edfa9689 100644 +index 53d92153700b..5e1cc92c2f5c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1208,6 +1208,9 @@ struct task_struct { +@@ -1216,6 +1216,9 @@ struct task_struct { #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; @@ -198,7 +197,7 @@ index a023e1ba5d8f..a9a5edfa9689 100644 int pagefault_disabled; #ifdef CONFIG_MMU diff --git a/net/core/dev.c b/net/core/dev.c -index 616429a4715c..1a8677236939 100644 +index 794c64b0a6ce..13531bd05ffb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3537,8 +3537,10 @@ static void skb_update_prio(struct sk_buff *skb) @@ -249,7 +248,7 @@ index 616429a4715c..1a8677236939 100644 queue->dev = dev; #ifdef CONFIG_BQL diff --git a/net/core/filter.c b/net/core/filter.c -index 34ec9324737b..03925960fb5c 100644 +index e6fa88506c00..b3b9b8d8a28d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) @@ -273,6 +272,3 @@ index 34ec9324737b..03925960fb5c 100644 return ret; } --- -2.20.1 - diff --git a/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch index b876fd9eb..05511a47e 100644 --- a/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ b/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch @@ -1,9 +1,8 @@ -From 713b395adee9bc6d03ba74839407be41a05a6e89 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 15:39:05 +0100 -Subject: [PATCH 200/283] net: provide a way to delegate processing a softirq +Subject: [PATCH 200/290] net: provide a way to delegate processing a softirq to ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=198b2ed6ab2a6cc2020ab7768ad92c777faf121b If the NET_RX uses up all of his budget it moves the following NAPI invocations into the `ksoftirqd`. On -RT it does not do so. Instead it @@ -72,7 +71,7 @@ index 27a4bb2303d0..25bcf2f2714b 100644 * This function must run with irqs disabled! 
*/ diff --git a/net/core/dev.c b/net/core/dev.c -index 1a8677236939..0da36fb20153 100644 +index 13531bd05ffb..3bbe71c93b04 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6382,7 +6382,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) @@ -84,6 +83,3 @@ index 1a8677236939..0da36fb20153 100644 net_rps_action_and_irq_enable(sd); out: --- -2.20.1 - diff --git a/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch index 09646ca05..3dc845d00 100644 --- a/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ b/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch @@ -1,9 +1,8 @@ -From d81c8e92081b67d58e45585afb0020c6759e671e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 30 Mar 2016 13:36:29 +0200 -Subject: [PATCH 201/283] net: dev: always take qdisc's busylock in +Subject: [PATCH 201/290] net: dev: always take qdisc's busylock in __dev_xmit_skb() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bb88f29ae0c936e363f1c9792509b6dfc48086da The root-lock is dropped before dev_hard_start_xmit() is invoked and after setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away @@ -22,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c -index 0da36fb20153..305bf1240e8a 100644 +index 3bbe71c93b04..051b3708e180 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3465,7 +3465,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, @@ -37,6 +36,3 @@ index 0da36fb20153..305bf1240e8a 100644 if (unlikely(contended)) spin_lock(&q->busylock); --- -2.20.1 - diff --git a/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch index 0e4c548b9..566128d5d 100644 --- a/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ b/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch @@ -1,8 +1,7 @@ -From f0d7dcb8d6664bb3c716c75710d0a37f3ed3d107 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 17:36:35 +0200 -Subject: [PATCH 202/283] net/Qdisc: use a seqlock instead seqcount -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 202/290] net/Qdisc: use a seqlock instead seqcount +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=582f57023bf1845fd98e522a850011a7f8d9b966 The seqcount disables preemption on -RT while it is held which can't remove. 
Also we don't want the reader to spin for ages if the writer is @@ -106,7 +105,7 @@ index 000000000000..a7034298a82a + +#endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h -index c44da48de7df..c85ac38f7fa9 100644 +index c9cd5086bd54..b6328680dc71 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ @@ -166,7 +165,7 @@ index c44da48de7df..c85ac38f7fa9 100644 if (qdisc->flags & TCQ_F_NOLOCK) spin_unlock(&qdisc->seqlock); } -@@ -453,7 +468,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) +@@ -458,7 +473,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) return qdisc_lock(root); } @@ -242,7 +241,7 @@ index e2fd8baec65f..8bab88738691 100644 struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index be7cd140b2a3..5b8f90de0615 100644 +index 84fdc4857771..3c5c51657e1a 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1166,7 +1166,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, @@ -255,10 +254,10 @@ index be7cd140b2a3..5b8f90de0615 100644 err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index 31b9c2b415b4..b0cc57ff96e3 100644 +index 0b9c494f64b0..c618e5c5a1b1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c -@@ -570,7 +570,11 @@ struct Qdisc noop_qdisc = { +@@ -575,7 +575,11 @@ struct Qdisc noop_qdisc = { .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, @@ -270,7 +269,7 @@ index 31b9c2b415b4..b0cc57ff96e3 100644 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), }; EXPORT_SYMBOL(noop_qdisc); -@@ -859,9 +863,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, +@@ -864,9 +868,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, lockdep_set_class(&sch->busylock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); @@ -288,6 +287,3 @@ index 31b9c2b415b4..b0cc57ff96e3 100644 sch->ops = ops; sch->flags = ops->static_flags; --- -2.20.1 - diff --git a/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch index 5b0537d1b..51d46de34 100644 --- a/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch +++ b/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch @@ -1,12 +1,11 @@ -From 65c80c3225a3398c6f65566593d114a2a92baa6c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:21:56 +0200 -Subject: [PATCH 203/283] net: add back the missing serialization in +Subject: [PATCH 203/290] net: add back the missing serialization in ip_send_unicast_reply() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e829741b52f421beee05a47cb7f53472c31dd355 Some time ago Sami Pietikäinen reported a crash on -RT in ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire @@ -94,6 +93,3 @@ index b76cf96d5cfe..51358c73dada 100644 local_bh_enable(); } --- -2.20.1 - diff --git a/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch b/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch index 
13a30b850..cc41d0d07 100644 --- a/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch +++ b/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch @@ -1,8 +1,7 @@ -From 46a531f02facc10cbd982f8de9c62e704d8f1815 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:54:09 +0200 -Subject: [PATCH 204/283] net: add a lock around icmp_sk() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 204/290] net: add a lock around icmp_sk() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5ca36394a2695a653822ea30a6aeb8323c88ebfd It looks like the this_cpu_ptr() access in icmp_sk() is protected with local_bh_disable(). To avoid missing serialization in -RT I am adding @@ -15,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 8 insertions(+) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c -index ad75c468ecfb..1770ff1638bc 100644 +index 0167e23d1c8f..acec420899c5 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -77,6 +77,7 @@ @@ -60,6 +59,3 @@ index ad75c468ecfb..1770ff1638bc 100644 } int sysctl_icmp_msgs_per_sec __read_mostly = 1000; --- -2.20.1 - diff --git a/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch index d6a067f1e..978033448 100644 --- a/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ b/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch @@ -1,9 +1,8 @@ -From 6b041ae87c727d10384398ec51f0003ef02cdc38 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 6 Dec 2016 17:50:30 -0500 -Subject: [PATCH 205/283] net: Have __napi_schedule_irqoff() disable interrupts +Subject: [PATCH 205/290] net: Have __napi_schedule_irqoff() disable interrupts on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=82df0681d7324c3d2e4b92c5694ebd38e831cd5a A customer hit a crash where the napi sd->poll_list became corrupted. 
The customer had the bnx2x driver, which does a @@ -53,7 +52,7 @@ index b6a75296eb46..946875cae933 100644 static inline bool napi_disable_pending(struct napi_struct *n) { diff --git a/net/core/dev.c b/net/core/dev.c -index 305bf1240e8a..d86972449f63 100644 +index 051b3708e180..c9adf3a88771 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5952,6 +5952,7 @@ bool napi_schedule_prep(struct napi_struct *n) @@ -72,6 +71,3 @@ index 305bf1240e8a..d86972449f63 100644 bool napi_complete_done(struct napi_struct *n, int work_done) { --- -2.20.1 - diff --git a/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch b/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch index c0fd5a20f..f369099e3 100644 --- a/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch +++ b/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch @@ -1,8 +1,7 @@ -From b1507e25e704c438e660c5c1825ec806b158f556 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 23 Jun 2015 15:32:51 +0200 -Subject: [PATCH 206/283] irqwork: push most work into softirq context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 206/290] irqwork: push most work into softirq context +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2207048a61c66cb5d93777d14f38a073404787c0 Initially we defered all irqwork into softirq because we didn't want the latency spikes if perf or another user was busy and delayed the RT task. @@ -247,10 +246,10 @@ index 6482945f8ae8..da4a3f8feb56 100644 /* diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 696e7583137c..781483c76b17 100644 +index 3fab1c50bf1b..2fcd56aa6092 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c -@@ -1733,6 +1733,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) +@@ -1735,6 +1735,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); @@ -259,6 +258,3 @@ index 696e7583137c..781483c76b17 100644 __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); --- -2.20.1 - diff --git a/debian/patches-rt/0207-printk-Make-rt-aware.patch b/debian/patches-rt/0207-printk-Make-rt-aware.patch index 5f655ebc9..059cc2cda 100644 --- a/debian/patches-rt/0207-printk-Make-rt-aware.patch +++ b/debian/patches-rt/0207-printk-Make-rt-aware.patch @@ -1,8 +1,7 @@ -From fd90db4e8653f0ebf2a6959313658fcdaadec8fc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2012 14:50:37 +0200 -Subject: [PATCH 207/283] printk: Make rt aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 207/290] printk: Make rt aware +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fdb4f35129331aff2fda4afcfeffc3f209b78725 Drop the lock before calling the console driver and do not disable interrupts while printing to a serial console. 
@@ -13,7 +12,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 6553508ff388..d983c509f74a 100644 +index 210cdac1458d..c66755a0a046 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1617,6 +1617,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) @@ -109,6 +108,3 @@ index 6553508ff388..d983c509f74a 100644 if (do_cond_resched) cond_resched(); --- -2.20.1 - diff --git a/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch index dd45846c8..66114f325 100644 --- a/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ b/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch @@ -1,8 +1,7 @@ -From af21e5c755447f232fe96ab7df9ecaf8df8a15a7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 19 May 2016 17:45:27 +0200 -Subject: [PATCH 208/283] kernel/printk: Don't try to print from IRQ/NMI region -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 208/290] kernel/printk: Don't try to print from IRQ/NMI region +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8c719aa550b2e9bb95786ce26d5fbff438268cbd On -RT we try to acquire sleeping locks which might lead to warnings from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on @@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 10 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index d983c509f74a..f15988a33860 100644 +index c66755a0a046..1935cf91db0c 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1782,6 +1782,11 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, @@ -43,6 +42,3 @@ index d983c509f74a..f15988a33860 100644 /* * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1.. --- -2.20.1 - diff --git a/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch b/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch index 9e0869700..d350ef0f5 100644 --- a/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch +++ b/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch @@ -1,8 +1,7 @@ -From b0852ca451c4ab71a2ce94c00e267b8eb82e3ac2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 19:01:05 +0100 -Subject: [PATCH 209/283] printk: Drop the logbuf_lock more often -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 209/290] printk: Drop the logbuf_lock more often +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3bfadd9fc97cf1d7877b7d67316d67a96e82f182 The lock is hold with irgs off. The latency drops 500us+ on my arm bugs with a "full" buffer after executing "dmesg" on the shell. 
@@ -13,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 28 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index f15988a33860..a43d07d4e043 100644 +index 1935cf91db0c..3b0f90ace2f1 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1420,12 +1420,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear) @@ -78,6 +77,3 @@ index f15988a33860..a43d07d4e043 100644 logbuf_unlock_irq(); kfree(text); --- -2.20.1 - diff --git a/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch index 2038566c6..e3179277a 100644 --- a/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch +++ b/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch @@ -1,12 +1,11 @@ -From d3668df4d5096d46b33e7f75444dcecf362dfb81 Mon Sep 17 00:00:00 2001 From: "Yadi.hu" Date: Wed, 10 Dec 2014 10:32:09 +0800 -Subject: [PATCH 210/283] ARM: enable irq in translation/section permission +Subject: [PATCH 210/290] ARM: enable irq in translation/section permission fault handlers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=05082980933eaa57eb3a208aa1849e3b7603fe91 Probably happens on all ARM, with CONFIG_PREEMPT_RT_FULL @@ -68,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 6 insertions(+) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index 3232afb6fdc0..3bec1f73a9aa 100644 +index a9ee0d9dc740..20b0e146de98 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, @@ -91,6 +90,3 @@ index 3232afb6fdc0..3bec1f73a9aa 100644 do_bad_area(addr, fsr, regs); return 0; } --- -2.20.1 - diff --git a/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch index 55cd910c7..f712294dc 100644 --- a/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch +++ b/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch @@ -1,8 +1,7 @@ -From a44a97a7d302fab9e3435afc7023d3ebff277efd Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:00 -0600 -Subject: [PATCH 211/283] genirq: update irq_set_irqchip_state documentation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 211/290] genirq: update irq_set_irqchip_state documentation +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8b11399a6330d802b28290410bb88dd784a962e2 On -rt kernels, the use of migrate_disable()/migrate_enable() is sufficient to guarantee a task isn't moved to another CPU. 
Update the @@ -15,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 69b4bfd4654c..aafe2256bd39 100644 +index 3858ac895777..5701774a6d71 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -2282,7 +2282,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); +@@ -2330,7 +2330,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. * @@ -27,6 +26,3 @@ index 69b4bfd4654c..aafe2256bd39 100644 * interrupt controller has per-cpu registers. */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, --- -2.20.1 - diff --git a/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch index 879bd591a..2e8851d29 100644 --- a/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch +++ b/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch @@ -1,9 +1,8 @@ -From a3a039471f58abf91d2007ede9a7e308c834b456 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:01 -0600 -Subject: [PATCH 212/283] KVM: arm/arm64: downgrade preempt_disable()d region +Subject: [PATCH 212/290] KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=97bbe6dd0a79b542ddf927ebe0dac5a18cc96ebe kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating the vgic and timer states to prevent the calling task from migrating to @@ -24,10 +23,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c -index 02bac8abd206..d36802fe2825 100644 +index d982650deb33..efe2d6c0201c 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c -@@ -712,7 +712,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -723,7 +723,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ @@ -36,7 +35,7 @@ index 02bac8abd206..d36802fe2825 100644 kvm_pmu_flush_hwstate(vcpu); -@@ -761,7 +761,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -772,7 +772,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -45,7 +44,7 @@ index 02bac8abd206..d36802fe2825 100644 continue; } -@@ -839,7 +839,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +@@ -850,7 +850,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, run, ret); @@ -54,6 +53,3 @@ index 02bac8abd206..d36802fe2825 100644 ret = handle_exit(vcpu, run, ret); } --- -2.20.1 - diff --git a/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch index d2cf2b5f1..3999d6930 100644 --- a/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ b/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch @@ -1,9 +1,8 @@ -From d42064d787982281635b029761c83a285c877f51 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jul 2018 14:02:38 +0200 -Subject: [PATCH 213/283] arm64: fpsimd: use preemp_disable in addition to +Subject: [PATCH 213/290] arm64: fpsimd: use preemp_disable in addition to local_bh_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=67796c909f19de9affd36a6024bfad2fead9a410 In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The code disables BH and expects that it is not preemptible. On -RT the @@ -166,6 +165,3 @@ index 58c53bc96928..71252cd8b594 100644 } EXPORT_SYMBOL(kernel_neon_begin); --- -2.20.1 - diff --git a/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch b/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch index 4199c9322..1984db67f 100644 --- a/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch +++ b/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch @@ -1,8 +1,7 @@ -From afa7e85db981a95a467d06226214ecfb0bbca27b Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 28 Jul 2011 12:42:23 -0500 -Subject: [PATCH 214/283] kgdb/serial: Short term workaround -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 214/290] kgdb/serial: Short term workaround +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e1ccf900f9f371a1ef8665087ad41b1ece4d9fa9 On 07/27/2011 04:37 PM, Thomas Gleixner wrote: > - KGDB (not yet disabled) is reportedly unusable on -rt right now due @@ -25,7 +24,7 @@ Jason. 
3 files changed, 7 insertions(+) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 8d85448975d3..ebc98ce465b8 100644 +index 6b1d46c1df3b..cd49a76be52a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -31,6 +31,7 @@ @@ -36,7 +35,7 @@ index 8d85448975d3..ebc98ce465b8 100644 #include #include #include -@@ -3240,6 +3241,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3241,6 +3242,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (port->sysrq || oops_in_progress) locked = 0; @@ -81,6 +80,3 @@ index 6a4b41484afe..197cb422f6e1 100644 return r; } --- -2.20.1 - diff --git a/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch b/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch index 7068b4175..1b86ad209 100644 --- a/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch +++ b/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch @@ -1,8 +1,7 @@ -From 012396ccc66ac609679e12d1bf475300918d4e31 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sat, 30 Jul 2011 21:55:53 -0500 -Subject: [PATCH 215/283] sysfs: Add /sys/kernel/realtime entry -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 215/290] sysfs: Add /sys/kernel/realtime entry +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=04f88fb150866704c704045d9c8dd5985b5bde9e Add a /sys/kernel entry to indicate that the kernel is a realtime kernel. @@ -49,6 +48,3 @@ index 46ba853656f6..9a23632b6294 100644 #endif NULL }; --- -2.20.1 - diff --git a/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch b/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch index 3231431f9..9d783416c 100644 --- a/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch +++ b/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch @@ -1,8 +1,7 @@ -From 7852dee63089f616ea97bbbc5137c7312fab3ea8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jul 2011 10:43:51 +0200 -Subject: [PATCH 216/283] mm, rt: kmap_atomic scheduling -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 216/290] mm, rt: kmap_atomic scheduling +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f44ce46e831350a50239c2586a4f777586dc1410 In fact, with migrate_disable() existing one could play games with kmap_atomic. 
You could save/restore the kmap_atomic slots on context @@ -248,7 +247,7 @@ index 0690679832d4..1ac89e4718bf 100644 } diff --git a/include/linux/sched.h b/include/linux/sched.h -index a9a5edfa9689..76e6cdafb992 100644 +index 5e1cc92c2f5c..834f46cb258b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -28,6 +28,7 @@ @@ -259,7 +258,7 @@ index a9a5edfa9689..76e6cdafb992 100644 /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -@@ -1206,6 +1207,12 @@ struct task_struct { +@@ -1214,6 +1215,12 @@ struct task_struct { int softirq_nestcnt; unsigned int softirqs_raised; #endif @@ -320,6 +319,3 @@ index 59db3223a5d6..22aa3ddbd87b 100644 unsigned int nr_free_highpages (void) { --- -2.20.1 - diff --git a/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch b/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch index 96e64361a..9b2bdec58 100644 --- a/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch +++ b/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch @@ -1,8 +1,7 @@ -From 26d14743e5183f7a37bf0a4489428926868186de Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 17:09:55 +0100 -Subject: [PATCH 217/283] x86/highmem: Add a "already used pte" check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 217/290] x86/highmem: Add a "already used pte" check +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c1e9c3cef196f71a33fb68f1a9a0c65ab2d27d2d This is a copy from kmap_atomic_prot(). @@ -24,6 +23,3 @@ index d5a48210d0f6..c0ec8d430c02 100644 #ifdef CONFIG_PREEMPT_RT_FULL current->kmap_pte[type] = pte; #endif --- -2.20.1 - diff --git a/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch b/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch index aa407c2ea..9867d25af 100644 --- a/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch +++ b/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch @@ -1,8 +1,7 @@ -From 2335fd68385693465319a8ed36dc816837cc2d13 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 21:37:27 +0100 -Subject: [PATCH 218/283] arm/highmem: Flush tlb on unmap -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 218/290] arm/highmem: Flush tlb on unmap +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5783132bce9ff6911a382ffbfa6a8969a5c3704f The tlb should be flushed on unmap and thus make the mapping entry invalid. 
This is only done in the non-debug case which does not look @@ -29,6 +28,3 @@ index d02f8187b1cc..eb4b225d28c9 100644 kmap_atomic_idx_pop(); } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ --- -2.20.1 - diff --git a/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch b/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch index 32d6da4a2..5ddc9ecc9 100644 --- a/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch +++ b/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch @@ -1,8 +1,7 @@ -From dbd3e75fbbecfbb79a264b7c2b8f092d4671dfaa Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Feb 2013 11:03:11 +0100 -Subject: [PATCH 219/283] arm: Enable highmem for rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 219/290] arm: Enable highmem for rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5fe42b35e69fc1b2fda87b9c3d090149a0ec3096 fixup highmem for ARM. @@ -179,6 +178,3 @@ index 1ac89e4718bf..eaa2ef9bc10e 100644 #include --- -2.20.1 - diff --git a/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch b/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch index 13f796594..5b6aba9d7 100644 --- a/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch +++ b/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch @@ -1,8 +1,7 @@ -From e8f1bc60d8f9bb78f3671441af7f6dea46c576a9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 12 Nov 2011 14:00:48 +0100 -Subject: [PATCH 220/283] scsi/fcoe: Make RT aware. -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 220/290] scsi/fcoe: Make RT aware. +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f9bbf02ac596565f61feeee7d279ab74525fdc35 Do not disable preemption while taking sleeping locks. All user look safe for migrate_diable() only. @@ -73,7 +72,7 @@ index 6768b2e8148a..c20f51af6bdf 100644 } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c -index 7dc4ffa24430..4946df66a5ab 100644 +index 24cbd0a2cc69..ccf60801fe9d 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) @@ -111,6 +110,3 @@ index 42bcf7f3a0f9..2ce045d6860c 100644 /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { --- -2.20.1 - diff --git a/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch b/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch index a8f06ec60..07455d392 100644 --- a/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch +++ b/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch @@ -1,8 +1,7 @@ -From db7f78bb12a8de39b10bf4c414283150adf48dad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Nov 2011 18:19:27 +0100 -Subject: [PATCH 221/283] x86: crypto: Reduce preempt disabled regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 221/290] x86: crypto: Reduce preempt disabled regions +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=472656d732749a107ad80e379b543d3bfce4bd72 Restrict the preempt disabled regions to the actual floating point operations and enable preemption for the administrative actions. 
@@ -113,6 +112,3 @@ index 917f25e4d0a8..58d8c03fc32d 100644 return err; } --- -2.20.1 - diff --git a/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch index 38681aaaa..2b86986d6 100644 --- a/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ b/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch @@ -1,8 +1,7 @@ -From a9bad506610e589f70cb9bcd40a91ae40a6fcd96 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Feb 2014 17:24:04 +0100 -Subject: [PATCH 222/283] crypto: Reduce preempt disabled regions, more algos -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 222/290] crypto: Reduce preempt disabled regions, more algos +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3964d439260995e6fb01f96f3daf1f7f3ba405f5 Don Estabrook reported | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() @@ -258,6 +257,3 @@ index a78ef99a9981..dac489a1c4da 100644 return err; } EXPORT_SYMBOL_GPL(glue_xts_req_128bit); --- -2.20.1 - diff --git a/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch b/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch index f5f73ee39..381555d61 100644 --- a/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch +++ b/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch @@ -1,11 +1,10 @@ -From 76870a6a1befc6db5eb65682cb8190cf093d49a5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 30 Nov 2017 13:40:10 +0100 -Subject: [PATCH 223/283] crypto: limit more FPU-enabled sections +Subject: [PATCH 223/290] crypto: limit more FPU-enabled sections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=06f6850c211b4924e125e00c1f1a21a14ea7712e Those crypto drivers use SSE/AVX/… for their crypto work and in order to do so in kernel they need to enable the "FPU" in kernel mode which @@ -103,6 +102,3 @@ index 2e5003fef51a..768c53767bb2 100644 /* * Save the FPU state (mark it for reload if necessary): * --- -2.20.1 - diff --git a/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch index d4ccb716a..22a4fd428 100644 --- a/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch +++ b/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch @@ -1,9 +1,8 @@ -From bb65fe43a0177184808e0426548a3e11bcd894e4 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 11 Jul 2018 17:14:47 +0200 -Subject: [PATCH 224/283] crypto: scompress - serialize RT percpu scratch +Subject: [PATCH 224/290] crypto: scompress - serialize RT percpu scratch buffer access with a local lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b060aeba579ee733c1015b2301365526e672f475 | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974 | in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: 
cryptomgr_test @@ -78,6 +77,3 @@ index 968bbcf65c94..c2f0077e0801 100644 return ret; } --- -2.20.1 - diff --git a/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch index 128912504..4bc40b2a4 100644 --- a/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch +++ b/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch @@ -1,9 +1,8 @@ -From 46ee243833b0300b7f72e77268030fba7d083f36 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 18:52:00 +0200 -Subject: [PATCH 225/283] crypto: cryptd - add a lock instead +Subject: [PATCH 225/290] crypto: cryptd - add a lock instead preempt_disable/local_bh_disable -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=22f60893f4fbc66d43c729900e0306163d7389a0 cryptd has a per-CPU lock which protected with local_bh_disable() and preempt_disable(). @@ -80,6 +79,3 @@ index e0c8e907b086..e079f9a70201 100644 if (!req) return; --- -2.20.1 - diff --git a/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch b/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch index 86b5fa33d..272736b86 100644 --- a/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch +++ b/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch @@ -1,9 +1,8 @@ -From f05daa36cd3745fd6096f84b064ecd32d32ba72c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 226/283] panic: skip get_random_bytes for RT_FULL in +Subject: [PATCH 226/290] panic: skip get_random_bytes for RT_FULL in init_oops_id -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6d6fa048f3ae116f0d13b7262f16ea5afd9c4aac Disable on -RT. If this is invoked from irq-context we will have problems to acquire the sleeping lock. 
@@ -14,10 +13,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 2 insertions(+) diff --git a/kernel/panic.c b/kernel/panic.c -index 6a6df23acd1a..8f0a896e8428 100644 +index 72e001e3753e..98748902cc41 100644 --- a/kernel/panic.c +++ b/kernel/panic.c -@@ -479,9 +479,11 @@ static u64 oops_id; +@@ -480,9 +480,11 @@ static u64 oops_id; static int init_oops_id(void) { @@ -29,6 +28,3 @@ index 6a6df23acd1a..8f0a896e8428 100644 oops_id++; return 0; --- -2.20.1 - diff --git a/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch b/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch index 41dc6f5fd..a0db6cc11 100644 --- a/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch +++ b/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch @@ -1,8 +1,7 @@ -From 7ca9435f9f9b0a1c4f286bfc916aaa6894d5b1ab Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Dec 2010 14:25:18 +0100 -Subject: [PATCH 227/283] x86: stackprotector: Avoid random pool on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 227/290] x86: stackprotector: Avoid random pool on rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6bf5941983152b741aea75850114423524c55c92 CPU bringup calls into the random pool to initialize the stack canary. During boot that works nicely even on RT as the might sleep @@ -46,6 +45,3 @@ index 8ec97a62c245..7bc85841fc56 100644 tsc = rdtsc(); canary += tsc + (tsc << 32UL); canary &= CANARY_MASK; --- -2.20.1 - diff --git a/debian/patches-rt/0228-random-Make-it-work-on-rt.patch b/debian/patches-rt/0228-random-Make-it-work-on-rt.patch index 2a710e1d4..de20d7c9b 100644 --- a/debian/patches-rt/0228-random-Make-it-work-on-rt.patch +++ b/debian/patches-rt/0228-random-Make-it-work-on-rt.patch @@ -1,8 +1,7 @@ -From d7138138dbb45015e20668ec13215dc198c4c4d9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Aug 2012 20:38:50 +0200 -Subject: [PATCH 228/283] random: Make it work on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 228/290] random: Make it work on rt +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f07a155615d469ecdfbbe7236f9f2c32f694035c Delegate the random insertion to the forced threaded interrupt handler. 
Store the return IP of the hard interrupt handler in the irq @@ -146,10 +145,10 @@ index 38554bc35375..06a80bbf78af 100644 if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index aafe2256bd39..7f4041357d2f 100644 +index 5701774a6d71..ce86341a9e19 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -1084,6 +1084,12 @@ static int irq_thread(void *data) +@@ -1110,6 +1110,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); @@ -162,6 +161,3 @@ index aafe2256bd39..7f4041357d2f 100644 wake_threads_waitq(desc); } --- -2.20.1 - diff --git a/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch b/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch index 3e501c98e..c142fb66a 100644 --- a/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch +++ b/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch @@ -1,8 +1,7 @@ -From 8d14b62a0aef3f8f55641157c07a65df44f4cdda Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Jul 2017 17:31:20 +0200 -Subject: [PATCH 229/283] cpu/hotplug: Implement CPU pinning -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 229/290] cpu/hotplug: Implement CPU pinning +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=673c270ddb52159dc10aae6859ad2c7fbdc784b1 Signed-off-by: Thomas Gleixner --- @@ -11,7 +10,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 39 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 76e6cdafb992..0445d5c7ced0 100644 +index 834f46cb258b..4559d8039c45 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -671,6 +671,7 @@ struct task_struct { @@ -23,7 +22,7 @@ index 76e6cdafb992..0445d5c7ced0 100644 int migrate_disable_atomic; # endif diff --git a/kernel/cpu.c b/kernel/cpu.c -index 9001e1779325..89b56880314d 100644 +index 02e05a7e463c..e1cd4bfc03bc 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { @@ -112,6 +111,3 @@ index 9001e1779325..89b56880314d 100644 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ irq_unlock_sparse(); --- -2.20.1 - diff --git a/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch index adaafaa13..c9247c213 100644 --- a/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ b/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch @@ -1,9 +1,8 @@ -From 3ee298788bec1452e68227e1737ea65b196f5986 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 19 Aug 2018 08:28:35 +0200 -Subject: [PATCH 230/283] sched: Allow pinned user tasks to be awakened to the +Subject: [PATCH 230/290] sched: Allow pinned user tasks to be awakened to the CPU they pinned -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2e0e054d46bdb30148a9bdbbbcee165a85b47ca5 Since commit 7af443ee16976 ("sched/core: Require cpu_active() in select_task_rq(), for user tasks") select_fallback_rq() will BUG() if @@ -22,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c 
-index 08052198031a..33e81e7be168 100644 +index 7a0061839d77..d7692e31a254 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -904,7 +904,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) @@ -34,6 +33,3 @@ index 08052198031a..33e81e7be168 100644 return cpu_online(cpu); return cpu_active(cpu); --- -2.20.1 - diff --git a/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch index c495c38d5..dbd4e6838 100644 --- a/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ b/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch @@ -1,8 +1,7 @@ -From 6068c578a8d9cb4f49e39b4ae7f0fd7e60c7d0b6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 18:31:00 +0200 -Subject: [PATCH 231/283] hotplug: duct-tape RT-rwlock usage for non-RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 231/290] hotplug: duct-tape RT-rwlock usage for non-RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=6ff0675b8cd730a82f8eb2a3c08e7d0a85750383 This type is only available on -RT. We need to craft something for non-RT. Since the only migrate_disable() user is -RT only, there is no @@ -14,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/kernel/cpu.c b/kernel/cpu.c -index 89b56880314d..0523f2f0f4a2 100644 +index e1cd4bfc03bc..69b5853f2854 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { @@ -97,6 +96,3 @@ index 89b56880314d..0523f2f0f4a2 100644 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ irq_unlock_sparse(); --- -2.20.1 - diff --git a/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch b/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch index f82745b12..9eab37a57 100644 --- a/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch +++ b/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch @@ -1,8 +1,7 @@ -From 6cb14a01753bb2d287133787ca0f71e692eb74a5 Mon Sep 17 00:00:00 2001 From: Priyanka Jain Date: Thu, 17 May 2012 09:35:11 +0530 -Subject: [PATCH 232/283] net: Remove preemption disabling in netif_rx() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 232/290] net: Remove preemption disabling in netif_rx() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=88c9d7966005068f4e29f94d10effc2b8ecc8e9d 1)enqueue_to_backlog() (called from netif_rx) should be bind to a particluar CPU. 
This can be achieved by @@ -36,7 +35,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index d86972449f63..cdf356fe054c 100644 +index c9adf3a88771..b5b4b3b162a7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4484,7 +4484,7 @@ static int netif_rx_internal(struct sk_buff *skb) @@ -66,6 +65,3 @@ index d86972449f63..cdf356fe054c 100644 } return ret; } --- -2.20.1 - diff --git a/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch b/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch index 6d2458cd2..3b93c56df 100644 --- a/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch +++ b/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch @@ -1,8 +1,7 @@ -From d96d2893a46a35127c07a99df70a4c2b4bcde7c9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Sep 2012 16:21:08 +0200 -Subject: [PATCH 233/283] net: Another local_irq_disable/kmalloc headache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 233/290] net: Another local_irq_disable/kmalloc headache +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a081a1c720e485139420e138285bbf35271e89e8 Replace it by a local lock. Though that's pretty inefficient :( @@ -12,7 +11,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 9b9f696281a9..9ce072fb662e 100644 +index 0629ca89ab74..6ca7cb2b4364 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -63,6 +63,7 @@ @@ -60,6 +59,3 @@ index 9b9f696281a9..9ce072fb662e 100644 if (unlikely(!data)) return NULL; --- -2.20.1 - diff --git a/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch index d1f81da8d..00a079103 100644 --- a/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ b/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch @@ -1,9 +1,8 @@ -From a3b202fa9f0aec800acb97c47a90c7c0ca27283f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 15 Jan 2016 16:33:34 +0100 -Subject: [PATCH 234/283] net/core: protect users of napi_alloc_cache against +Subject: [PATCH 234/290] net/core: protect users of napi_alloc_cache against reentrance -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0c07f26683c75380656e020a6cd8b071da568cf4 On -RT the code running in BH can not be moved to another CPU so CPU local variable remain local. 
However the code can be preempted @@ -18,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 9ce072fb662e..15375d68b006 100644 +index 6ca7cb2b4364..c5c0d2095873 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -332,6 +332,7 @@ struct napi_alloc_cache { @@ -114,6 +113,3 @@ index 9ce072fb662e..15375d68b006 100644 } void __kfree_skb_defer(struct sk_buff *skb) { --- -2.20.1 - diff --git a/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch b/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch index c014aaa1d..3f02b7d6f 100644 --- a/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch +++ b/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch @@ -1,9 +1,8 @@ -From 0e06ddd7109d337dafee0134e5e07c2a2ca1b4fb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 11:18:08 +0100 -Subject: [PATCH 235/283] net: netfilter: Serialize xt_write_recseq sections on +Subject: [PATCH 235/290] net: netfilter: Serialize xt_write_recseq sections on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4e6675bcdeed2f36f18ead465070cddfc53223c7 The netfilter code relies only on the implicit semantics of local_bh_disable() for serializing wt_write_recseq sections. RT breaks @@ -79,6 +78,3 @@ index 93aaec3a54ec..b364cf8e5776 100644 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; EXPORT_SYMBOL_GPL(nf_ipv6_ops); --- -2.20.1 - diff --git a/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch b/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch index 51d603a59..4b43c19a9 100644 --- a/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch +++ b/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch @@ -1,8 +1,7 @@ -From c9f595c704d979c0123fa62e3ef3a6254b32f2da Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Mar 2013 18:06:20 +0100 -Subject: [PATCH 236/283] net: Add a mutex around devnet_rename_seq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 236/290] net: Add a mutex around devnet_rename_seq +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9b9fafec34118940cdc0ba2b843d8920157fd0a8 On RT write_seqcount_begin() disables preemption and device_rename() allocates memory with GFP_KERNEL and grabs later the sysfs_mutex @@ -22,7 +21,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index cdf356fe054c..63b3058dd172 100644 +index b5b4b3b162a7..1b1bf6186226 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPUS; @@ -108,6 +107,3 @@ index cdf356fe054c..63b3058dd172 100644 } /** --- -2.20.1 - diff --git a/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch b/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch index efecb4d6c..12a3efe45 100644 --- a/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch +++ b/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch @@ -1,9 +1,8 @@ -From 
817d0f6091be31075e77cb513f97d3087a692e82 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Mon, 16 Apr 2012 15:01:56 +0800 -Subject: [PATCH 237/283] lockdep: selftest: Only do hardirq context test for +Subject: [PATCH 237/290] lockdep: selftest: Only do hardirq context test for raw spinlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=611b5197a59dcb72ca3325c94b6f907ad3d877a9 On -rt there is no softirq context any more and rwlock is sleepable, disable softirq context test and rwlock+irq test. @@ -57,6 +56,3 @@ index 1e1bbf171eca..5cdf3809905e 100644 ww_tests(); --- -2.20.1 - diff --git a/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch index ff29e3968..903ccdfa4 100644 --- a/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ b/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch @@ -1,9 +1,8 @@ -From 26b50852450a192a47d4baaaeb4911d5fa54789a Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Wed, 28 Jan 2015 13:08:45 -0600 -Subject: [PATCH 238/283] lockdep: selftest: fix warnings due to missing +Subject: [PATCH 238/290] lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=626b1deed94211cf41c043e70f75e33dd6d80f6d "lockdep: Selftest: Only do hardirq context test for raw spinlock" disabled the execution of certain tests with PREEMPT_RT_FULL, but did @@ -144,6 +143,3 @@ index 5cdf3809905e..32db9532ddd4 100644 /* * read-lock / write-lock recursion that is unsafe. */ --- -2.20.1 - diff --git a/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch index 750f2ebee..2bd8a2712 100644 --- a/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch @@ -1,8 +1,7 @@ -From 8cc7d52a64f1f62c24514589af91171961b4b31a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Oct 2012 18:50:54 +0100 -Subject: [PATCH 239/283] sched: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 239/290] sched: Add support for lazy preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=31b7c5de23c43474d2c4e69605d541d32c80e750 It has become an obsession to mitigate the determinism vs. throughput loss of RT. 
Looking at the mainline semantics of preemption points @@ -144,10 +143,10 @@ index ed8413e7140f..9c74a019bf57 100644 } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 0445d5c7ced0..dd95bd64504e 100644 +index 4559d8039c45..d2386fa9ed0f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1725,6 +1725,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) +@@ -1733,6 +1733,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -245,7 +244,7 @@ index 907d72b3ba95..306567f72a3e 100644 prompt "Preemption Model" default PREEMPT_NONE diff --git a/kernel/cpu.c b/kernel/cpu.c -index 0523f2f0f4a2..3857a0afdfbf 100644 +index 69b5853f2854..efa687c2c170 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -304,11 +304,13 @@ void pin_current_cpu(void) @@ -263,7 +262,7 @@ index 0523f2f0f4a2..3857a0afdfbf 100644 __read_rt_unlock(cpuhp_pin); goto again; diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 33e81e7be168..7831756f2097 100644 +index d7692e31a254..538461ee8ebd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -493,6 +493,48 @@ void resched_curr(struct rq *rq) @@ -325,7 +324,7 @@ index 33e81e7be168..7831756f2097 100644 #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3472,6 +3517,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -3513,6 +3558,7 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -333,7 +332,7 @@ index 33e81e7be168..7831756f2097 100644 clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -3652,6 +3698,30 @@ static void __sched notrace preempt_schedule_common(void) +@@ -3693,6 +3739,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } @@ -364,7 +363,7 @@ index 33e81e7be168..7831756f2097 100644 #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption -@@ -3666,7 +3736,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) +@@ -3707,7 +3777,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; @@ -374,7 +373,7 @@ index 33e81e7be168..7831756f2097 100644 preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); -@@ -3693,6 +3764,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -3734,6 +3805,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; @@ -384,7 +383,7 @@ index 33e81e7be168..7831756f2097 100644 do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5461,7 +5535,9 @@ void init_idle(struct task_struct *idle, int cpu) +@@ -5502,7 +5576,9 @@ void init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! 
*/ init_idle_preempt_count(idle, cpu); @@ -395,7 +394,7 @@ index 33e81e7be168..7831756f2097 100644 /* * The idle tasks have their own, simple scheduling class: */ -@@ -7190,6 +7266,7 @@ void migrate_disable(void) +@@ -7227,6 +7303,7 @@ void migrate_disable(void) } preempt_disable(); @@ -403,7 +402,7 @@ index 33e81e7be168..7831756f2097 100644 pin_current_cpu(); migrate_disable_update_cpus_allowed(p); -@@ -7257,6 +7334,7 @@ void migrate_enable(void) +@@ -7294,6 +7371,7 @@ void migrate_enable(void) arg.dest_cpu = dest_cpu; unpin_current_cpu(); @@ -411,7 +410,7 @@ index 33e81e7be168..7831756f2097 100644 preempt_enable(); stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); -@@ -7265,6 +7343,7 @@ void migrate_enable(void) +@@ -7302,6 +7380,7 @@ void migrate_enable(void) } } unpin_current_cpu(); @@ -420,10 +419,10 @@ index 33e81e7be168..7831756f2097 100644 } EXPORT_SYMBOL(migrate_enable); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 0048a32a3b4d..2cca09d59019 100644 +index da5d60d25c27..e30187563bfc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4021,7 +4021,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4075,7 +4075,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -432,7 +431,7 @@ index 0048a32a3b4d..2cca09d59019 100644 /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4045,7 +4045,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4099,7 +4099,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -441,7 +440,7 @@ index 0048a32a3b4d..2cca09d59019 100644 } static void -@@ -4187,7 +4187,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -4241,7 +4241,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -450,7 +449,7 @@ index 0048a32a3b4d..2cca09d59019 100644 return; } /* -@@ -4371,7 +4371,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -4427,7 +4427,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -459,7 +458,7 @@ index 0048a32a3b4d..2cca09d59019 100644 } static __always_inline -@@ -5067,7 +5067,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -5126,7 +5126,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -468,7 +467,7 @@ index 0048a32a3b4d..2cca09d59019 100644 return; } hrtick_start(rq, delta); -@@ -6643,7 +6643,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -6702,7 +6702,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -477,7 +476,7 @@ index 0048a32a3b4d..2cca09d59019 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -9734,7 +9734,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -9794,7 +9794,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -486,7 +485,7 @@ index 0048a32a3b4d..2cca09d59019 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -9758,7 +9758,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -9818,7 +9818,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -530,7 +529,7 @@ index 49ae30da28ee..f7c1c262457f 100644 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 496c5d358010..da619e974a11 100644 +index befe5978a832..d547c6ab0db2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2137,6 +2137,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, @@ -666,6 +665,3 @@ index 46c96744f09d..3f78b0afb729 100644 if (entry->migrate_disable) trace_seq_printf(s, "%x", entry->migrate_disable); else --- -2.20.1 - diff --git a/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch b/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch index 7b48ce168..980520953 100644 --- a/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch +++ b/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch @@ -1,8 +1,7 @@ -From 03fa817c57795a125667feb747f1c683b3c456c1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:08:30 +0200 -Subject: [PATCH 240/283] ftrace: Fix trace header alignment -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 240/290] ftrace: Fix trace header alignment +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1ab62a4df7e3cd8a388c1ac3fc114383300d5163 Line up helper arrows to the right column. @@ -15,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index da619e974a11..f82da65109eb 100644 +index d547c6ab0db2..bdcc8eda152c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3351,17 +3351,17 @@ get_total_entries(struct trace_buffer *buf, @@ -47,6 +46,3 @@ index da619e974a11..f82da65109eb 100644 } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) --- -2.20.1 - diff --git a/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch b/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch index 7d89bf4b1..d9e142514 100644 --- a/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch +++ b/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch @@ -1,8 +1,7 @@ -From fb20d310604701fc1f1d53bf74f792f58a83812f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 11:03:47 +0100 -Subject: [PATCH 241/283] x86: Support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 241/290] x86: Support for lazy preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fb1ac4d42675612d93fdf5915eb3ee4e0a65b1b0 Implement the x86 pieces for lazy preempt. 
@@ -52,7 +51,7 @@ index 91676b0d2d4c..3b5e41d9b29d 100644 #ifdef ARCH_RT_DELAYS_SIGNAL_SEND diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index b5c2b1091b18..83d43eb2f556 100644 +index 8059d4fd915c..985988227877 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -766,8 +766,25 @@ END(ret_from_exception) @@ -82,10 +81,10 @@ index b5c2b1091b18..83d43eb2f556 100644 jz restore_all_kernel call preempt_schedule_irq diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index 7b29f2c10d01..23dda6f4a69f 100644 +index 7ffd83c57ef2..663a99f6320f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -708,7 +708,23 @@ retint_kernel: +@@ -732,7 +732,23 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) btl $9, EFLAGS(%rsp) /* were interrupts off? */ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) @@ -235,6 +234,3 @@ index 01de31db300d..ce1c5b9fbd8c 100644 /* TLB state for the entry code */ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); --- -2.20.1 - diff --git a/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch b/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch index 8769217ff..0cb116c28 100644 --- a/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch +++ b/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch @@ -1,9 +1,8 @@ -From e9b59157aa2e702f58d3f7c6f1d6fbcef65dc75b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 16:57:09 +0100 -Subject: [PATCH 242/283] x86: lazy-preempt: properly check against +Subject: [PATCH 242/290] x86: lazy-preempt: properly check against preempt-mask -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e87427920276b51f5f1d11c24ac3f23473907b66 should_resched() should check against preempt_offset after unmasking the need-resched-bit. Otherwise should_resched() won't work for @@ -28,6 +27,3 @@ index 22992c837795..f66708779274 100644 return false; if (current_thread_info()->preempt_lazy_count) return false; --- -2.20.1 - diff --git a/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch b/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch index 81056a01d..7daf6194f 100644 --- a/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch +++ b/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch @@ -1,9 +1,8 @@ -From fb5b75a4b989c8e039eb585a99ee0c41ae05664b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 14:53:49 +0100 -Subject: [PATCH 243/283] x86: lazy-preempt: use proper return label on +Subject: [PATCH 243/290] x86: lazy-preempt: use proper return label on 32bit-x86 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8df5629dc5f632aa3559f60a3e53de3615401003 The lazy-preempt uses the wrong return label in case preemption isn't possible. This results crash while returning to the kernel. 
@@ -17,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index 83d43eb2f556..0b25d2efdb87 100644 +index 985988227877..d880352e410c 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -775,15 +775,15 @@ ENTRY(resume_kernel) @@ -40,6 +39,3 @@ index 83d43eb2f556..0b25d2efdb87 100644 #endif testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all_kernel --- -2.20.1 - diff --git a/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch index cba06055f..54e08f8d9 100644 --- a/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch @@ -1,8 +1,7 @@ -From f83f8291ac518e18c94e68d86fe1639ba7b25a2f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Oct 2012 12:04:11 +0100 -Subject: [PATCH 244/283] arm: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 244/290] arm: Add support for lazy preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7f6d40445a651d7c70aa060e018f584cd31dd3f3 Implement the arm pieces for lazy preempt. @@ -17,7 +16,7 @@ Signed-off-by: Thomas Gleixner 6 files changed, 33 insertions(+), 8 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index e122dd212ab3..9413ad933336 100644 +index c1cc28f0841f..26b1cdc94210 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -90,6 +90,7 @@ config ARM @@ -84,7 +83,7 @@ diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index e85a3af9ddeb..cc67c0a3ae7b 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S -@@ -216,11 +216,18 @@ __irq_svc: +@@ -216,11 +216,18 @@ ENDPROC(__dabt_svc) #ifdef CONFIG_PREEMPT ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -105,7 +104,7 @@ index e85a3af9ddeb..cc67c0a3ae7b 100644 #endif svc_exit r5, irq = 1 @ return from exception -@@ -235,8 +242,14 @@ svc_preempt: +@@ -235,8 +242,14 @@ ENDPROC(__irq_svc) 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED @@ -125,7 +124,7 @@ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 746565a876dc..156e3ba4b319 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S -@@ -56,7 +56,9 @@ __ret_fast_syscall: +@@ -56,7 +56,9 @@ saved_pc .req lr cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing @@ -136,7 +135,7 @@ index 746565a876dc..156e3ba4b319 100644 bne fast_work_pending -@@ -93,8 +95,11 @@ __ret_fast_syscall: +@@ -93,8 +95,11 @@ ENDPROC(ret_fast_syscall) cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing @@ -163,6 +162,3 @@ index b908382b69ff..339fbc281cf1 100644 schedule(); } else { if (unlikely(!user_mode(regs))) --- -2.20.1 - diff --git a/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch index 10e147614..38b6c851c 100644 --- a/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch @@ -1,8 +1,7 @@ -From 
47a223b448c07e6c574e14e44d912dcd27ca4803 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 10:14:11 +0100 -Subject: [PATCH 245/283] powerpc: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 245/290] powerpc: Add support for lazy preemption +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=80e79c5148f7aa9295c560cd5640bb1455ce9f73 Implement the powerpc pieces for lazy preempt. @@ -77,7 +76,7 @@ index 3c0002044bc9..ce316076bc52 100644 /* Bits in local_flags */ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c -index 89cf15566c4e..1870c87fb22a 100644 +index 7c3738d890e8..d16fc87332f8 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -156,6 +156,7 @@ int main(void) @@ -92,7 +91,7 @@ diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 26b3f853cbf6..3783f3ef17a4 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S -@@ -888,7 +888,14 @@ resume_kernel: +@@ -888,7 +888,14 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore andi. r8,r8,_TIF_NEED_RESCHED @@ -107,7 +106,7 @@ index 26b3f853cbf6..3783f3ef17a4 100644 lwz r3,_MSR(r1) andi. r0,r3,MSR_EE /* interrupts off? */ beq restore /* don't schedule if so */ -@@ -899,11 +906,11 @@ resume_kernel: +@@ -899,11 +906,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ */ bl trace_hardirqs_off #endif @@ -122,7 +121,7 @@ index 26b3f853cbf6..3783f3ef17a4 100644 #ifdef CONFIG_TRACE_IRQFLAGS /* And now, to properly rebalance the above, we tell lockdep they * are being turned back on, which will happen when we return -@@ -1232,7 +1239,7 @@ global_dbcr0: +@@ -1232,7 +1239,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ @@ -131,7 +130,7 @@ index 26b3f853cbf6..3783f3ef17a4 100644 beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ -@@ -1253,7 +1260,7 @@ recheck: +@@ -1253,7 +1260,7 @@ do_resched: /* r10 contains MSR_KERNEL here */ MTMSRD(r10) /* disable interrupts */ CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) @@ -162,7 +161,7 @@ index 7a46e0e57a36..7671fa5da9fa 100644 beq 2f bl restore_interrupts SCHEDULE_USER -@@ -830,10 +830,18 @@ resume_kernel: +@@ -830,10 +830,18 @@ _GLOBAL(ret_from_except_lite) #ifdef CONFIG_PREEMPT /* Check if we need to preempt */ @@ -182,7 +181,7 @@ index 7a46e0e57a36..7671fa5da9fa 100644 cmpwi cr0,r8,0 bne restore ld r0,SOFTE(r1) -@@ -850,7 +858,7 @@ resume_kernel: +@@ -850,7 +858,7 @@ _GLOBAL(ret_from_except_lite) /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) ld r4,TI_FLAGS(r9) @@ -191,6 +190,3 @@ index 7a46e0e57a36..7671fa5da9fa 100644 bne 1b /* --- -2.20.1 - diff --git a/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch b/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch index 7e41b42b0..462cba4dd 100644 --- a/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch +++ b/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch @@ -1,8 +1,7 @@ -From 283e7db367d829d84c78b83eb0099bc009fadcb4 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Thu, 14 May 2015 17:52:17 +0200 -Subject: [PATCH 246/283] arch/arm64: Add lazy preempt 
support -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 246/290] arch/arm64: Add lazy preempt support +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9f39bfabe46e4f1ac88719c5ae7e2a746e724d12 arm64 is missing support for PREEMPT_RT. The main feature which is lacking is support for lazy preemption. The arch-specific entry code, @@ -21,10 +20,10 @@ Signed-off-by: Anders Roxell 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 8790a29d0af4..4a4db69c5e9a 100644 +index 51fe21f5d078..9bf5be2d6024 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -140,6 +140,7 @@ config ARM64 +@@ -141,6 +141,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -84,10 +83,10 @@ index 92fba851ce53..844c71bc865b 100644 #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S -index 8556876c9109..d30ca1b304cd 100644 +index 5f800384cb9a..b582580c8c4c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S -@@ -623,11 +623,16 @@ el1_irq: +@@ -623,11 +623,16 @@ ENDPROC(el1_sync) #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count @@ -107,7 +106,7 @@ index 8556876c9109..d30ca1b304cd 100644 #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on -@@ -641,6 +646,7 @@ el1_preempt: +@@ -641,6 +646,7 @@ ENDPROC(el1_irq) 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? @@ -128,6 +127,3 @@ index 5dcc942906db..4fec251fe147 100644 /* Unmask Debug and SError for the next task */ local_daif_restore(DAIF_PROCCTX_NOIRQ); --- -2.20.1 - diff --git a/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch index a9926edc7..27eb21781 100644 --- a/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch +++ b/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch @@ -1,9 +1,8 @@ -From cb13c6a753442e63d2b8097a890892bdd36305f1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:11:54 +0200 -Subject: [PATCH 247/283] connector/cn_proc: Protect send_msg() with a local +Subject: [PATCH 247/290] connector/cn_proc: Protect send_msg() with a local lock on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f9aeb51c498452fe9350417739ef34eda7856189 |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 |in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep @@ -69,6 +68,3 @@ index ad48fd52cb53..c5264b3ee0b0 100644 } void proc_fork_connector(struct task_struct *task) --- -2.20.1 - diff --git a/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch index 2c3f4350b..bce81feb2 100644 --- a/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch +++ b/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch @@ -1,9 +1,8 @@ -From 
0a9138797894a06ad63067de77df6b2dcd97b61e Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 31 Mar 2016 04:08:28 +0200 -Subject: [PATCH 248/283] drivers/block/zram: Replace bit spinlocks with +Subject: [PATCH 248/290] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b12499320b2c305e2a859e4c1af089986c409311 They're nondeterministic, and lead to ___might_sleep() splats in -rt. OTOH, they're a lot less wasteful than an rtmutex per page. @@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 41 insertions(+) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index 70cbd0ee1b07..42de45ebfb43 100644 +index 76abe40bfa83..d6cf9508b80d 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -53,6 +53,40 @@ static size_t huge_class_size; @@ -68,7 +67,7 @@ index 70cbd0ee1b07..42de45ebfb43 100644 static inline bool init_done(struct zram *zram) { -@@ -901,6 +936,8 @@ static DEVICE_ATTR_RO(io_stat); +@@ -902,6 +937,8 @@ static DEVICE_ATTR_RO(io_stat); static DEVICE_ATTR_RO(mm_stat); static DEVICE_ATTR_RO(debug_stat); @@ -77,7 +76,7 @@ index 70cbd0ee1b07..42de45ebfb43 100644 static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; -@@ -931,6 +968,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) +@@ -932,6 +969,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); @@ -99,6 +98,3 @@ index d1095dfdffa8..144e91061df8 100644 #ifdef CONFIG_ZRAM_MEMORY_TRACKING ktime_t ac_time; #endif --- -2.20.1 - diff --git a/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch index b71050cb0..cde398b9b 100644 --- a/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch +++ b/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch @@ -1,9 +1,8 @@ -From 3af8e8733e58687a2fe72ce5b8d47e458d09a9b5 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 20 Oct 2016 11:15:22 +0200 -Subject: [PATCH 249/283] drivers/zram: Don't disable preemption in +Subject: [PATCH 249/290] drivers/zram: Don't disable preemption in zcomp_stream_get/put() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b66c8d1eb51ea78d7b44b9a8a959b2539755ce20 In v4.7, the driver switched to percpu compression streams, disabling preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. 
We @@ -67,10 +66,10 @@ index 41c1002a7d7d..d424eafcbf8e 100644 /* dynamic per-device compression frontend */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index 42de45ebfb43..ffa3e9d67571 100644 +index d6cf9508b80d..71520199226a 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c -@@ -1027,6 +1027,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1028,6 +1028,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, unsigned long handle; unsigned int size; void *src, *dst; @@ -78,7 +77,7 @@ index 42de45ebfb43..ffa3e9d67571 100644 if (zram_wb_enabled(zram)) { zram_slot_lock(zram, index); -@@ -1061,6 +1062,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1062,6 +1063,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, size = zram_get_obj_size(zram, index); @@ -86,7 +85,7 @@ index 42de45ebfb43..ffa3e9d67571 100644 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { dst = kmap_atomic(page); -@@ -1068,14 +1070,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, +@@ -1069,14 +1071,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, kunmap_atomic(dst); ret = 0; } else { @@ -102,6 +101,3 @@ index 42de45ebfb43..ffa3e9d67571 100644 zram_slot_unlock(zram, index); /* Should NEVER happen. Return bio error if it does. */ --- -2.20.1 - diff --git a/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch b/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch index 48f6ba6fb..69f7cebc8 100644 --- a/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch +++ b/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch @@ -1,9 +1,8 @@ -From 50839b1e639f5c139278123e369ebdb5066c325e Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 23 Aug 2017 11:57:29 +0200 -Subject: [PATCH 250/283] drivers/zram: fix zcomp_stream_get() +Subject: [PATCH 250/290] drivers/zram: fix zcomp_stream_get() smp_processor_id() use in preemptible code -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=59f018ada02ffa0ce88d91aa36f5482f9c64054b Use get_local_ptr() instead this_cpu_ptr() to avoid a warning regarding smp_processor_id() in preemptible code. 
@@ -39,6 +38,3 @@ index dd65a27ae2cc..eece02262000 100644 } int zcomp_compress(struct zcomp_strm *zstrm, --- -2.20.1 - diff --git a/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch b/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch index f1f9f9349..075f53895 100644 --- a/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch +++ b/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch @@ -1,8 +1,7 @@ -From fb66fc28332e7786f93815228991067bf2b7feff Mon Sep 17 00:00:00 2001 From: Haris Okanovic Date: Tue, 15 Aug 2017 15:13:08 -0500 -Subject: [PATCH 251/283] tpm_tis: fix stall after iowrite*()s -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 251/290] tpm_tis: fix stall after iowrite*()s +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b0b78c13c30994b736f07aa1d0545d6b484a4b18 ioread8() operations to TPM MMIO addresses can stall the cpu when immediately following a sequence of iowrite*()'s to the same region. @@ -79,6 +78,3 @@ index f08949a5f678..9fefcfcae593 100644 return 0; } --- -2.20.1 - diff --git a/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch index c20b8e779..d560b84bf 100644 --- a/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch +++ b/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch @@ -1,8 +1,7 @@ -From 4ebb1f9be0a95dc2c8bb4b0b1d465d5c395f49a6 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 252/283] watchdog: prevent deferral of watchdogd wakeup on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 252/290] watchdog: prevent deferral of watchdogd wakeup on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=4d37485d195e0b7f778a9556486f4b63c3acd32a When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are deferred for execution into the context of ktimersoftd unless otherwise @@ -76,6 +75,3 @@ index ffbdc4642ea5..84f75b5045f6 100644 else pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", wdd->id); --- -2.20.1 - diff --git a/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch b/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch index a72f3eeb0..81fabb245 100644 --- a/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch +++ b/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch @@ -1,9 +1,8 @@ -From 40e35f1fee95ea4707c63bde3f25068e2cc24cc5 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 27 Feb 2016 08:09:11 +0100 -Subject: [PATCH 253/283] drm,radeon,i915: Use preempt_disable/enable_rt() +Subject: [PATCH 253/290] drm,radeon,i915: Use preempt_disable/enable_rt() where recommended -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c97ee923e5c79bc31feaa16e0c6caef8245ed995 DRM folks identified the spots, so use them. @@ -56,6 +55,3 @@ index d8e2d7b3b836..072b831aaf4f 100644 /* Decode into vertical and horizontal scanout position. 
*/ *vpos = position & 0x1fff; --- -2.20.1 - diff --git a/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch b/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch index 71ee6e425..acfc9a4f5 100644 --- a/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch +++ b/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch @@ -1,9 +1,8 @@ -From dc4f20a517dce3a46fc2bc46240fb2f9452ce1cd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 27 Feb 2016 09:01:42 +0100 -Subject: [PATCH 254/283] drm,i915: Use local_lock/unlock_irq() in +Subject: [PATCH 254/290] drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fc3cde47b35c9aa69eb666ca9238a16704a072bf [ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918 [ 8.014041] in_atomic(): 0, irqs_disabled(): 1, pid: 78, name: kworker/u4:4 @@ -123,6 +122,3 @@ index f7026e887fa9..07e4ddebdd80 100644 if (intel_vgpu_active(dev_priv)) return; --- -2.20.1 - diff --git a/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch b/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch index 2638cc47b..a74d42648 100644 --- a/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch +++ b/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch @@ -1,8 +1,7 @@ -From d884bf96bfbde9a4c7e1435aa0745e52891ae9fa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 09:52:20 +0100 -Subject: [PATCH 255/283] drm/i915: disable tracing on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 255/290] drm/i915: disable tracing on -RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=244c11638562d2a0d5cd78ece67c99293f93a069 Luca Abeni reported this: | BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003 @@ -42,6 +41,3 @@ index b50c6b829715..cc54ec0ef75c 100644 #include #include #include --- -2.20.1 - diff --git a/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch index 4a214c039..eeca0cb96 100644 --- a/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch +++ b/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch @@ -1,9 +1,8 @@ -From 59051415a63f69b8eb7e36f6e30119334967d711 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 19 Dec 2018 10:47:02 +0100 -Subject: [PATCH 256/283] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with +Subject: [PATCH 256/290] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with NOTRACE -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=847acb068749101352be88fb869b853c5c5c9150 The order of the header files is important. 
If this header file is included after tracepoint.h was included then the NOTRACE here becomes a @@ -28,6 +27,3 @@ index cc54ec0ef75c..33028d8f470e 100644 DEFINE_EVENT(i915_request, i915_request_submit, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) --- -2.20.1 - diff --git a/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch b/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch index 014a2aac5..3b21e61ef 100644 --- a/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch +++ b/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch @@ -1,8 +1,7 @@ -From bd17886ee0bbb3d61ca16373b799a2e9f4f71fb3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 13 Feb 2015 15:52:24 +0100 -Subject: [PATCH 257/283] cgroups: use simple wait in css_release() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 257/290] cgroups: use simple wait in css_release() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=0e80b9d1ee1339d795d635a353e51fbbefad6ed5 To avoid: |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 @@ -35,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h -index a6090154b2ab..46a706e2ba35 100644 +index a01ebb630abc..34fb541e90be 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -20,6 +20,7 @@ @@ -55,10 +54,10 @@ index a6090154b2ab..46a706e2ba35 100644 /* diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index 81441117f611..7b536796daf8 100644 +index 78ef274b036e..30dd5754e62e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c -@@ -4628,10 +4628,10 @@ static void css_free_rwork_fn(struct work_struct *work) +@@ -4666,10 +4666,10 @@ static void css_free_rwork_fn(struct work_struct *work) } } @@ -71,7 +70,7 @@ index 81441117f611..7b536796daf8 100644 struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; -@@ -4693,8 +4693,8 @@ static void css_release(struct percpu_ref *ref) +@@ -4731,8 +4731,8 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); @@ -82,7 +81,7 @@ index 81441117f611..7b536796daf8 100644 } static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5420,6 +5420,7 @@ static int __init cgroup_wq_init(void) +@@ -5458,6 +5458,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); @@ -90,6 +89,3 @@ index 81441117f611..7b536796daf8 100644 return 0; } core_initcall(cgroup_wq_init); --- -2.20.1 - diff --git a/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch index 83da24197..2bd2a7072 100644 --- a/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch @@ -1,8 +1,7 @@ -From ca3c9be1f8eb136f8c37c87c0eb8a528ebbbba72 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 8 Jan 2017 09:32:25 +0100 -Subject: [PATCH 258/283] cpuset: Convert callback_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 258/290] cpuset: Convert callback_lock to 
raw_spinlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b929bcca97d17ac7c7b56351098a97c990818c3b The two commits below add up to a cpuset might_sleep() splat for RT: @@ -288,6 +287,3 @@ index 7bb129c5b412..92575cb9b493 100644 return allowed; } --- -2.20.1 - diff --git a/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch b/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch index a9df89bad..d7fefe861 100644 --- a/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch +++ b/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch @@ -1,8 +1,7 @@ -From 304bb4fbcbaaf44ce6ea8b7605d79c64d72c47df Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 11 Oct 2017 17:43:49 +0200 -Subject: [PATCH 259/283] apparmor: use a locallock instead preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 259/290] apparmor: use a locallock instead preempt_disable() +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=32350fce319d81f4b6f402844b50d789ebb92796 get_buffers() disables preemption which acts as a lock for the per-CPU variable. Since we can't disable preemption here on RT, a local_lock is @@ -80,6 +79,3 @@ index 8b8b70620bbe..8330ef57a784 100644 /* * LSM hook functions --- -2.20.1 - diff --git a/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch b/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch index 2ee749084..86d871639 100644 --- a/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch +++ b/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch @@ -1,8 +1,7 @@ -From 9d56ef1ce2d305509eb734d96c2bd1799ef7314f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 27 Jun 2014 16:24:52 +0200 -Subject: [PATCH 260/283] workqueue: Prevent deadlock/stall on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 260/290] workqueue: Prevent deadlock/stall on RT +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2570b4e1c83da05d76ee298401f0e4c4d46428a1 Austin reported a XFS deadlock/stall on RT where scheduled work gets never exececuted and tasks are waiting for each other for ever. @@ -43,10 +42,10 @@ Cc: Steven Rostedt 2 files changed, 51 insertions(+), 15 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 7831756f2097..91a9b2556fb0 100644 +index 538461ee8ebd..06abce7fef9e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3569,9 +3569,8 @@ void __noreturn do_task_dead(void) +@@ -3610,9 +3610,8 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { @@ -57,7 +56,7 @@ index 7831756f2097..91a9b2556fb0 100644 /* * If a worker went to sleep, notify and ask workqueue whether * it wants to wake up a task to maintain concurrency. 
-@@ -3585,6 +3584,9 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -3626,6 +3625,9 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); } @@ -202,6 +201,3 @@ index aa39924bd3b5..12137825bf5a 100644 worker->flags |= WORKER_DIE; wake_up_process(worker->task); } --- -2.20.1 - diff --git a/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch index ac308a129..71adbcf7d 100644 --- a/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ b/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -1,8 +1,7 @@ -From 0eec97926a2deb4ccc517f574a40578bddc19cd7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:56 -0500 -Subject: [PATCH 261/283] signals: Allow rt tasks to cache one sigqueue struct -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 261/290] signals: Allow rt tasks to cache one sigqueue struct +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f72cd03b9416b342bf265ecdecb9c1071a6d0438 To avoid allocation allow rt tasks to cache one sigqueue struct in task struct. @@ -17,7 +16,7 @@ Signed-off-by: Thomas Gleixner 5 files changed, 70 insertions(+), 5 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index dd95bd64504e..c342fa06ab99 100644 +index d2386fa9ed0f..3c213ec3d3b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -895,6 +895,8 @@ struct task_struct { @@ -42,7 +41,7 @@ index e4d01469ed60..746dd5d28c54 100644 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/kernel/exit.c b/kernel/exit.c -index 5c0964dc805a..47d4161d1104 100644 +index e10de9836dd7..ac4ca468db84 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -160,7 +160,7 @@ static void __exit_signal(struct task_struct *tsk) @@ -55,7 +54,7 @@ index 5c0964dc805a..47d4161d1104 100644 spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c -index 96297e71019c..aa4905338ff4 100644 +index 652986ccb41c..6c4df21994f1 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1827,6 +1827,7 @@ static __latent_entropy struct task_struct *copy_process( @@ -208,6 +207,3 @@ index 56edb0580a3a..ac32b4f41d24 100644 if (q) q->flags |= SIGQUEUE_PREALLOC; --- -2.20.1 - diff --git a/debian/patches-rt/0262-Add-localversion-for-RT-release.patch b/debian/patches-rt/0262-Add-localversion-for-RT-release.patch index 7e4be4cc9..dac47c4ee 100644 --- a/debian/patches-rt/0262-Add-localversion-for-RT-release.patch +++ b/debian/patches-rt/0262-Add-localversion-for-RT-release.patch @@ -1,8 +1,7 @@ -From 1d605947258bfd1a27d7d2d155e7466da763ffd2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 20:25:16 +0200 -Subject: [PATCH 262/283] Add localversion for -RT release -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 262/290] Add localversion for -RT release +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=93c0abfa50932dbc3f5c7a3a9a1043396d09aec2 Signed-off-by: Thomas Gleixner --- @@ -17,6 +16,3 @@ index 000000000000..1199ebade17b +++ b/localversion-rt @@ -0,0 +1 @@ +-rt16 --- -2.20.1 - diff --git 
a/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch index 71b1a6fc8..7155c5200 100644 --- a/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch +++ b/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch @@ -1,9 +1,8 @@ -From e3dd55f0a687316bb52bce0835070079b26a21a2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Mar 2019 18:31:54 +0100 -Subject: [PATCH 263/283] powerpc/pseries/iommu: Use a locallock instead +Subject: [PATCH 263/290] powerpc/pseries/iommu: Use a locallock instead local_irq_save() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=45852db84c605cfc7fffb1df575cea2315523540 The locallock protects the per-CPU variable tce_page. The function attempts to allocate memory while tce_page is protected (by disabling @@ -92,6 +91,3 @@ index 06f02960b439..d80d919c78d3 100644 return rc; } --- -2.20.1 - diff --git a/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch b/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch index db37d3e74..a7a5ccc0b 100644 --- a/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch +++ b/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch @@ -1,8 +1,7 @@ -From 76ccb7ba434581a4fa6338c0451f3f0752582520 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 22 Mar 2019 17:15:58 +0100 -Subject: [PATCH 264/283] powerpc: reshuffle TIF bits -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 264/290] powerpc: reshuffle TIF bits +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=02293a0f686ec8fb1f177452b08491a6a6fecbb3 Powerpc32/64 does not compile because TIF_SYSCALL_TRACE's bit is higher than 15 and the assembly instructions don't expect that. 
@@ -60,7 +59,7 @@ diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3783f3ef17a4..44bcf1585bd1 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S -@@ -393,7 +393,9 @@ ret_from_syscall: +@@ -393,7 +393,9 @@ _GLOBAL(DoSyscall) MTMSRD(r10) lwz r9,TI_FLAGS(r12) li r8,-MAX_ERRNO @@ -71,7 +70,7 @@ index 3783f3ef17a4..44bcf1585bd1 100644 bne- syscall_exit_work cmplw 0,r3,r8 blt+ syscall_exit_cont -@@ -511,13 +513,13 @@ syscall_dotrace: +@@ -511,13 +513,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) b syscall_dotrace_cont syscall_exit_work: @@ -87,7 +86,7 @@ index 3783f3ef17a4..44bcf1585bd1 100644 bne- 1f lwz r11,_CCR(r1) /* Load CR */ neg r3,r3 -@@ -526,12 +528,12 @@ syscall_exit_work: +@@ -526,12 +528,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) 1: stw r6,RESULT(r1) /* Save result */ stw r3,GPR3(r1) /* Update return value */ @@ -106,7 +105,7 @@ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 7671fa5da9fa..fe713d014220 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S -@@ -250,7 +250,9 @@ system_call_exit: +@@ -250,7 +250,9 @@ system_call: /* label this so stack traces look sane */ ld r9,TI_FLAGS(r12) li r11,-MAX_ERRNO @@ -147,6 +146,3 @@ index 7671fa5da9fa..fe713d014220 100644 addi r12,r12,TI_FLAGS 3: ldarx r10,0,r12 andc r10,r10,r11 --- -2.20.1 - diff --git a/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch index a13e5b9a5..d4f781fb8 100644 --- a/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch @@ -1,8 +1,7 @@ -From f264ec0373b575d16c232921fab39a5d7a6a3c21 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Wed, 13 Mar 2019 11:40:34 +0000 -Subject: [PATCH 265/283] tty/sysrq: Convert show_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 265/290] tty/sysrq: Convert show_lock to raw_spinlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a537a158fe895cd0a7726b68e73237244cdaa7bb Systems which don't provide arch_trigger_cpumask_backtrace() will invoke showacpu() from a smp_call_function() function which is invoked @@ -58,6 +57,3 @@ index 06ed20dd01ba..627517ad55bf 100644 } static void sysrq_showregs_othercpus(struct work_struct *dummy) --- -2.20.1 - diff --git a/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch index ab981e38a..418e3ddd8 100644 --- a/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch +++ b/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch @@ -1,9 +1,8 @@ -From da8db6d203d29fd33215f60a1948967f2fe76386 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 10 Apr 2019 11:01:37 +0200 -Subject: [PATCH 266/283] drm/i915: Don't disable interrupts independently of +Subject: [PATCH 266/290] drm/i915: Don't disable interrupts independently of the lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1fd04accf7d1da34be2d03a8cc9db0ba68c8cf9f The locks (timeline->lock and rq->lock) need to be 
taken with disabled interrupts. This is done in __retire_engine_request() by disabling the @@ -46,6 +45,3 @@ index 5c2c93cbab12..7124510b9131 100644 /* * The backing object for the context is done after switching to the --- -2.20.1 - diff --git a/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch index d5d7b7a07..5fa81f8c1 100644 --- a/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch +++ b/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch @@ -1,9 +1,8 @@ -From 2de445a925a0c92865a6308ce0e1587ebc250ccb Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Thu, 9 May 2019 14:33:20 -0500 -Subject: [PATCH 267/283] sched/completion: Fix a lockup in +Subject: [PATCH 267/290] sched/completion: Fix a lockup in wait_for_completion() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3a62fc3bb6b7ef809d6b55dfb8ef3563c5216df2 Consider following race: @@ -64,6 +63,3 @@ index 755a58084978..49c14137988e 100644 __set_current_state(state); raw_spin_unlock_irq(&x->wait.lock); timeout = action(timeout); --- -2.20.1 - diff --git a/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch b/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch index 781442887..4d5232f3e 100644 --- a/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch +++ b/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch @@ -1,8 +1,7 @@ -From 59fa1a99358d2859eff36b2531fed5a219437ad5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 12 Feb 2019 15:09:38 +0100 -Subject: [PATCH 268/283] kthread: add a global worker thread. -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 268/290] kthread: add a global worker thread. 
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9300291e699568258ebbabb1bbb8722cefaaa542 [ Upstream commit 0532e87d9d44795221aa921ba7024bde689cc894 ] @@ -29,7 +28,7 @@ Signed-off-by: Steven Rostedt (VMware) create mode 100644 include/linux/kthread-cgroup.h diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index f1e63eb7cbca..aa76c816dbb4 100644 +index 126c2c514673..b736cb6c6228 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -70,7 +70,7 @@ @@ -175,6 +174,3 @@ index 5641b55783a6..9db017761a1f 100644 #ifdef CONFIG_BLK_CGROUP /** * kthread_associate_blkcg - associate blkcg to current kthread --- -2.20.1 - diff --git a/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch index 353c05277..be9bc6e8b 100644 --- a/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,9 +1,8 @@ -From 1c00699faf9b42d9e365cb0a9c4f701c72089d90 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 269/283] genirq: Do not invoke the affinity callback via a +Subject: [PATCH 269/290] genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=8eeb2b164aeb9df1434b2bd622a69c654c4e5fd4 [ Upstream commit 2122adbe011cdc0eb62ad62494e181005b23c76a ] @@ -50,10 +49,10 @@ index 72333899f043..a9321f6429f2 100644 struct work_struct work; #endif diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 7f4041357d2f..381305c48a0a 100644 +index ce86341a9e19..d5539e04e00a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -261,7 +261,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, +@@ -287,7 +287,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, kref_get(&desc->affinity_notify->kref); #ifdef CONFIG_PREEMPT_RT_BASE @@ -62,7 +61,7 @@ index 7f4041357d2f..381305c48a0a 100644 #else schedule_work(&desc->affinity_notify->work); #endif -@@ -326,21 +326,11 @@ static void _irq_affinity_notify(struct irq_affinity_notify *notify) +@@ -352,21 +352,11 @@ static void _irq_affinity_notify(struct irq_affinity_notify *notify) } #ifdef CONFIG_PREEMPT_RT_BASE @@ -86,7 +85,7 @@ index 7f4041357d2f..381305c48a0a 100644 _irq_affinity_notify(notify); } -@@ -383,8 +373,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +@@ -409,8 +399,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) notify->irq = irq; kref_init(¬ify->kref); #ifdef CONFIG_PREEMPT_RT_BASE @@ -96,6 +95,3 @@ index 7f4041357d2f..381305c48a0a 100644 #else INIT_WORK(¬ify->work, irq_affinity_notify); #endif --- -2.20.1 - diff --git a/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch b/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch index 1a82d7a6f..72c9600ad 100644 --- a/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch +++ b/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch @@ -1,9 +1,8 @@ -From 1f38c3b121feba50524996f2cceb92eb47269e52 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej 
Siewior Date: Tue, 28 May 2019 10:42:15 +0200 -Subject: [PATCH 270/283] genirq: Handle missing work_struct in +Subject: [PATCH 270/290] genirq: Handle missing work_struct in irq_set_affinity_notifier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a03f41214e2dccb4d558c97bee856d5ba751de70 [ Upstream commit bbc4d2a7d6ff54ba923640d9a42c7bef7185fe98 ] @@ -22,10 +21,10 @@ Signed-off-by: Steven Rostedt (VMware) 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 381305c48a0a..b2736d7d863b 100644 +index d5539e04e00a..290cd520dba1 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -385,8 +385,9 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +@@ -411,8 +411,9 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) raw_spin_unlock_irqrestore(&desc->lock, flags); if (old_notify) { @@ -37,6 +36,3 @@ index 381305c48a0a..b2736d7d863b 100644 cancel_work_sync(&old_notify->work); #endif kref_put(&old_notify->kref, old_notify->release); --- -2.20.1 - diff --git a/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch b/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch index 3f7c6388a..0d00cb8c7 100644 --- a/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch +++ b/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch @@ -1,8 +1,7 @@ -From 17d57bbeda05b3bfe93276333b7b1db0958796d7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 14 May 2019 17:07:44 +0200 -Subject: [PATCH 271/283] arm: imx6: cpuidle: Use raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 271/290] arm: imx6: cpuidle: Use raw_spinlock_t +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a2d8fe8a09c2bd45f501d347d8fd685bc4e253f9 [ Upstream commit 40d0332ec8312e9c090f0a5414d9c90e12b13611 ] @@ -47,6 +46,3 @@ index 326e870d7123..d9ac80aa1eb0 100644 return index; } --- -2.20.1 - diff --git a/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch b/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch index 91498b208..a9b10e8ac 100644 --- a/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch +++ b/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch @@ -1,9 +1,8 @@ -From 8f756e565320c9c32ed9f5e964f03f939dd38379 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 2019 18:29:13 +0200 -Subject: [PATCH 272/283] rcu: Don't allow to change rcu_normal_after_boot on +Subject: [PATCH 272/290] rcu: Don't allow to change rcu_normal_after_boot on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d907b0f62c7449b966af5924b51a0f409580d1b9 [ Upstream commit c6c058c10577815a2491ce661876cff00a4c3b15 ] @@ -31,6 +30,3 @@ index 16d8dba23329..ed75addd3ccd 100644 #endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_DEBUG_LOCK_ALLOC --- -2.20.1 - diff --git a/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch b/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch index 13c6de4a6..41f1e1029 100644 --- 
a/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch +++ b/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch @@ -1,8 +1,7 @@ -From 652a25fea4d424e12f2f3ad7bcf5e30f6a6efa3d Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sat, 13 Apr 2019 11:22:51 +0800 -Subject: [PATCH 273/283] pci/switchtec: fix stream_open.cocci warnings -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 273/290] pci/switchtec: fix stream_open.cocci warnings +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=163db421a90c12f33a0d3033405d56f8be864abb [ Upstream commit 9462c69e29307adc95c289f50839d5d683973891 ] @@ -35,6 +34,3 @@ index 77d4fb86d05b..ea70bc0b06e9 100644 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); --- -2.20.1 - diff --git a/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch b/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch index 1bdd42cb5..70fbac4b0 100644 --- a/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch +++ b/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch @@ -1,8 +1,7 @@ -From 13885eb4d49323964f1067b3f1b9f6503f07e29e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 2019 19:33:16 +0200 -Subject: [PATCH 274/283] sched/core: Drop a preempt_disable_rt() statement -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 274/290] sched/core: Drop a preempt_disable_rt() statement +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=5f5a0dcfddf4d7c1d2b7c1380024986d3bbfccb7 [ Upstream commit 761126efdcbe3fa3e99c9079fa0ad6eca2f251f2 ] @@ -16,7 +15,7 @@ Signed-off-by: Steven Rostedt (VMware) 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 91a9b2556fb0..1b2503b87473 100644 +index 06abce7fef9e..fab44ac4e1dd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -558,14 +558,11 @@ void resched_cpu(int cpu) @@ -45,6 +44,3 @@ index 91a9b2556fb0..1b2503b87473 100644 return cpu; } --- -2.20.1 - diff --git a/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch b/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch index bd3db81a7..2a5b3383c 100644 --- a/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch +++ b/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch @@ -1,9 +1,8 @@ -From dc0135054565b6bea6821540271adc3643f1098c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 2019 19:39:06 +0200 -Subject: [PATCH 275/283] timers: Redo the notification of canceling timers on +Subject: [PATCH 275/290] timers: Redo the notification of canceling timers on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=fa4d144249a94725f03c3af4680ea73861dbf2ff [ Upstream commit c71273154c2ad12e13333aada340ff30e826a11b ] @@ -136,7 +135,7 @@ index 0571b498db73..3e6c91bdf2ef 100644 /* diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index 966708e8ce14..efa1e433974b 100644 +index 1d1f077cffb3..61ab2c923579 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -436,7 +436,7 @@ int 
alarm_cancel(struct alarm *alarm) @@ -242,10 +241,10 @@ index 55b0e58368bf..a5ff222df4c7 100644 } expires = timeval_to_ktime(value->it_value); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index baeeaef3b721..59ceedbb03f0 100644 +index 8d95e8de98b2..765e700962ab 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c -@@ -789,6 +789,7 @@ check_timers_list(struct list_head *timers, +@@ -792,6 +792,7 @@ check_timers_list(struct list_head *timers, return t->expires; t->firing = 1; @@ -253,7 +252,7 @@ index baeeaef3b721..59ceedbb03f0 100644 list_move_tail(&t->entry, firing); } -@@ -1134,6 +1135,20 @@ static inline int fastpath_timer_check(struct task_struct *tsk) +@@ -1138,6 +1139,20 @@ static inline int fastpath_timer_check(struct task_struct *tsk) return 0; } @@ -274,7 +273,7 @@ index baeeaef3b721..59ceedbb03f0 100644 /* * This is called from the timer interrupt handler. The irq handler has * already updated our counts. We need to check if any timers fire now. -@@ -1144,6 +1159,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) +@@ -1148,6 +1163,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) LIST_HEAD(firing); struct k_itimer *timer, *next; unsigned long flags; @@ -282,7 +281,7 @@ index baeeaef3b721..59ceedbb03f0 100644 /* * The fast path checks that there are no expired thread or thread -@@ -1152,6 +1168,9 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) +@@ -1156,6 +1172,9 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) if (!fastpath_timer_check(tsk)) return; @@ -292,7 +291,7 @@ index baeeaef3b721..59ceedbb03f0 100644 if (!lock_task_sighand(tsk, &flags)) return; /* -@@ -1186,6 +1205,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) +@@ -1190,6 +1209,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) list_del_init(&timer->it.cpu.entry); cpu_firing = timer->it.cpu.firing; timer->it.cpu.firing = 0; @@ -300,7 +299,7 @@ index baeeaef3b721..59ceedbb03f0 100644 /* * The firing flag is -1 if we collided with a reset * of the timer, which already reported this -@@ -1195,6 +1215,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) +@@ -1199,6 +1219,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) cpu_timer_fire(timer); spin_unlock(&timer->it_lock); } @@ -308,7 +307,7 @@ index baeeaef3b721..59ceedbb03f0 100644 } #ifdef CONFIG_PREEMPT_RT_BASE -@@ -1460,6 +1481,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, +@@ -1466,6 +1487,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, spin_unlock_irq(&timer.it_lock); while (error == TIMER_RETRY) { @@ -461,7 +460,7 @@ index ddb21145211a..725bd230a8db 100644 void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting); diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 781483c76b17..d6289d8df06b 100644 +index 2fcd56aa6092..1c67aab34ff5 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -44,7 +44,6 @@ @@ -617,7 +616,7 @@ index 781483c76b17..d6289d8df06b 100644 raw_spin_lock_irq(&base->lock); } } -@@ -1696,6 +1693,7 @@ static inline void __run_timers(struct timer_base *base) +@@ -1698,6 +1695,7 @@ static inline void __run_timers(struct timer_base *base) if (!time_after_eq(jiffies, base->clk)) return; @@ -625,7 +624,7 @@ index 781483c76b17..d6289d8df06b 100644 raw_spin_lock_irq(&base->lock); /* -@@ -1723,7 +1721,7 @@ static inline void __run_timers(struct timer_base *base) +@@ -1725,7 +1723,7 @@ static inline 
void __run_timers(struct timer_base *base) expire_timers(base, heads + levels); } raw_spin_unlock_irq(&base->lock); @@ -634,7 +633,7 @@ index 781483c76b17..d6289d8df06b 100644 } /* -@@ -1970,9 +1968,7 @@ static void __init init_timer_cpu(int cpu) +@@ -1972,9 +1970,7 @@ static void __init init_timer_cpu(int cpu) base->cpu = cpu; raw_spin_lock_init(&base->lock); base->clk = jiffies; @@ -645,6 +644,3 @@ index 781483c76b17..d6289d8df06b 100644 } } --- -2.20.1 - diff --git a/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch b/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch index d631ab169..81cf91fd9 100644 --- a/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch +++ b/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch @@ -1,9 +1,8 @@ -From d6630ac9f4bcf1f8fd51923ea266c42e87f9d312 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:13 +0200 -Subject: [PATCH 276/283] Revert "futex: Ensure lock/unlock symetry versus +Subject: [PATCH 276/290] Revert "futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=cf2d016887fc80fdc7b517711849d31904430e8a [ Upstream commit 6a773b70cf105b46298ed3b44e77c102ce31d9ec ] @@ -30,6 +29,3 @@ index fe90164aa6ec..a58af833bb77 100644 put_pi_state(pi_state); continue; } --- -2.20.1 - diff --git a/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch b/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch index b0a52f50d..34f212564 100644 --- a/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch +++ b/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch @@ -1,9 +1,8 @@ -From 9848e5129d0d928c27247ab10835cdfb2948bd60 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:18 +0200 -Subject: [PATCH 277/283] Revert "futex: Fix bug on when a requeued RT task +Subject: [PATCH 277/290] Revert "futex: Fix bug on when a requeued RT task times out" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=127082c7aefaa73f80f77516af1fdd5037660c56 [ Upstream commit f1a170cb3289a48df26cae3c60d77608f7a988bb ] @@ -78,6 +77,3 @@ index 546aaf058b9e..a501f3b47081 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, --- -2.20.1 - diff --git a/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch b/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch index aca8a4daf..4d72d2af2 100644 --- a/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch +++ b/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch @@ -1,9 +1,8 @@ -From 0c3cb70fa05bdcb4ff249079bab804e098149371 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:21 +0200 -Subject: [PATCH 278/283] Revert "rtmutex: Handle the various new futex race +Subject: [PATCH 278/290] Revert "rtmutex: Handle the various new futex race conditions" -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=1d0a4cd097d5b50ac88a0cc66b53947112296d4b [ Upstream commit 9e0265c21af4d6388d47dcd5ce20f76ec3a2e468 ] @@ -253,6 +252,3 @@ index a501f3b47081..758dc43872e5 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); --- -2.20.1 - diff --git a/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch b/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch index 4f5ffa9a7..cbc70f116 100644 --- a/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch +++ b/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch @@ -1,9 +1,8 @@ -From a4c6efd8adfd48a378c579bb7bc2c1a3162ced7f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:27 +0200 -Subject: [PATCH 279/283] Revert "futex: workaround migrate_disable/enable in +Subject: [PATCH 279/290] Revert "futex: workaround migrate_disable/enable in different context" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bdc746779fff787b4f505ee627699bc93b6446f0 [ Upstream commit a71221d81cc4873891ae44f3aa02df596079b786 ] @@ -65,6 +64,3 @@ index 1d9423914bf4..54ffc25183ed 100644 put_pi_state(pi_state); /* --- -2.20.1 - diff --git a/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch b/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch index 25335e04e..4596bdb15 100644 --- a/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch +++ b/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch @@ -1,8 +1,7 @@ -From b3425a889b12c0166d2eb44b8eaab8af20a73a22 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 11:59:44 +0200 -Subject: [PATCH 280/283] futex: Make the futex_hash_bucket lock raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 280/290] futex: Make the futex_hash_bucket lock raw +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b1ed021119af7d543fc60797dcb48f365e579d40 [ Upstream commit f646521aadedab78801c9befe193e2e8a0c99298 ] @@ -344,6 +343,3 @@ index 54ffc25183ed..b02d9969330b 100644 } return 0; --- -2.20.1 - diff --git a/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch b/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch index 4db7f1857..6f71ffbe2 100644 --- a/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch +++ b/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch @@ -1,8 +1,7 @@ -From 640583a34e7d86efdc8dc382f0567af929cd2c94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Jun 2019 13:35:36 +0200 -Subject: [PATCH 281/283] futex: Delay deallocation of pi_state -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Subject: [PATCH 281/290] futex: Delay deallocation of pi_state +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=49d0177d9457d5012cea7a5fa82bd1cb01ab319a [ Upstream commit d7c7cf8cb68b7df17e6e50be1f25f35d83e686c7 ] @@ -178,6 +177,3 
@@ index b02d9969330b..688b6fcb79cb 100644 } } else { struct rt_mutex *pi_mutex; --- -2.20.1 - diff --git a/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch b/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch index 9fe262666..1b798af67 100644 --- a/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch +++ b/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch @@ -1,9 +1,8 @@ -From 6c3c5543f7820dca5be5c020721d30a34374ccf9 Mon Sep 17 00:00:00 2001 From: "Luis Claudio R. Goncalves" Date: Tue, 25 Jun 2019 11:28:04 -0300 -Subject: [PATCH 282/283] mm/zswap: Do not disable preemption in +Subject: [PATCH 282/290] mm/zswap: Do not disable preemption in zswap_frontswap_store() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=462f06ccfcce395acd6e89f8b9d9a91bd924750c [ Upstream commit 4e4cf4be79635e67144632d9135286381acbc95a ] @@ -122,6 +121,3 @@ index cd91fd9d96b8..420225d3ff0b 100644 zswap_pool_put(entry->pool); freepage: zswap_entry_cache_free(entry); --- -2.20.1 - diff --git a/debian/patches-rt/0283-Linux-4.19.59-rt24-REBASE.patch b/debian/patches-rt/0283-Linux-4.19.59-rt24-REBASE.patch deleted file mode 100644 index b7272f17c..000000000 --- a/debian/patches-rt/0283-Linux-4.19.59-rt24-REBASE.patch +++ /dev/null @@ -1,20 +0,0 @@ -From bfbf9b36800f945fc7bd6bf934e65b59831aa03d Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Fri, 19 Jul 2019 17:46:46 -0400 -Subject: [PATCH 283/283] Linux 4.19.59-rt24 REBASE -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.59-rt24.tar.xz - ---- - localversion-rt | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/localversion-rt b/localversion-rt -index 1199ebade17b..b2111a212663 100644 ---- a/localversion-rt -+++ b/localversion-rt -@@ -1 +1 @@ ---rt16 -+-rt24 --- -2.20.1 - diff --git a/debian/patches-rt/0283-revert-aio.patch b/debian/patches-rt/0283-revert-aio.patch new file mode 100644 index 000000000..c066985e3 --- /dev/null +++ b/debian/patches-rt/0283-revert-aio.patch @@ -0,0 +1,67 @@ +From: "Steven Rostedt (VMware)" +Date: Fri, 20 Sep 2019 17:50:53 -0400 +Subject: [PATCH 283/290] revert-aio +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f2fbada945e3ef792e7e82ba32e222e752a0a88a + +revert: fs/aio: simple simple work + +Signed-off-by: Steven Rostedt (VMware) +--- + fs/aio.c | 15 ++------------- + 1 file changed, 2 insertions(+), 13 deletions(-) + +diff --git a/fs/aio.c b/fs/aio.c +index 16dcf8521c2c..911e23087dfb 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -42,7 +42,6 @@ + #include + #include + #include +-#include + + #include + #include +@@ -122,7 +121,6 @@ struct kioctx { + long nr_pages; + + struct rcu_work free_rwork; /* see free_ioctx() */ +- struct swork_event free_swork; /* see free_ioctx() */ + + /* + * signals when all in-flight requests are done +@@ -267,7 +265,6 @@ static int __init aio_setup(void) + .mount = aio_mount, + .kill_sb = kill_anon_super, + }; +- BUG_ON(swork_get()); + aio_mnt = kern_mount(&aio_fs); + if (IS_ERR(aio_mnt)) + panic("Failed to create aio fs mount."); +@@ -609,9 +606,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users_work(struct swork_event *sev) ++static void free_ioctx_users(struct percpu_ref *ref) + { +- struct kioctx *ctx = container_of(sev, struct kioctx, free_swork); ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -629,14 +626,6 @@ static void free_ioctx_users_work(struct swork_event *sev) + percpu_ref_put(&ctx->reqs); + } + +-static void free_ioctx_users(struct percpu_ref *ref) +-{ +- struct kioctx *ctx = container_of(ref, struct kioctx, users); +- +- INIT_SWORK(&ctx->free_swork, free_ioctx_users_work); +- swork_queue(&ctx->free_swork); +-} +- + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; diff --git a/debian/patches-rt/0284-fs-aio-simple-simple-work.patch b/debian/patches-rt/0284-fs-aio-simple-simple-work.patch new file mode 100644 index 000000000..cc84a6433 --- /dev/null +++ b/debian/patches-rt/0284-fs-aio-simple-simple-work.patch @@ -0,0 +1,72 @@ +From: Sebastian Andrzej Siewior +Date: Mon, 16 Feb 2015 18:49:10 +0100 +Subject: [PATCH 284/290] fs/aio: simple simple work +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=06079632e222f342e079d8f4d5cddc3944f99913 + +[ Upstream commit 1a142116f6435ef070ecebb66d2d599507c10601 ] + +|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 +|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 +|2 locks held by rcuos/2/26: +| #0: (rcu_callback){.+.+..}, at: [] rcu_nocb_kthread+0x1e2/0x380 +| #1: (rcu_read_lock_sched){.+.+..}, at: [] percpu_ref_kill_rcu+0xa6/0x1c0 +|Preemption disabled at:[] rcu_nocb_kthread+0x263/0x380 +|Call Trace: +| [] dump_stack+0x4e/0x9c +| [] __might_sleep+0xfb/0x170 +| [] rt_spin_lock+0x24/0x70 +| [] free_ioctx_users+0x30/0x130 +| [] percpu_ref_kill_rcu+0x1b4/0x1c0 +| [] rcu_nocb_kthread+0x263/0x380 +| [] kthread+0xd6/0xf0 +| [] ret_from_fork+0x7c/0xb0 + +replace this preempt_disable() friendly swork. + +Reported-By: Mike Galbraith +Suggested-by: Benjamin LaHaise +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + fs/aio.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/fs/aio.c b/fs/aio.c +index 911e23087dfb..0c613d805bf1 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -121,6 +121,7 @@ struct kioctx { + long nr_pages; + + struct rcu_work free_rwork; /* see free_ioctx() */ ++ struct kthread_work free_kwork; /* see free_ioctx() */ + + /* + * signals when all in-flight requests are done +@@ -606,9 +607,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users(struct percpu_ref *ref) ++static void free_ioctx_users_work(struct kthread_work *work) + { +- struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ struct kioctx *ctx = container_of(work, struct kioctx, free_kwork); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -626,6 +627,14 @@ static void free_ioctx_users(struct percpu_ref *ref) + percpu_ref_put(&ctx->reqs); + } + ++static void free_ioctx_users(struct percpu_ref *ref) ++{ ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ ++ kthread_init_work(&ctx->free_kwork, free_ioctx_users_work); ++ kthread_schedule_work(&ctx->free_kwork); ++} ++ + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; diff --git a/debian/patches-rt/0285-revert-thermal.patch b/debian/patches-rt/0285-revert-thermal.patch new file mode 100644 index 000000000..d1c3911e0 --- /dev/null +++ b/debian/patches-rt/0285-revert-thermal.patch @@ -0,0 +1,116 @@ +From: "Steven Rostedt (VMware)" +Date: Fri, 20 Sep 2019 17:50:53 -0400 +Subject: [PATCH 285/290] revert-thermal +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=aed786370d98d846df0ae73df3467f288a7debe0 + +Revert: thermal: Defer thermal wakups to threads + +Signed-off-by: Steven Rostedt (VMware) +--- + drivers/thermal/x86_pkg_temp_thermal.c | 52 ++------------------------ + 1 file changed, 3 insertions(+), 49 deletions(-) + +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c +index a5991cbb408f..1ef937d799e4 100644 +--- a/drivers/thermal/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/x86_pkg_temp_thermal.c +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -330,7 +329,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) + schedule_delayed_work_on(cpu, work, ms); + } + +-static void pkg_thermal_notify_work(struct swork_event *event) ++static int pkg_thermal_notify(u64 msr_val) + { + int cpu = smp_processor_id(); + struct pkg_device *pkgdev; +@@ -349,47 +348,9 @@ static void pkg_thermal_notify_work(struct swork_event *event) + } + + spin_unlock_irqrestore(&pkg_temp_lock, flags); +-} +- +-#ifdef CONFIG_PREEMPT_RT_FULL +-static struct swork_event notify_work; +- +-static int pkg_thermal_notify_work_init(void) +-{ +- int err; +- +- err = swork_get(); +- if (err) +- return err; +- +- INIT_SWORK(¬ify_work, pkg_thermal_notify_work); + return 0; + } + +-static void pkg_thermal_notify_work_cleanup(void) +-{ +- swork_put(); +-} +- +-static int pkg_thermal_notify(u64 msr_val) +-{ +- swork_queue(¬ify_work); +- return 0; +-} +- +-#else /* !CONFIG_PREEMPT_RT_FULL */ +- +-static int pkg_thermal_notify_work_init(void) { return 0; } +- +-static void pkg_thermal_notify_work_cleanup(void) { } +- +-static int pkg_thermal_notify(u64 msr_val) +-{ +- pkg_thermal_notify_work(NULL); +- return 0; +-} +-#endif /* CONFIG_PREEMPT_RT_FULL */ +- + static int pkg_temp_thermal_device_add(unsigned int cpu) + { + int pkgid = topology_logical_package_id(cpu); +@@ -554,16 +515,11 @@ static int __init pkg_temp_thermal_init(void) + if (!x86_match_cpu(pkg_temp_thermal_ids)) + return -ENODEV; + +- if (!pkg_thermal_notify_work_init()) +- return -ENODEV; +- + max_packages = topology_max_packages(); + packages = kcalloc(max_packages, sizeof(struct pkg_device *), + GFP_KERNEL); +- if (!packages) { +- ret = -ENOMEM; +- goto err; +- } ++ if (!packages) ++ return -ENOMEM; + + ret = 
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online", + pkg_thermal_cpu_online, pkg_thermal_cpu_offline); +@@ -581,7 +537,6 @@ static int __init pkg_temp_thermal_init(void) + return 0; + + err: +- pkg_thermal_notify_work_cleanup(); + kfree(packages); + return ret; + } +@@ -595,7 +550,6 @@ static void __exit pkg_temp_thermal_exit(void) + cpuhp_remove_state(pkg_thermal_hp_state); + debugfs_remove_recursive(debugfs); + kfree(packages); +- pkg_thermal_notify_work_cleanup(); + } + module_exit(pkg_temp_thermal_exit) + diff --git a/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch new file mode 100644 index 000000000..7ceb1d946 --- /dev/null +++ b/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch @@ -0,0 +1,94 @@ +From: Daniel Wagner +Date: Tue, 17 Feb 2015 09:37:44 +0100 +Subject: [PATCH 286/290] thermal: Defer thermal wakups to threads +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=7d01bb1ab8d1c56ea8368b9bea790bbbfec73d1b + +[ Upstream commit ad2408dc248fe58536eef5b2b5734d8f9d3a280b ] + +On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will +call schedule while we run in irq context. + +[] dump_stack+0x4e/0x8f +[] __schedule_bug+0xa6/0xb4 +[] __schedule+0x5b4/0x700 +[] schedule+0x2a/0x90 +[] rt_spin_lock_slowlock+0xe5/0x2d0 +[] rt_spin_lock+0x25/0x30 +[] pkg_temp_thermal_platform_thermal_notify+0x45/0x134 [x86_pkg_temp_thermal] +[] ? therm_throt_process+0x1b/0x160 +[] intel_thermal_interrupt+0x211/0x250 +[] smp_thermal_interrupt+0x21/0x40 +[] thermal_interrupt+0x6d/0x80 + +Let's defer the work to a kthread. + +Signed-off-by: Daniel Wagner +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: reoder init/denit position. 
TODO: flush swork on exit] +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/thermal/x86_pkg_temp_thermal.c | 28 +++++++++++++++++++++++++- + 1 file changed, 27 insertions(+), 1 deletion(-) + +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c +index 1ef937d799e4..82f21fd4afb0 100644 +--- a/drivers/thermal/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/x86_pkg_temp_thermal.c +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) + schedule_delayed_work_on(cpu, work, ms); + } + +-static int pkg_thermal_notify(u64 msr_val) ++static void pkg_thermal_notify_work(struct kthread_work *work) + { + int cpu = smp_processor_id(); + struct pkg_device *pkgdev; +@@ -348,8 +349,32 @@ static int pkg_thermal_notify(u64 msr_val) + } + + spin_unlock_irqrestore(&pkg_temp_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work); ++ ++static int pkg_thermal_notify(u64 msr_val) ++{ ++ kthread_schedule_work(¬ify_work); ++ return 0; ++} ++ ++static void pkg_thermal_notify_flush(void) ++{ ++ kthread_flush_work(¬ify_work); ++} ++ ++#else /* !CONFIG_PREEMPT_RT_FULL */ ++ ++static void pkg_thermal_notify_flush(void) { } ++ ++static int pkg_thermal_notify(u64 msr_val) ++{ ++ pkg_thermal_notify_work(NULL); + return 0; + } ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + static int pkg_temp_thermal_device_add(unsigned int cpu) + { +@@ -548,6 +573,7 @@ static void __exit pkg_temp_thermal_exit(void) + platform_thermal_package_rate_control = NULL; + + cpuhp_remove_state(pkg_thermal_hp_state); ++ pkg_thermal_notify_flush(); + debugfs_remove_recursive(debugfs); + kfree(packages); + } diff --git a/debian/patches-rt/0287-revert-block.patch b/debian/patches-rt/0287-revert-block.patch new file mode 100644 index 000000000..2e5a171b9 --- /dev/null +++ b/debian/patches-rt/0287-revert-block.patch @@ -0,0 +1,79 @@ +From: "Steven Rostedt (VMware)" +Date: Fri, 20 Sep 2019 17:50:54 -0400 +Subject: [PATCH 287/290] revert-block +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=9cd549c202419d8738748fcd99688ad1d62aed2f + +Revert swork version of: block: blk-mq: move blk_queue_usage_counter_release() into process context + +In order to switch to upstream, we need to revert the swork code. 
+ +Signed-off-by: Steven Rostedt (VMware) +--- + block/blk-core.c | 14 +------------- + include/linux/blkdev.h | 2 -- + 2 files changed, 1 insertion(+), 15 deletions(-) + +diff --git a/block/blk-core.c b/block/blk-core.c +index 06fcd081696e..0edb346263b8 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -970,21 +970,12 @@ void blk_queue_exit(struct request_queue *q) + percpu_ref_put(&q->q_usage_counter); + } + +-static void blk_queue_usage_counter_release_swork(struct swork_event *sev) +-{ +- struct request_queue *q = +- container_of(sev, struct request_queue, mq_pcpu_wake); +- +- wake_up_all(&q->mq_freeze_wq); +-} +- + static void blk_queue_usage_counter_release(struct percpu_ref *ref) + { + struct request_queue *q = + container_of(ref, struct request_queue, q_usage_counter); + +- if (wq_has_sleeper(&q->mq_freeze_wq)) +- swork_queue(&q->mq_pcpu_wake); ++ wake_up_all(&q->mq_freeze_wq); + } + + static void blk_rq_timed_out_timer(struct timer_list *t) +@@ -1081,7 +1072,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); + + init_waitqueue_head(&q->mq_freeze_wq); +- INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork); + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. +@@ -3971,8 +3961,6 @@ int __init blk_dev_init(void) + if (!kblockd_workqueue) + panic("Failed to create kblockd\n"); + +- BUG_ON(swork_get()); +- + request_cachep = kmem_cache_create("blkdev_requests", + sizeof(struct request), 0, SLAB_PANIC, NULL); + +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 7b7c0bc6a514..f1960add94df 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -27,7 +27,6 @@ + #include + #include + #include +-#include + + struct module; + struct scsi_ioctl_command; +@@ -656,7 +655,6 @@ struct request_queue { + #endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; +- struct swork_event mq_pcpu_wake; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + diff --git a/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch new file mode 100644 index 000000000..548e0a57e --- /dev/null +++ b/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -0,0 +1,110 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 13 Mar 2018 13:49:16 +0100 +Subject: [PATCH 288/290] block: blk-mq: move blk_queue_usage_counter_release() + into process context +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=e73f701fa4cf094c7eeeb1959b4eb29b520ed548 + +[ Upstream commit 61c928ecf4fe200bda9b49a0813b5ba0f43995b5 ] + +| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 +| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 +| 5 locks held by kworker/u257:6/255: +| #0: ("events_unbound"){.+.+.+}, at: [] process_one_work+0x171/0x5e0 +| #1: ((&entry->work)){+.+.+.}, at: [] process_one_work+0x171/0x5e0 +| #2: (&shost->scan_mutex){+.+.+.}, at: [] __scsi_add_device+0xa3/0x130 [scsi_mod] +| #3: (&set->tag_list_lock){+.+...}, at: [] blk_mq_init_queue+0x96a/0xa50 +| #4: (rcu_read_lock_sched){......}, at: [] percpu_ref_kill_and_confirm+0x1d/0x120 +| Preemption disabled at:[] blk_mq_freeze_queue_start+0x56/0x70 +| +| CPU: 2 PID: 255 Comm: kworker/u257:6 Not tainted 3.18.7-rt0+ #1 +| Workqueue: events_unbound async_run_entry_fn +| 0000000000000003 
ffff8800bc29f998 ffffffff815b3a12 0000000000000000 +| 0000000000000000 ffff8800bc29f9b8 ffffffff8109aa16 ffff8800bc29fa28 +| ffff8800bc5d1bc8 ffff8800bc29f9e8 ffffffff815b8dd4 ffff880000000000 +| Call Trace: +| [] dump_stack+0x4f/0x7c +| [] __might_sleep+0x116/0x190 +| [] rt_spin_lock+0x24/0x60 +| [] __wake_up+0x29/0x60 +| [] blk_mq_usage_counter_release+0x1e/0x20 +| [] percpu_ref_kill_and_confirm+0x106/0x120 +| [] blk_mq_freeze_queue_start+0x56/0x70 +| [] blk_mq_update_tag_set_depth+0x40/0xd0 +| [] blk_mq_init_queue+0x98c/0xa50 +| [] scsi_mq_alloc_queue+0x20/0x60 [scsi_mod] +| [] scsi_alloc_sdev+0x2f5/0x370 [scsi_mod] +| [] scsi_probe_and_add_lun+0x9e4/0xdd0 [scsi_mod] +| [] __scsi_add_device+0x126/0x130 [scsi_mod] +| [] ata_scsi_scan_host+0xaf/0x200 [libata] +| [] async_port_probe+0x46/0x60 [libata] +| [] async_run_entry_fn+0x3b/0xf0 +| [] process_one_work+0x201/0x5e0 + +percpu_ref_kill_and_confirm() invokes blk_mq_usage_counter_release() in +a rcu-sched region. swait based wake queue can't be used due to +wake_up_all() usage and disabled interrupts in !RT configs (as reported +by Corey Minyard). +The wq_has_sleeper() check has been suggested by Peter Zijlstra. + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + block/blk-core.c | 12 +++++++++++- + include/linux/blkdev.h | 2 ++ + 2 files changed, 13 insertions(+), 1 deletion(-) + +diff --git a/block/blk-core.c b/block/blk-core.c +index 0edb346263b8..f0d9ebeea16d 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -970,12 +970,21 @@ void blk_queue_exit(struct request_queue *q) + percpu_ref_put(&q->q_usage_counter); + } + ++static void blk_queue_usage_counter_release_wrk(struct kthread_work *work) ++{ ++ struct request_queue *q = ++ container_of(work, struct request_queue, mq_pcpu_wake); ++ ++ wake_up_all(&q->mq_freeze_wq); ++} ++ + static void blk_queue_usage_counter_release(struct percpu_ref *ref) + { + struct request_queue *q = + container_of(ref, struct request_queue, q_usage_counter); + +- wake_up_all(&q->mq_freeze_wq); ++ if (wq_has_sleeper(&q->mq_freeze_wq)) ++ kthread_schedule_work(&q->mq_pcpu_wake); + } + + static void blk_rq_timed_out_timer(struct timer_list *t) +@@ -1072,6 +1081,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); + + init_waitqueue_head(&q->mq_freeze_wq); ++ kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. 
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index f1960add94df..15a489abfb62 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -655,6 +656,7 @@ struct request_queue { + #endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; ++ struct kthread_work mq_pcpu_wake; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + diff --git a/debian/patches-rt/0289-workqueue-rework.patch b/debian/patches-rt/0289-workqueue-rework.patch new file mode 100644 index 000000000..a688ca29b --- /dev/null +++ b/debian/patches-rt/0289-workqueue-rework.patch @@ -0,0 +1,1548 @@ +From: Sebastian Andrzej Siewior +Date: Wed, 29 May 2019 18:52:27 +0200 +Subject: [PATCH 289/290] workqueue: rework +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=00ac16d9ec3aca2ef09504ceae1d966bdef1a352 + +[ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ] + +This is an all-in change of the workqueue rework. +The worker_pool.lock is made to raw_spinlock_t. With this change we can +schedule workitems from preempt-disable sections and sections with disabled +interrupts. This change allows to remove all kthread_.* workarounds we used to +have. + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + block/blk-core.c | 6 +- + drivers/block/loop.c | 2 +- + drivers/spi/spi-rockchip.c | 1 - + drivers/thermal/x86_pkg_temp_thermal.c | 28 +-- + fs/aio.c | 10 +- + include/linux/blk-cgroup.h | 2 +- + include/linux/blkdev.h | 2 +- + include/linux/interrupt.h | 5 - + include/linux/kthread-cgroup.h | 17 -- + include/linux/kthread.h | 15 +- + include/linux/swait.h | 14 ++ + include/linux/workqueue.h | 4 - + init/main.c | 1 - + kernel/irq/manage.c | 36 +-- + kernel/kthread.c | 14 -- + kernel/sched/core.c | 1 + + kernel/time/hrtimer.c | 24 -- + kernel/workqueue.c | 300 +++++++++++-------------- + 18 files changed, 164 insertions(+), 318 deletions(-) + delete mode 100644 include/linux/kthread-cgroup.h + +diff --git a/block/blk-core.c b/block/blk-core.c +index f0d9ebeea16d..cc69a47dc57a 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -970,7 +970,7 @@ void blk_queue_exit(struct request_queue *q) + percpu_ref_put(&q->q_usage_counter); + } + +-static void blk_queue_usage_counter_release_wrk(struct kthread_work *work) ++static void blk_queue_usage_counter_release_wrk(struct work_struct *work) + { + struct request_queue *q = + container_of(work, struct request_queue, mq_pcpu_wake); +@@ -984,7 +984,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref) + container_of(ref, struct request_queue, q_usage_counter); + + if (wq_has_sleeper(&q->mq_freeze_wq)) +- kthread_schedule_work(&q->mq_pcpu_wake); ++ schedule_work(&q->mq_pcpu_wake); + } + + static void blk_rq_timed_out_timer(struct timer_list *t) +@@ -1081,7 +1081,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); + + init_waitqueue_head(&q->mq_freeze_wq); +- kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); ++ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index b736cb6c6228..126c2c514673 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -70,7 +70,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c +index b56619418cea..fdcf3076681b 100644 +--- a/drivers/spi/spi-rockchip.c ++++ b/drivers/spi/spi-rockchip.c +@@ -22,7 +22,6 @@ + #include + #include + #include +-#include + + #define DRIVER_NAME "rockchip-spi" + +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c +index 82f21fd4afb0..1ef937d799e4 100644 +--- a/drivers/thermal/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/x86_pkg_temp_thermal.c +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -330,7 +329,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) + schedule_delayed_work_on(cpu, work, ms); + } + +-static void pkg_thermal_notify_work(struct kthread_work *work) ++static int pkg_thermal_notify(u64 msr_val) + { + int cpu = smp_processor_id(); + struct pkg_device *pkgdev; +@@ -349,32 +348,8 @@ static void pkg_thermal_notify_work(struct kthread_work *work) + } + + spin_unlock_irqrestore(&pkg_temp_lock, flags); +-} +- +-#ifdef CONFIG_PREEMPT_RT_FULL +-static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work); +- +-static int pkg_thermal_notify(u64 msr_val) +-{ +- kthread_schedule_work(¬ify_work); +- return 0; +-} +- +-static void pkg_thermal_notify_flush(void) +-{ +- kthread_flush_work(¬ify_work); +-} +- +-#else /* !CONFIG_PREEMPT_RT_FULL */ +- +-static void pkg_thermal_notify_flush(void) { } +- +-static int pkg_thermal_notify(u64 msr_val) +-{ +- pkg_thermal_notify_work(NULL); + return 0; + } +-#endif /* CONFIG_PREEMPT_RT_FULL */ + + static int pkg_temp_thermal_device_add(unsigned int cpu) + { +@@ -573,7 +548,6 @@ static void __exit pkg_temp_thermal_exit(void) + platform_thermal_package_rate_control = NULL; + + cpuhp_remove_state(pkg_thermal_hp_state); +- pkg_thermal_notify_flush(); + debugfs_remove_recursive(debugfs); + kfree(packages); + } +diff --git a/fs/aio.c b/fs/aio.c +index 0c613d805bf1..c74dd321f5b7 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -121,7 +121,7 @@ struct kioctx { + long nr_pages; + + struct rcu_work free_rwork; /* see free_ioctx() */ +- struct kthread_work free_kwork; /* see free_ioctx() */ ++ struct work_struct free_work; /* see free_ioctx() */ + + /* + * signals when all in-flight requests are done +@@ -607,9 +607,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users_work(struct kthread_work *work) ++static void free_ioctx_users_work(struct work_struct *work) + { +- struct kioctx *ctx = container_of(work, struct kioctx, free_kwork); ++ struct kioctx *ctx = container_of(work, struct kioctx, free_work); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -631,8 +631,8 @@ static void free_ioctx_users(struct percpu_ref *ref) + { + struct kioctx *ctx = container_of(ref, struct kioctx, users); + +- kthread_init_work(&ctx->free_kwork, free_ioctx_users_work); +- kthread_schedule_work(&ctx->free_kwork); ++ INIT_WORK(&ctx->free_work, free_ioctx_users_work); ++ schedule_work(&ctx->free_work); + } + + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) +diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h +index 0473efda4c65..da587e60fe86 100644 +--- a/include/linux/blk-cgroup.h ++++ b/include/linux/blk-cgroup.h +@@ -14,7 +14,7 @@ + * Nauman Rafique + */ + +-#include ++#include + #include + #include + #include +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 15a489abfb62..45653e23e4cd 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -656,7 +656,7 @@ struct request_queue { + #endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; +- struct kthread_work mq_pcpu_wake; ++ struct work_struct mq_pcpu_wake; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index a9321f6429f2..97d9ba26915e 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -13,7 +13,6 @@ + #include + #include + #include +-#include + + #include + #include +@@ -239,11 +238,7 @@ extern void resume_device_irqs(void); + struct irq_affinity_notify { + unsigned int irq; + struct kref kref; +-#ifdef CONFIG_PREEMPT_RT_BASE +- struct kthread_work work; +-#else + struct work_struct work; +-#endif + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); + }; +diff --git a/include/linux/kthread-cgroup.h b/include/linux/kthread-cgroup.h +deleted file mode 100644 +index 53d34bca9d72..000000000000 +--- a/include/linux/kthread-cgroup.h ++++ /dev/null +@@ -1,17 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef _LINUX_KTHREAD_CGROUP_H +-#define _LINUX_KTHREAD_CGROUP_H +-#include +-#include +- +-#ifdef CONFIG_BLK_CGROUP +-void kthread_associate_blkcg(struct cgroup_subsys_state *css); +-struct cgroup_subsys_state *kthread_blkcg(void); +-#else +-static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } +-static inline struct cgroup_subsys_state *kthread_blkcg(void) +-{ +- return NULL; +-} +-#endif +-#endif +diff --git a/include/linux/kthread.h b/include/linux/kthread.h +index 7cf56eb54103..6b8c064f0cbc 100644 +--- a/include/linux/kthread.h ++++ b/include/linux/kthread.h +@@ -4,6 +4,7 @@ + /* Simple interface for creating and stopping kernel threads without mess. 
*/ + #include + #include ++#include + + __printf(4, 5) + struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), +@@ -197,12 +198,14 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); + + void kthread_destroy_worker(struct kthread_worker *worker); + +-extern struct kthread_worker kthread_global_worker; +-void kthread_init_global_worker(void); +- +-static inline bool kthread_schedule_work(struct kthread_work *work) ++#ifdef CONFIG_BLK_CGROUP ++void kthread_associate_blkcg(struct cgroup_subsys_state *css); ++struct cgroup_subsys_state *kthread_blkcg(void); ++#else ++static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } ++static inline struct cgroup_subsys_state *kthread_blkcg(void) + { +- return kthread_queue_work(&kthread_global_worker, work); ++ return NULL; + } +- ++#endif + #endif /* _LINUX_KTHREAD_H */ +diff --git a/include/linux/swait.h b/include/linux/swait.h +index f426a0661aa0..21ae66cd41d3 100644 +--- a/include/linux/swait.h ++++ b/include/linux/swait.h +@@ -299,4 +299,18 @@ do { \ + __ret; \ + }) + ++#define __swait_event_lock_irq(wq, condition, lock, cmd) \ ++ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ ++ raw_spin_unlock_irq(&lock); \ ++ cmd; \ ++ schedule(); \ ++ raw_spin_lock_irq(&lock)) ++ ++#define swait_event_lock_irq(wq_head, condition, lock) \ ++ do { \ ++ if (condition) \ ++ break; \ ++ __swait_event_lock_irq(wq_head, condition, lock, ); \ ++ } while (0) ++ + #endif /* _LINUX_SWAIT_H */ +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index 60d673e15632..546aa73fba6a 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -455,10 +455,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, + + extern void destroy_workqueue(struct workqueue_struct *wq); + +-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); +-void free_workqueue_attrs(struct workqueue_attrs *attrs); +-int apply_workqueue_attrs(struct workqueue_struct *wq, +- const struct workqueue_attrs *attrs); + int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); + + extern bool queue_work_on(int cpu, struct workqueue_struct *wq, +diff --git a/init/main.c b/init/main.c +index b0e95351c22c..4a7471606e53 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -1130,7 +1130,6 @@ static noinline void __init kernel_init_freeable(void) + smp_prepare_cpus(setup_max_cpus); + + workqueue_init(); +- kthread_init_global_worker(); + + init_mm_internals(); + +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 290cd520dba1..82b3728685ca 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -285,12 +285,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, + + if (desc->affinity_notify) { + kref_get(&desc->affinity_notify->kref); +- +-#ifdef CONFIG_PREEMPT_RT_BASE +- kthread_schedule_work(&desc->affinity_notify->work); +-#else + schedule_work(&desc->affinity_notify->work); +-#endif + } + irqd_set(data, IRQD_AFFINITY_SET); + +@@ -328,8 +323,10 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) + } + EXPORT_SYMBOL_GPL(irq_set_affinity_hint); + +-static void _irq_affinity_notify(struct irq_affinity_notify *notify) ++static void irq_affinity_notify(struct work_struct *work) + { ++ struct irq_affinity_notify *notify = ++ container_of(work, struct irq_affinity_notify, work); + struct irq_desc *desc = irq_to_desc(notify->irq); + cpumask_var_t cpumask; + unsigned long flags; +@@ -351,25 +348,6 @@ static void 
_irq_affinity_notify(struct irq_affinity_notify *notify) + kref_put(¬ify->kref, notify->release); + } + +-#ifdef CONFIG_PREEMPT_RT_BASE +- +-static void irq_affinity_notify(struct kthread_work *work) +-{ +- struct irq_affinity_notify *notify = +- container_of(work, struct irq_affinity_notify, work); +- _irq_affinity_notify(notify); +-} +- +-#else +- +-static void irq_affinity_notify(struct work_struct *work) +-{ +- struct irq_affinity_notify *notify = +- container_of(work, struct irq_affinity_notify, work); +- _irq_affinity_notify(notify); +-} +-#endif +- + /** + * irq_set_affinity_notifier - control notification of IRQ affinity changes + * @irq: Interrupt for which to enable/disable notification +@@ -398,11 +376,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) + if (notify) { + notify->irq = irq; + kref_init(¬ify->kref); +-#ifdef CONFIG_PREEMPT_RT_BASE +- kthread_init_work(¬ify->work, irq_affinity_notify); +-#else + INIT_WORK(¬ify->work, irq_affinity_notify); +-#endif + } + + raw_spin_lock_irqsave(&desc->lock, flags); +@@ -411,11 +385,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) + raw_spin_unlock_irqrestore(&desc->lock, flags); + + if (old_notify) { +-#ifdef CONFIG_PREEMPT_RT_BASE +- kthread_cancel_work_sync(¬ify->work); +-#else + cancel_work_sync(&old_notify->work); +-#endif + kref_put(&old_notify->kref, old_notify->release); + } + +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 9db017761a1f..5641b55783a6 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -20,7 +20,6 @@ + #include + #include + #include +-#include + #include + + static DEFINE_SPINLOCK(kthread_create_lock); +@@ -1181,19 +1180,6 @@ void kthread_destroy_worker(struct kthread_worker *worker) + } + EXPORT_SYMBOL(kthread_destroy_worker); + +-DEFINE_KTHREAD_WORKER(kthread_global_worker); +-EXPORT_SYMBOL(kthread_global_worker); +- +-__init void kthread_init_global_worker(void) +-{ +- kthread_global_worker.task = kthread_create(kthread_worker_fn, +- &kthread_global_worker, +- "kswork"); +- if (WARN_ON(IS_ERR(kthread_global_worker.task))) +- return; +- wake_up_process(kthread_global_worker.task); +-} +- + #ifdef CONFIG_BLK_CGROUP + /** + * kthread_associate_blkcg - associate blkcg to current kthread +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index fab44ac4e1dd..442a2071cde7 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3607,6 +3607,7 @@ static inline void sched_submit_work(struct task_struct *tsk) + { + if (!tsk->state) + return; ++ + /* + * If a worker went to sleep, notify and ask workqueue whether + * it wants to wake up a task to maintain concurrency. 
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 4534e7871c8c..67951292df58 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -730,29 +730,6 @@ static void hrtimer_switch_to_hres(void) + retrigger_next_event(NULL); + } + +-#ifdef CONFIG_PREEMPT_RT_FULL +- +-static struct swork_event clock_set_delay_work; +- +-static void run_clock_set_delay(struct swork_event *event) +-{ +- clock_was_set(); +-} +- +-void clock_was_set_delayed(void) +-{ +- swork_queue(&clock_set_delay_work); +-} +- +-static __init int create_clock_set_delay_thread(void) +-{ +- WARN_ON(swork_get()); +- INIT_SWORK(&clock_set_delay_work, run_clock_set_delay); +- return 0; +-} +-early_initcall(create_clock_set_delay_thread); +-#else /* PREEMPT_RT_FULL */ +- + static void clock_was_set_work(struct work_struct *work) + { + clock_was_set(); +@@ -768,7 +745,6 @@ void clock_was_set_delayed(void) + { + schedule_work(&hrtimer_work); + } +-#endif + + #else + +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 12137825bf5a..f6d12421e7bb 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -49,8 +49,6 @@ + #include + #include + #include +-#include +-#include + + #include "workqueue_internal.h" + +@@ -125,11 +123,6 @@ enum { + * cpu or grabbing pool->lock is enough for read access. If + * POOL_DISASSOCIATED is set, it's identical to L. + * +- * On RT we need the extra protection via rt_lock_idle_list() for +- * the list manipulations against read access from +- * wq_worker_sleeping(). All other places are nicely serialized via +- * pool->lock. +- * + * A: wq_pool_attach_mutex protected. + * + * PL: wq_pool_mutex protected. +@@ -151,7 +144,7 @@ enum { + /* struct worker is defined in workqueue_internal.h */ + + struct worker_pool { +- spinlock_t lock; /* the pool lock */ ++ raw_spinlock_t lock; /* the pool lock */ + int cpu; /* I: the associated cpu */ + int node; /* I: the associated node ID */ + int id; /* I: pool ID */ +@@ -304,8 +297,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; + + static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ + static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ +-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ +-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ ++static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ ++static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ + + static LIST_HEAD(workqueues); /* PR: list of all workqueues */ + static bool workqueue_freezing; /* PL: have wqs started freezing? 
*/ +@@ -357,8 +350,6 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq); + struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); + +-static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); +- + static int worker_thread(void *__worker); + static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + +@@ -435,31 +426,6 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ + else + +-#ifdef CONFIG_PREEMPT_RT_BASE +-static inline void rt_lock_idle_list(struct worker_pool *pool) +-{ +- preempt_disable(); +-} +-static inline void rt_unlock_idle_list(struct worker_pool *pool) +-{ +- preempt_enable(); +-} +-static inline void sched_lock_idle_list(struct worker_pool *pool) { } +-static inline void sched_unlock_idle_list(struct worker_pool *pool) { } +-#else +-static inline void rt_lock_idle_list(struct worker_pool *pool) { } +-static inline void rt_unlock_idle_list(struct worker_pool *pool) { } +-static inline void sched_lock_idle_list(struct worker_pool *pool) +-{ +- spin_lock_irq(&pool->lock); +-} +-static inline void sched_unlock_idle_list(struct worker_pool *pool) +-{ +- spin_unlock_irq(&pool->lock); +-} +-#endif +- +- + #ifdef CONFIG_DEBUG_OBJECTS_WORK + + static struct debug_obj_descr work_debug_descr; +@@ -862,20 +828,14 @@ static struct worker *first_idle_worker(struct worker_pool *pool) + * Wake up the first idle worker of @pool. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void wake_up_worker(struct worker_pool *pool) + { +- struct worker *worker; +- +- rt_lock_idle_list(pool); +- +- worker = first_idle_worker(pool); ++ struct worker *worker = first_idle_worker(pool); + + if (likely(worker)) + wake_up_process(worker->task); +- +- rt_unlock_idle_list(pool); + } + + /** +@@ -904,7 +864,7 @@ void wq_worker_running(struct task_struct *task) + */ + void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task); ++ struct worker *next, *worker = kthread_data(task); + struct worker_pool *pool; + + /* +@@ -921,18 +881,26 @@ void wq_worker_sleeping(struct task_struct *task) + return; + + worker->sleeping = 1; ++ raw_spin_lock_irq(&pool->lock); + + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. ++ * ++ * NOT_RUNNING is clear. This means that we're bound to and ++ * running on the local cpu w/ rq lock held and preemption ++ * disabled, which in turn means that none else could be ++ * manipulating idle_list, so dereferencing idle_list without pool ++ * lock is safe. + */ + if (atomic_dec_and_test(&pool->nr_running) && + !list_empty(&pool->worklist)) { +- sched_lock_idle_list(pool); +- wake_up_worker(pool); +- sched_unlock_idle_list(pool); ++ next = first_idle_worker(pool); ++ if (next) ++ wake_up_process(next->task); + } ++ raw_spin_unlock_irq(&pool->lock); + } + + /** +@@ -943,7 +911,7 @@ void wq_worker_sleeping(struct task_struct *task) + * Set @flags in @worker->flags and adjust nr_running accordingly. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) ++ * raw_spin_lock_irq(pool->lock) + */ + static inline void worker_set_flags(struct worker *worker, unsigned int flags) + { +@@ -968,7 +936,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags) + * Clear @flags in @worker->flags and adjust nr_running accordingly. 
+ * + * CONTEXT: +- * spin_lock_irq(pool->lock) ++ * raw_spin_lock_irq(pool->lock) + */ + static inline void worker_clr_flags(struct worker *worker, unsigned int flags) + { +@@ -1016,7 +984,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) + * actually occurs, it should be easy to locate the culprit work function. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + * + * Return: + * Pointer to worker which is executing @work if found, %NULL +@@ -1051,7 +1019,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, + * nested inside outer list_for_each_entry_safe(). + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void move_linked_works(struct work_struct *work, struct list_head *head, + struct work_struct **nextp) +@@ -1129,11 +1097,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) + * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ +- rcu_read_lock(); +- local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); ++ raw_spin_lock_irq(&pwq->pool->lock); + put_pwq(pwq); +- local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); +- rcu_read_unlock(); ++ raw_spin_unlock_irq(&pwq->pool->lock); + } + } + +@@ -1166,7 +1132,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq) + * decrement nr_in_flight of its pwq and handle workqueue flushing. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) + { +@@ -1237,7 +1203,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + struct worker_pool *pool; + struct pool_workqueue *pwq; + +- local_lock_irqsave(pendingb_lock, *flags); ++ local_irq_save(*flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1265,7 +1231,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + if (!pool) + goto fail; + +- spin_lock(&pool->lock); ++ raw_spin_lock(&pool->lock); + /* + * work->data is guaranteed to point to pwq only while the work + * item is queued on pwq->wq, and both updating work->data to point +@@ -1294,17 +1260,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + /* work->data points to pwq iff queued, point to pool */ + set_work_pool_and_keep_pending(work, pool->id); + +- spin_unlock(&pool->lock); ++ raw_spin_unlock(&pool->lock); + rcu_read_unlock(); + return 1; + } +- spin_unlock(&pool->lock); ++ raw_spin_unlock(&pool->lock); + fail: + rcu_read_unlock(); +- local_unlock_irqrestore(pendingb_lock, *flags); ++ local_irq_restore(*flags); + if (work_is_canceling(work)) + return -ENOENT; +- cpu_chill(); ++ cpu_relax(); + return -EAGAIN; + } + +@@ -1319,7 +1285,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, + * work_struct flags. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, + struct list_head *head, unsigned int extra_flags) +@@ -1406,13 +1372,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. + */ +-#ifndef CONFIG_PREEMPT_RT_FULL +- /* +- * nort: On RT the "interrupts-disabled" rule has been replaced with +- * pendingb_lock. 
+- */ + lockdep_assert_irqs_disabled(); +-#endif + + debug_work_activate(work); + +@@ -1440,7 +1400,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + if (last_pool && last_pool != pwq->pool) { + struct worker *worker; + +- spin_lock(&last_pool->lock); ++ raw_spin_lock(&last_pool->lock); + + worker = find_worker_executing_work(last_pool, work); + +@@ -1448,11 +1408,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + pwq = worker->current_pwq; + } else { + /* meh... not running there, queue here */ +- spin_unlock(&last_pool->lock); +- spin_lock(&pwq->pool->lock); ++ raw_spin_unlock(&last_pool->lock); ++ raw_spin_lock(&pwq->pool->lock); + } + } else { +- spin_lock(&pwq->pool->lock); ++ raw_spin_lock(&pwq->pool->lock); + } + + /* +@@ -1465,7 +1425,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + */ + if (unlikely(!pwq->refcnt)) { + if (wq->flags & WQ_UNBOUND) { +- spin_unlock(&pwq->pool->lock); ++ raw_spin_unlock(&pwq->pool->lock); + cpu_relax(); + goto retry; + } +@@ -1497,7 +1457,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + insert_work(pwq, work, worklist, work_flags); + + out: +- spin_unlock(&pwq->pool->lock); ++ raw_spin_unlock(&pwq->pool->lock); + rcu_read_unlock(); + } + +@@ -1518,14 +1478,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, + bool ret = false; + unsigned long flags; + +- local_lock_irqsave(pendingb_lock,flags); ++ local_irq_save(flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_restore(flags); + return ret; + } + EXPORT_SYMBOL(queue_work_on); +@@ -1533,12 +1493,11 @@ EXPORT_SYMBOL(queue_work_on); + void delayed_work_timer_fn(struct timer_list *t) + { + struct delayed_work *dwork = from_timer(dwork, t, timer); ++ unsigned long flags; + +- /* XXX */ +- /* local_lock(pendingb_lock); */ +- /* should have been called from irqsafe timer with irq already off */ ++ local_irq_save(flags); + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- /* local_unlock(pendingb_lock); */ ++ local_irq_restore(flags); + } + EXPORT_SYMBOL(delayed_work_timer_fn); + +@@ -1593,14 +1552,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_lock_irqsave(pendingb_lock, flags); ++ local_irq_save(flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_restore(flags); + return ret; + } + EXPORT_SYMBOL(queue_delayed_work_on); +@@ -1635,7 +1594,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_restore(flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -1646,12 +1605,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on); + static void rcu_work_rcufn(struct rcu_head *rcu) + { + struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); +- unsigned long flags; + + /* read the comment in __queue_work() */ +- local_lock_irqsave(pendingb_lock, flags); ++ local_irq_disable(); + __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_enable(); + } + + /** +@@ -1686,7 +1644,7 @@ 
EXPORT_SYMBOL(queue_rcu_work); + * necessary. + * + * LOCKING: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void worker_enter_idle(struct worker *worker) + { +@@ -1703,9 +1661,7 @@ static void worker_enter_idle(struct worker *worker) + worker->last_active = jiffies; + + /* idle_list is LIFO */ +- rt_lock_idle_list(pool); + list_add(&worker->entry, &pool->idle_list); +- rt_unlock_idle_list(pool); + + if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) + mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); +@@ -1728,7 +1684,7 @@ static void worker_enter_idle(struct worker *worker) + * @worker is leaving idle state. Update stats. + * + * LOCKING: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void worker_leave_idle(struct worker *worker) + { +@@ -1738,9 +1694,7 @@ static void worker_leave_idle(struct worker *worker) + return; + worker_clr_flags(worker, WORKER_IDLE); + pool->nr_idle--; +- rt_lock_idle_list(pool); + list_del_init(&worker->entry); +- rt_unlock_idle_list(pool); + } + + static struct worker *alloc_worker(int node) +@@ -1868,11 +1822,11 @@ static struct worker *create_worker(struct worker_pool *pool) + worker_attach_to_pool(worker, pool); + + /* start the newly created worker */ +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + worker->pool->nr_workers++; + worker_enter_idle(worker); + wake_up_process(worker->task); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + return worker; + +@@ -1891,7 +1845,7 @@ static struct worker *create_worker(struct worker_pool *pool) + * be idle. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). + */ + static void destroy_worker(struct worker *worker) + { +@@ -1908,9 +1862,7 @@ static void destroy_worker(struct worker *worker) + pool->nr_workers--; + pool->nr_idle--; + +- rt_lock_idle_list(pool); + list_del_init(&worker->entry); +- rt_unlock_idle_list(pool); + worker->flags |= WORKER_DIE; + wake_up_process(worker->task); + } +@@ -1919,7 +1871,7 @@ static void idle_worker_timeout(struct timer_list *t) + { + struct worker_pool *pool = from_timer(pool, t, idle_timer); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + while (too_many_workers(pool)) { + struct worker *worker; +@@ -1937,7 +1889,7 @@ static void idle_worker_timeout(struct timer_list *t) + destroy_worker(worker); + } + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + static void send_mayday(struct work_struct *work) +@@ -1968,8 +1920,8 @@ static void pool_mayday_timeout(struct timer_list *t) + struct worker_pool *pool = from_timer(pool, t, mayday_timer); + struct work_struct *work; + +- spin_lock_irq(&pool->lock); +- spin_lock(&wq_mayday_lock); /* for wq->maydays */ ++ raw_spin_lock_irq(&pool->lock); ++ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ + + if (need_to_create_worker(pool)) { + /* +@@ -1982,8 +1934,8 @@ static void pool_mayday_timeout(struct timer_list *t) + send_mayday(work); + } + +- spin_unlock(&wq_mayday_lock); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock(&wq_mayday_lock); ++ raw_spin_unlock_irq(&pool->lock); + + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); + } +@@ -2002,7 +1954,7 @@ static void pool_mayday_timeout(struct timer_list *t) + * may_start_working() %true. 
+ * + * LOCKING: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. Called only from + * manager. + */ +@@ -2011,7 +1963,7 @@ __releases(&pool->lock) + __acquires(&pool->lock) + { + restart: +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); +@@ -2027,7 +1979,7 @@ __acquires(&pool->lock) + } + + del_timer_sync(&pool->mayday_timer); +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + /* + * This is necessary even after a new worker was just successfully + * created as @pool->lock was dropped and the new worker might have +@@ -2050,7 +2002,7 @@ __acquires(&pool->lock) + * and may_start_working() is true. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. + * + * Return: +@@ -2073,7 +2025,7 @@ static bool manage_workers(struct worker *worker) + + pool->manager = NULL; + pool->flags &= ~POOL_MANAGER_ACTIVE; +- wake_up(&wq_manager_wait); ++ swake_up_one(&wq_manager_wait); + return true; + } + +@@ -2089,7 +2041,7 @@ static bool manage_workers(struct worker *worker) + * call this function to process a work. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which is released and regrabbed. ++ * raw_spin_lock_irq(pool->lock) which is released and regrabbed. + */ + static void process_one_work(struct worker *worker, struct work_struct *work) + __releases(&pool->lock) +@@ -2171,7 +2123,7 @@ __acquires(&pool->lock) + */ + set_work_pool_and_clear_pending(work, pool->id); + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + lock_map_acquire(&pwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); +@@ -2226,7 +2178,7 @@ __acquires(&pool->lock) + */ + cond_resched(); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* clear cpu intensive status */ + if (unlikely(cpu_intensive)) +@@ -2249,7 +2201,7 @@ __acquires(&pool->lock) + * fetches a work from the top and executes it. + * + * CONTEXT: +- * spin_lock_irq(pool->lock) which may be released and regrabbed ++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. + */ + static void process_scheduled_works(struct worker *worker) +@@ -2291,11 +2243,11 @@ static int worker_thread(void *__worker) + /* tell the scheduler that this is a workqueue worker */ + set_pf_worker(true); + woke_up: +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* am I supposed to die? 
*/ + if (unlikely(worker->flags & WORKER_DIE)) { +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + WARN_ON_ONCE(!list_empty(&worker->entry)); + set_pf_worker(false); + +@@ -2361,7 +2313,7 @@ static int worker_thread(void *__worker) + */ + worker_enter_idle(worker); + __set_current_state(TASK_IDLE); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + schedule(); + goto woke_up; + } +@@ -2415,7 +2367,7 @@ static int rescuer_thread(void *__rescuer) + should_stop = kthread_should_stop(); + + /* see whether any pwq is asking for help */ +- spin_lock_irq(&wq_mayday_lock); ++ raw_spin_lock_irq(&wq_mayday_lock); + + while (!list_empty(&wq->maydays)) { + struct pool_workqueue *pwq = list_first_entry(&wq->maydays, +@@ -2427,11 +2379,11 @@ static int rescuer_thread(void *__rescuer) + __set_current_state(TASK_RUNNING); + list_del_init(&pwq->mayday_node); + +- spin_unlock_irq(&wq_mayday_lock); ++ raw_spin_unlock_irq(&wq_mayday_lock); + + worker_attach_to_pool(rescuer, pool); + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + /* + * Slurp in all works issued via this workqueue and +@@ -2460,10 +2412,10 @@ static int rescuer_thread(void *__rescuer) + * incur MAYDAY_INTERVAL delay inbetween. + */ + if (need_to_create_worker(pool)) { +- spin_lock(&wq_mayday_lock); ++ raw_spin_lock(&wq_mayday_lock); + get_pwq(pwq); + list_move_tail(&pwq->mayday_node, &wq->maydays); +- spin_unlock(&wq_mayday_lock); ++ raw_spin_unlock(&wq_mayday_lock); + } + } + +@@ -2481,14 +2433,14 @@ static int rescuer_thread(void *__rescuer) + if (need_more_worker(pool)) + wake_up_worker(pool); + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + worker_detach_from_pool(rescuer); + +- spin_lock_irq(&wq_mayday_lock); ++ raw_spin_lock_irq(&wq_mayday_lock); + } + +- spin_unlock_irq(&wq_mayday_lock); ++ raw_spin_unlock_irq(&wq_mayday_lock); + + if (should_stop) { + __set_current_state(TASK_RUNNING); +@@ -2568,7 +2520,7 @@ static void wq_barrier_func(struct work_struct *work) + * underneath us, so we can't reliably determine pwq from @target. + * + * CONTEXT: +- * spin_lock_irq(pool->lock). ++ * raw_spin_lock_irq(pool->lock). 
+ */ + static void insert_wq_barrier(struct pool_workqueue *pwq, + struct wq_barrier *barr, +@@ -2655,7 +2607,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, + for_each_pwq(pwq, wq) { + struct worker_pool *pool = pwq->pool; + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + + if (flush_color >= 0) { + WARN_ON_ONCE(pwq->flush_color != -1); +@@ -2672,7 +2624,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, + pwq->work_color = work_color; + } + +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + } + + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) +@@ -2872,9 +2824,9 @@ void drain_workqueue(struct workqueue_struct *wq) + for_each_pwq(pwq, wq) { + bool drained; + +- spin_lock_irq(&pwq->pool->lock); ++ raw_spin_lock_irq(&pwq->pool->lock); + drained = !pwq->nr_active && list_empty(&pwq->delayed_works); +- spin_unlock_irq(&pwq->pool->lock); ++ raw_spin_unlock_irq(&pwq->pool->lock); + + if (drained) + continue; +@@ -2910,7 +2862,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + return false; + } + +- spin_lock_irq(&pool->lock); ++ raw_spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -2926,7 +2878,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + check_flush_dependency(pwq->wq, work); + + insert_wq_barrier(pwq, barr, work, worker); +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + + /* + * Force a lock recursion deadlock when using flush_work() inside a +@@ -2945,7 +2897,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, + rcu_read_unlock(); + return true; + already_gone: +- spin_unlock_irq(&pool->lock); ++ raw_spin_unlock_irq(&pool->lock); + rcu_read_unlock(); + return false; + } +@@ -3046,7 +2998,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_restore(flags); + + /* + * This allows canceling during early boot. We know that @work +@@ -3107,10 +3059,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_lock_irq(pendingb_lock); ++ local_irq_disable(); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- local_unlock_irq(pendingb_lock); ++ local_irq_enable(); + return flush_work(&dwork->work); + } + EXPORT_SYMBOL(flush_delayed_work); +@@ -3148,7 +3100,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) + return false; + + set_work_pool_and_clear_pending(work, get_work_pool_id(work)); +- local_unlock_irqrestore(pendingb_lock, flags); ++ local_irq_restore(flags); + return ret; + } + +@@ -3258,7 +3210,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); + * + * Undo alloc_workqueue_attrs(). + */ +-void free_workqueue_attrs(struct workqueue_attrs *attrs) ++static void free_workqueue_attrs(struct workqueue_attrs *attrs) + { + if (attrs) { + free_cpumask_var(attrs->cpumask); +@@ -3268,21 +3220,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) + + /** + * alloc_workqueue_attrs - allocate a workqueue_attrs +- * @gfp_mask: allocation mask to use + * + * Allocate a new workqueue_attrs, initialize with default settings and + * return it. 
+  *
+  * Return: The allocated new workqueue_attr on success. %NULL on failure.
+  */
+-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
++static struct workqueue_attrs *alloc_workqueue_attrs(void)
+ {
+ 	struct workqueue_attrs *attrs;
+ 
+-	attrs = kzalloc(sizeof(*attrs), gfp_mask);
++	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ 	if (!attrs)
+ 		goto fail;
+-	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
++	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
+ 		goto fail;
+ 
+ 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
+@@ -3339,7 +3290,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
+  */
+ static int init_worker_pool(struct worker_pool *pool)
+ {
+-	spin_lock_init(&pool->lock);
++	raw_spin_lock_init(&pool->lock);
+ 	pool->id = -1;
+ 	pool->cpu = -1;
+ 	pool->node = NUMA_NO_NODE;
+@@ -3360,7 +3311,7 @@ static int init_worker_pool(struct worker_pool *pool)
+ 	pool->refcnt = 1;
+ 
+ 	/* shouldn't fail above this point */
+-	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
++	pool->attrs = alloc_workqueue_attrs();
+ 	if (!pool->attrs)
+ 		return -ENOMEM;
+ 	return 0;
+@@ -3425,15 +3376,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	 * @pool's workers from blocking on attach_mutex. We're the last
+ 	 * manager and @pool gets freed with the flag set.
+ 	 */
+-	spin_lock_irq(&pool->lock);
+-	wait_event_lock_irq(wq_manager_wait,
++	raw_spin_lock_irq(&pool->lock);
++	swait_event_lock_irq(wq_manager_wait,
+ 			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ 	pool->flags |= POOL_MANAGER_ACTIVE;
+ 
+ 	while ((worker = first_idle_worker(pool)))
+ 		destroy_worker(worker);
+ 	WARN_ON(pool->nr_workers || pool->nr_idle);
+-	spin_unlock_irq(&pool->lock);
++	raw_spin_unlock_irq(&pool->lock);
+ 
+ 	mutex_lock(&wq_pool_attach_mutex);
+ 	if (!list_empty(&pool->workers))
+@@ -3587,7 +3538,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+ 		return;
+ 
+ 	/* this function can be called during early boot w/ irq disabled */
+-	spin_lock_irqsave(&pwq->pool->lock, flags);
++	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ 
+ 	/*
+ 	 * During [un]freezing, the caller is responsible for ensuring that
+@@ -3610,7 +3561,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+ 		pwq->max_active = 0;
+ 	}
+ 
+-	spin_unlock_irqrestore(&pwq->pool->lock, flags);
++	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ }
+ 
+ /* initialize newly alloced @pwq which is associated with @wq and @pool */
+@@ -3783,8 +3734,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+ 
+ 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
+ 
+-	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+-	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++	new_attrs = alloc_workqueue_attrs();
++	tmp_attrs = alloc_workqueue_attrs();
+ 	if (!ctx || !new_attrs || !tmp_attrs)
+ 		goto out_free;
+ 
+@@ -3920,7 +3871,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+  *
+  * Return: 0 on success and -errno on failure.
+  */
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
++static int apply_workqueue_attrs(struct workqueue_struct *wq,
+ 			  const struct workqueue_attrs *attrs)
+ {
+ 	int ret;
+@@ -3931,7 +3882,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
+ 
+ /**
+  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
+@@ -4009,9 +3959,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+ 
+ use_dfl_pwq:
+ 	mutex_lock(&wq->mutex);
+-	spin_lock_irq(&wq->dfl_pwq->pool->lock);
++	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ 	get_pwq(wq->dfl_pwq);
+-	spin_unlock_irq(&wq->dfl_pwq->pool->lock);
++	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+ out_unlock:
+ 	mutex_unlock(&wq->mutex);
+@@ -4130,7 +4080,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+ 		return NULL;
+ 
+ 	if (flags & WQ_UNBOUND) {
+-		wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++		wq->unbound_attrs = alloc_workqueue_attrs();
+ 		if (!wq->unbound_attrs)
+ 			goto err_free_wq;
+ 	}
+@@ -4399,10 +4349,10 @@ unsigned int work_busy(struct work_struct *work)
+ 	rcu_read_lock();
+ 	pool = get_work_pool(work);
+ 	if (pool) {
+-		spin_lock_irqsave(&pool->lock, flags);
++		raw_spin_lock_irqsave(&pool->lock, flags);
+ 		if (find_worker_executing_work(pool, work))
+ 			ret |= WORK_BUSY_RUNNING;
+-		spin_unlock_irqrestore(&pool->lock, flags);
++		raw_spin_unlock_irqrestore(&pool->lock, flags);
+ 	}
+ 	rcu_read_unlock();
+ 
+@@ -4608,10 +4558,10 @@ void show_workqueue_state(void)
+ 		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+ 
+ 		for_each_pwq(pwq, wq) {
+-			spin_lock_irqsave(&pwq->pool->lock, flags);
++			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+ 				show_pwq(pwq);
+-			spin_unlock_irqrestore(&pwq->pool->lock, flags);
++			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ 			/*
+ 			 * We could be printing a lot from atomic context, e.g.
+ 			 * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4625,7 +4575,7 @@ void show_workqueue_state(void)
+ 		struct worker *worker;
+ 		bool first = true;
+ 
+-		spin_lock_irqsave(&pool->lock, flags);
++		raw_spin_lock_irqsave(&pool->lock, flags);
+ 		if (pool->nr_workers == pool->nr_idle)
+ 			goto next_pool;
+ 
+@@ -4644,7 +4594,7 @@ void show_workqueue_state(void)
+ 		}
+ 		pr_cont("\n");
+ 	next_pool:
+-		spin_unlock_irqrestore(&pool->lock, flags);
++		raw_spin_unlock_irqrestore(&pool->lock, flags);
+ 		/*
+ 		 * We could be printing a lot from atomic context, e.g.
+ 		 * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4674,7 +4624,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ 		struct worker_pool *pool = worker->pool;
+ 
+ 		if (pool) {
+-			spin_lock_irq(&pool->lock);
++			raw_spin_lock_irq(&pool->lock);
+ 			/*
+ 			 * ->desc tracks information (wq name or
+ 			 * set_worker_desc()) for the latest execution. If
+@@ -4688,7 +4638,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ 					scnprintf(buf + off, size - off, "-%s",
+ 						  worker->desc);
+ 			}
+-			spin_unlock_irq(&pool->lock);
++			raw_spin_unlock_irq(&pool->lock);
+ 		}
+ 	}
+ 
+@@ -4719,7 +4669,7 @@ static void unbind_workers(int cpu)
+ 
+ 	for_each_cpu_worker_pool(pool, cpu) {
+ 		mutex_lock(&wq_pool_attach_mutex);
+-		spin_lock_irq(&pool->lock);
++		raw_spin_lock_irq(&pool->lock);
+ 
+ 		/*
+ 		 * We've blocked all attach/detach operations. Make all workers
+@@ -4733,7 +4683,7 @@ static void unbind_workers(int cpu)
+ 
+ 		pool->flags |= POOL_DISASSOCIATED;
+ 
+-		spin_unlock_irq(&pool->lock);
++		raw_spin_unlock_irq(&pool->lock);
+ 		mutex_unlock(&wq_pool_attach_mutex);
+ 
+ 		/*
+@@ -4759,9 +4709,9 @@ static void unbind_workers(int cpu)
+ 		 * worker blocking could lead to lengthy stalls. Kick off
+ 		 * unbound chain execution of currently pending work items.
+ 		 */
+-		spin_lock_irq(&pool->lock);
++		raw_spin_lock_irq(&pool->lock);
+ 		wake_up_worker(pool);
+-		spin_unlock_irq(&pool->lock);
++		raw_spin_unlock_irq(&pool->lock);
+ 	}
+ }
+ 
+@@ -4788,7 +4738,7 @@ static void rebind_workers(struct worker_pool *pool)
+ 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ 						  pool->attrs->cpumask) < 0);
+ 
+-	spin_lock_irq(&pool->lock);
++	raw_spin_lock_irq(&pool->lock);
+ 
+ 	pool->flags &= ~POOL_DISASSOCIATED;
+ 
+@@ -4827,7 +4777,7 @@ static void rebind_workers(struct worker_pool *pool)
+ 		WRITE_ONCE(worker->flags, worker_flags);
+ 	}
+ 
+-	spin_unlock_irq(&pool->lock);
++	raw_spin_unlock_irq(&pool->lock);
+ }
+ 
+ /**
+@@ -5279,7 +5229,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+ 
+ 	lockdep_assert_held(&wq_pool_mutex);
+ 
+-	attrs = alloc_workqueue_attrs(GFP_KERNEL);
++	attrs = alloc_workqueue_attrs();
+ 	if (!attrs)
+ 		return NULL;
+ 
+@@ -5701,7 +5651,7 @@ static void __init wq_numa_init(void)
+ 		return;
+ 	}
+ 
+-	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
++	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
+ 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
+ 
+ 	/*
+@@ -5776,7 +5726,7 @@ int __init workqueue_init_early(void)
+ 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ 		struct workqueue_attrs *attrs;
+ 
+-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++		BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ 		attrs->nice = std_nice[i];
+ 		unbound_std_wq_attrs[i] = attrs;
+ 
+@@ -5785,7 +5735,7 @@ int __init workqueue_init_early(void)
+ 		 * guaranteed by max_active which is enforced by pwqs.
+ 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
+ 		 */
+-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++		BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ 		attrs->nice = std_nice[i];
+ 		attrs->no_numa = true;
+ 		ordered_wq_attrs[i] = attrs;
diff --git a/debian/patches-rt/0290-Linux-4.19.82-rt30-REBASE.patch b/debian/patches-rt/0290-Linux-4.19.82-rt30-REBASE.patch
new file mode 100644
index 000000000..865a62edc
--- /dev/null
+++ b/debian/patches-rt/0290-Linux-4.19.82-rt30-REBASE.patch
@@ -0,0 +1,16 @@
+From: Steven Rostedt
+Date: Mon, 14 Oct 2019 13:24:30 -0400
+Subject: [PATCH 290/290] Linux 4.19.82-rt30 REBASE
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=968a307194d9b66f841f0728ad8dd4d197bd5c92
+
+---
+ localversion-rt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/localversion-rt b/localversion-rt
+index 1199ebade17b..b72862e06be4 100644
+--- a/localversion-rt
++++ b/localversion-rt
+@@ -1 +1 @@
+--rt16
++-rt30
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
index 9e8c5754c..7a4657f71 100644
--- a/debian/patches-rt/series
+++ b/debian/patches-rt/series
@@ -280,4 +280,11 @@
 0280-futex-Make-the-futex_hash_bucket-lock-raw.patch
 0281-futex-Delay-deallocation-of-pi_state.patch
 0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
-0283-Linux-4.19.59-rt24-REBASE.patch
+0283-revert-aio.patch
+0284-fs-aio-simple-simple-work.patch
+0285-revert-thermal.patch
+0286-thermal-Defer-thermal-wakups-to-threads.patch
+0287-revert-block.patch
+0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch
+0289-workqueue-rework.patch
+0290-Linux-4.19.82-rt30-REBASE.patch