From 30aaabe6a8641da3ed7935288ccb460e140c98fa Mon Sep 17 00:00:00 2001
From: Salvatore Bonaccorso
Date: Fri, 21 Feb 2020 19:07:43 +0100
Subject: [PATCH] [rt] Update to 4.19.103-rt42

---
 debian/changelog | 1 +
 ...M-at91-add-TCB-registers-definitions.patch | 8 +-
 ...ers-Add-a-new-driver-for-the-Atmel-A.patch | 8 +-
 ...ers-timer-atmel-tcb-add-clockevent-d.patch | 8 +-
 ...drivers-atmel-pit-make-option-silent.patch | 8 +-
 ...at91-Implement-clocksource-selection.patch | 8 +-
 ...onfigs-at91-use-new-TCB-timer-driver.patch | 8 +-
 .../0007-ARM-configs-at91-unselect-PIT.patch | 8 +-
 ...ts-Move-pending-table-allocation-to-.patch | 10 +-
 ...-convert-worker-lock-to-raw-spinlock.patch | 8 +-
 ...m-qi-simplify-CGR-allocation-freeing.patch | 8 +-
 ...obustify-CFS-bandwidth-timer-locking.patch | 26 +-
 ...012-arm-Convert-arm-boot_lock-to-raw.patch | 8 +-
 ...-let-setaffinity-unmask-threaded-EOI.patch | 8 +-
 ...irqsave-in-cgroup_rstat_flush_locked.patch | 8 +-
 ...lize-cookie-hash-table-raw-spinlocks.patch | 8 +-
 ...mbus-include-header-for-get_irq_regs.patch | 8 +-
 ...de-irqflags.h-for-raw_local_irq_save.patch | 8 +-
 .../0018-efi-Allow-efi-runtime.patch | 8 +-
 ...fi-drop-task_lock-from-efi_switch_mm.patch | 8 +-
 ...e_layout-before-altenates-are-applie.patch | 8 +-
 ...-phandle-cache-outside-of-the-devtre.patch | 8 +-
 ...ake-quarantine_lock-a-raw_spinlock_t.patch | 8 +-
 ...xpedited-GP-parallelization-cleverne.patch | 8 +-
 ...-kmemleak_lock-to-raw-spinlock-on-RT.patch | 8 +-
 ...-replace-seqcount_t-with-a-seqlock_t.patch | 12 +-
 ...vide-a-pointer-to-the-valid-CPU-mask.patch | 46 +-
 ...ernel-sched-core-add-migrate_disable.patch | 10 +-
 ...sable-Add-export_symbol_gpl-for-__mi.patch | 8 +-
 ...o-not-disable-enable-clocks-in-a-row.patch | 8 +-
 ...B-Allow-higher-clock-rates-for-clock.patch | 8 +-
 ...31-timekeeping-Split-jiffies-seqlock.patch | 18 +-
 ...2-signal-Revert-ptrace-preempt-magic.patch | 12 +-
 ...et-sched-Use-msleep-instead-of-yield.patch | 8 +-
 ...rq-remove-BUG_ON-irqs_disabled-check.patch | 8 +-
 ...do-no-disable-interrupts-in-giveback.patch | 8 +-
 ...rovide-PREEMPT_RT_BASE-config-switch.patch | 8 +-
 ...sable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch | 8 +-
 ...abel-disable-if-stop_machine-is-used.patch | 8 +-
 ...config-options-which-are-not-RT-comp.patch | 8 +-
 .../0040-lockdep-disable-self-test.patch | 8 +-
 .../0041-mm-Allow-only-slub-on-RT.patch | 8 +-
 ...locking-Disable-spin-on-owner-for-RT.patch | 8 +-
 ...043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch | 8 +-
 ...044-rcu-make-RCU_BOOST-default-on-RT.patch | 8 +-
 ...-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch | 8 +-
 ...46-net-core-disable-NET_RX_BUSY_POLL.patch | 8 +-
 ...0047-arm-disable-NEON-in-kernel-mode.patch | 8 +-
 ...0048-powerpc-Use-generic-rwsem-on-RT.patch | 10 +-
 ...ble-in-kernel-MPIC-emulation-for-PRE.patch | 8 +-
 .../0050-powerpc-Disable-highmem-on-RT.patch | 12 +-
 .../0051-mips-Disable-highmem-on-RT.patch | 8 +-
 ...86-Use-generic-rwsem_spinlocks-on-rt.patch | 8 +-
 ...ds-trigger-disable-CPU-trigger-on-RT.patch | 8 +-
 ...rop-K8-s-driver-from-beeing-selected.patch | 8 +-
 .../patches-rt/0055-md-disable-bcache.patch | 8 +-
 ...6-efi-Disable-runtime-services-on-RT.patch | 8 +-
 ...0057-printk-Add-a-printk-kill-switch.patch | 10 +-
 ..._early_printk-boot-param-to-help-wit.patch | 10 +-
 ...pt-Provide-preempt_-_-no-rt-variants.patch | 8 +-
 ...-migrate_disable-enable-in-different.patch | 8 +-
 .../0061-rt-Add-local-irq-locks.patch | 8 +-
 ...provide-get-put-_locked_ptr-variants.patch | 8 +-
 ...catterlist-Do-not-disable-irqs-on-RT.patch | 8 +-
...-x86-Delay-calling-signals-in-atomic.patch | 16 +- ...ignal-delay-calling-signals-on-32bit.patch | 8 +- ...head-Replace-bh_uptodate_lock-for-rt.patch | 10 +- ...-state-lock-and-journal-head-lock-rt.patch | 8 +- ...st_bl-Make-list-head-locking-RT-safe.patch | 8 +- ...-list_bl-fixup-bogus-lockdep-warning.patch | 8 +- .../0070-genirq-Disable-irqpoll-on-rt.patch | 8 +- ...-genirq-Force-interrupt-thread-on-RT.patch | 8 +- ...d-zone-lock-while-freeing-pages-from.patch | 10 +- ...d-zone-lock-while-freeing-pages-from.patch | 10 +- ...B-change-list_lock-to-raw_spinlock_t.patch | 8 +- ...ving-back-empty-slubs-to-IRQ-enabled.patch | 8 +- ...page_alloc-rt-friendly-per-cpu-pages.patch | 14 +- ...077-mm-swap-Convert-to-percpu-locked.patch | 12 +- ...m-perform-lru_add_drain_all-remotely.patch | 8 +- ...t-per-cpu-variables-with-preempt-dis.patch | 8 +- ...plit-page-table-locks-for-vector-pag.patch | 8 +- .../0081-mm-Enable-SLUB-for-RT.patch | 8 +- ...0082-slub-Enable-irqs-for-__GFP_WAIT.patch | 8 +- .../0083-slub-Disable-SLUB_CPU_PARTIAL.patch | 8 +- ...n-t-call-schedule_work_on-in-preempt.patch | 8 +- ...place-local_irq_disable-with-local-l.patch | 8 +- ...oc-copy-with-get_cpu_var-and-locking.patch | 8 +- ...le-preemption-__split_large_page-aft.patch | 8 +- .../0088-radix-tree-use-local-locks.patch | 8 +- ...9-timers-Prepare-for-full-preemption.patch | 8 +- ...090-x86-kvm-Require-const-tsc-for-RT.patch | 12 +- ...ec-Don-t-use-completion-s-wait-queue.patch | 22 +- .../0092-wait.h-include-atomic.h.patch | 8 +- ...mple-Simple-work-queue-implemenation.patch | 8 +- ...-a-shit-statement-in-SWORK_EVENT_PEN.patch | 8 +- ...95-completion-Use-simple-wait-queues.patch | 10 +- .../0096-fs-aio-simple-simple-work.patch | 10 +- ...voke-the-affinity-callback-via-a-wor.patch | 8 +- ...id-schedule_work-with-interrupts-dis.patch | 8 +- ...ate-hrtimer_init-hrtimer_init_sleepe.patch | 12 +- ...100-hrtimers-Prepare-full-preemption.patch | 12 +- ...s-by-default-into-the-softirq-contex.patch | 24 +- ...air-Make-the-hrtimers-non-hard-again.patch | 12 +- ...-schedule_work-call-to-helper-thread.patch | 8 +- ...te-change-before-hrtimer_cancel-in-d.patch | 8 +- ...timers-Thread-posix-cpu-timers-on-rt.patch | 14 +- ...ched-Move-task_struct-cleanup-to-RCU.patch | 16 +- ...-number-of-task-migrations-per-batch.patch | 8 +- .../0108-sched-Move-mmdrop-to-RCU-on-RT.patch | 12 +- ...e-stack-kprobe-clean-up-to-__put_tas.patch | 12 +- ...state-for-tasks-blocked-on-sleeping-.patch | 10 +- ...ount-rcu_preempt_depth-on-RT-in-migh.patch | 8 +- ...-proper-LOCK_OFFSET-for-cond_resched.patch | 8 +- .../0113-sched-Disable-TTWU_QUEUE-on-RT.patch | 8 +- ...Only-wake-up-idle-workers-if-not-blo.patch | 8 +- ...ease-the-nr-of-migratory-tasks-when-.patch | 8 +- ...-hotplug-Lightweight-get-online-cpus.patch | 8 +- ...e-disabled-counter-to-tracing-output.patch | 8 +- .../0118-lockdep-Make-it-RT-aware.patch | 8 +- ...tasklets-from-going-into-infinite-sp.patch | 8 +- ...eemption-after-reenabling-interrupts.patch | 16 +- ...oftirq-Disable-softirq-stacks-for-RT.patch | 8 +- .../0122-softirq-Split-softirq-locks.patch | 14 +- ...-use-local_bh_disable-in-netif_rx_ni.patch | 12 +- ...abling-of-softirq-processing-in-irq-.patch | 10 +- ...plit-timer-softirqs-out-of-ksoftirqd.patch | 8 +- ...cal_softirq_pending-messages-if-ksof.patch | 8 +- ...cal_softirq_pending-messages-if-task.patch | 8 +- .../0128-rtmutex-trylock-is-okay-on-RT.patch | 8 +- ...-nfs-turn-rmdir_sem-into-a-semaphore.patch | 14 +- ...he-various-new-futex-race-conditions.patch | 8 +- 
...on-when-a-requeued-RT-task-times-out.patch | 8 +- ...k-unlock-symetry-versus-pi_lock-and-.patch | 8 +- .../0133-pid.h-include-atomic.h.patch | 8 +- ...arm-include-definition-for-cpumask_t.patch | 8 +- ...ure-Do-NOT-include-rwlock.h-directly.patch | 8 +- ...36-rtmutex-Add-rtmutex_lock_killable.patch | 8 +- ...0137-rtmutex-Make-lock_killable-work.patch | 8 +- ...spinlock-Split-the-lock-types-header.patch | 8 +- .../0139-rtmutex-Avoid-include-hell.patch | 8 +- ...-rbtree-don-t-include-the-rcu-header.patch | 8 +- ...tex-Provide-rt_mutex_slowlock_locked.patch | 8 +- ...ockdep-less-version-of-rt_mutex-s-lo.patch | 8 +- ...tex-add-sleeping-lock-implementation.patch | 14 +- ...utex-implementation-based-on-rtmutex.patch | 8 +- ...wsem-implementation-based-on-rtmutex.patch | 8 +- ...lock-implementation-based-on-rtmutex.patch | 8 +- ...-preserve-state-like-a-sleeping-lock.patch | 8 +- .../0148-rtmutex-wire-up-RT-s-locking.patch | 10 +- ...utex-add-ww_mutex-addon-for-mutex-rt.patch | 8 +- .../0150-kconfig-Add-PREEMPT_RT_FULL.patch | 8 +- ...-fix-deadlock-in-device-mapper-block.patch | 8 +- ...utex-Flush-block-plug-on-__down_read.patch | 8 +- ...re-init-the-wait_lock-in-rt_mutex_in.patch | 8 +- ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 12 +- ...mutex-annotate-sleeping-lock-context.patch | 12 +- ...sable-fallback-to-preempt_disable-in.patch | 10 +- ...eck-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 8 +- .../0158-rcu-Frob-softirq-test.patch | 10 +- ...59-rcu-Merge-RCU-bh-into-RCU-preempt.patch | 8 +- ...ke-ksoftirqd-do-RCU-quiescent-states.patch | 18 +- ...nate-softirq-processing-from-rcutree.patch | 20 +- ...-use-cpu_online-instead-custom-check.patch | 8 +- ...place-local_irqsave-with-a-locallock.patch | 8 +- ..._normal_after_boot-by-default-for-RT.patch | 8 +- ...erial-omap-Make-the-locking-RT-aware.patch | 8 +- ...al-pl011-Make-the-locking-work-on-RT.patch | 8 +- ...-explicitly-initialize-the-flags-var.patch | 8 +- ...mprove-the-serial-console-PASS_LIMIT.patch | 8 +- ...0-don-t-take-the-trylock-during-oops.patch | 8 +- ...wsem-Remove-preempt_disable-variants.patch | 8 +- ...ate_mm-by-preempt_-disable-enable-_r.patch | 8 +- ...back-explicit-INIT_HLIST_BL_HEAD-ini.patch | 8 +- ...e-preemption-on-i_dir_seq-s-write-si.patch | 10 +- ...e-of-local-lock-in-multi_cpu-decompr.patch | 8 +- ...rmal-Defer-thermal-wakups-to-threads.patch | 8 +- ...e-preemption-around-local_bh_disable.patch | 8 +- ...poll-Do-not-disable-preemption-on-RT.patch | 8 +- ...er-preempt-disable-region-which-suck.patch | 8 +- .../0179-block-mq-use-cpu_light.patch | 8 +- ...ock-mq-do-not-invoke-preempt_disable.patch | 8 +- ...k-mq-don-t-complete-requests-via-IPI.patch | 10 +- ...-Make-raid5_percpu-handling-RT-aware.patch | 8 +- .../0183-rt-Introduce-cpu_chill.patch | 8 +- ...rtimer-Don-t-lose-state-in-cpu_chill.patch | 8 +- ...chill-save-task-state-in-saved_state.patch | 8 +- ...e-blk_queue_usage_counter_release-in.patch | 10 +- ...-block-Use-cpu_chill-for-retry-loops.patch | 8 +- ...cache-Use-cpu_chill-in-trylock-loops.patch | 8 +- ...t-Use-cpu_chill-instead-of-cpu_relax.patch | 10 +- ...use-swait_queue-instead-of-waitqueue.patch | 20 +- .../0191-workqueue-Use-normal-rcu.patch | 8 +- ...cal-irq-lock-instead-of-irq-disable-.patch | 8 +- ...t-workqueue-versus-ata-piix-livelock.patch | 8 +- ...tangle-worker-accounting-from-rqlock.patch | 8 +- .../0195-debugobjects-Make-RT-aware.patch | 8 +- .../0196-seqlock-Prevent-rt-starvation.patch | 8 +- ...vc_xprt_do_enqueue-use-get_cpu_light.patch | 8 +- 
...0198-net-Use-skbufhead-with-raw-lock.patch | 26 +- ...recursion-to-per-task-variable-on-RT.patch | 45 +- ...y-to-delegate-processing-a-softirq-t.patch | 12 +- ...ake-qdisc-s-busylock-in-__dev_xmit_s.patch | 10 +- ...Qdisc-use-a-seqlock-instead-seqcount.patch | 14 +- ...-missing-serialization-in-ip_send_un.patch | 8 +- .../0204-net-add-a-lock-around-icmp_sk.patch | 8 +- ...schedule_irqoff-disable-interrupts-o.patch | 16 +- ...-push-most-work-into-softirq-context.patch | 12 +- .../0207-printk-Make-rt-aware.patch | 10 +- ...n-t-try-to-print-from-IRQ-NMI-region.patch | 10 +- ...intk-Drop-the-logbuf_lock-more-often.patch | 10 +- ...n-translation-section-permission-fau.patch | 8 +- ...-irq_set_irqchip_state-documentation.patch | 8 +- ...wngrade-preempt_disable-d-region-to-.patch | 8 +- ...-preemp_disable-in-addition-to-local.patch | 8 +- ...14-kgdb-serial-Short-term-workaround.patch | 8 +- ...-sysfs-Add-sys-kernel-realtime-entry.patch | 8 +- .../0216-mm-rt-kmap_atomic-scheduling.patch | 10 +- ...highmem-Add-a-already-used-pte-check.patch | 8 +- .../0218-arm-highmem-Flush-tlb-on-unmap.patch | 8 +- .../0219-arm-Enable-highmem-for-rt.patch | 8 +- .../0220-scsi-fcoe-Make-RT-aware.patch | 10 +- ...ypto-Reduce-preempt-disabled-regions.patch | 8 +- ...-preempt-disabled-regions-more-algos.patch | 8 +- ...ypto-limit-more-FPU-enabled-sections.patch | 8 +- ...-serialize-RT-percpu-scratch-buffer-.patch | 8 +- ...d-a-lock-instead-preempt_disable-loc.patch | 8 +- ...andom_bytes-for-RT_FULL-in-init_oops.patch | 8 +- ...ackprotector-Avoid-random-pool-on-rt.patch | 8 +- .../0228-random-Make-it-work-on-rt.patch | 10 +- ...29-cpu-hotplug-Implement-CPU-pinning.patch | 10 +- ...ed-user-tasks-to-be-awakened-to-the-.patch | 8 +- ...duct-tape-RT-rwlock-usage-for-non-RT.patch | 8 +- ...ove-preemption-disabling-in-netif_rx.patch | 14 +- ...r-local_irq_disable-kmalloc-headache.patch | 8 +- ...-users-of-napi_alloc_cache-against-r.patch | 8 +- ...rialize-xt_write_recseq-sections-on-.patch | 8 +- ...Add-a-mutex-around-devnet_rename_seq.patch | 10 +- ...-Only-do-hardirq-context-test-for-ra.patch | 8 +- ...-fix-warnings-due-to-missing-PREEMPT.patch | 8 +- ...ched-Add-support-for-lazy-preemption.patch | 30 +- ...40-ftrace-Fix-trace-header-alignment.patch | 8 +- ...0241-x86-Support-for-lazy-preemption.patch | 8 +- ...-properly-check-against-preempt-mask.patch | 8 +- ...-use-proper-return-label-on-32bit-x8.patch | 8 +- ...-arm-Add-support-for-lazy-preemption.patch | 8 +- ...erpc-Add-support-for-lazy-preemption.patch | 10 +- ...-arch-arm64-Add-lazy-preempt-support.patch | 8 +- ...c-Protect-send_msg-with-a-local-lock.patch | 8 +- ...am-Replace-bit-spinlocks-with-rtmute.patch | 8 +- ...-t-disable-preemption-in-zcomp_strea.patch | 8 +- ...-zcomp_stream_get-smp_processor_id-u.patch | 8 +- ...51-tpm_tis-fix-stall-after-iowrite-s.patch | 8 +- ...t-deferral-of-watchdogd-wakeup-on-RT.patch | 18 +- ...Use-preempt_disable-enable_rt-where-.patch | 8 +- ...al_lock-unlock_irq-in-intel_pipe_upd.patch | 8 +- .../0255-drm-i915-disable-tracing-on-RT.patch | 8 +- ...M_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch | 8 +- ...roups-use-simple-wait-in-css_release.patch | 16 +- ...vert-callback_lock-to-raw_spinlock_t.patch | 8 +- ...-a-locallock-instead-preempt_disable.patch | 10 +- ...rkqueue-Prevent-deadlock-stall-on-RT.patch | 8 +- ...t-tasks-to-cache-one-sigqueue-struct.patch | 34 +- ...0262-Add-localversion-for-RT-release.patch | 8 +- ...iommu-Use-a-locallock-instead-local_.patch | 8 +- .../0264-powerpc-reshuffle-TIF-bits.patch | 8 +- 
...-Convert-show_lock-to-raw_spinlock_t.patch | 8 +- ...isable-interrupts-independently-of-t.patch | 8 +- ...-Fix-a-lockup-in-wait_for_completion.patch | 8 +- ...8-kthread-add-a-global-worker-thread.patch | 8 +- ...voke-the-affinity-callback-via-a-wor.patch | 8 +- ...ssing-work_struct-in-irq_set_affinit.patch | 8 +- ...-arm-imx6-cpuidle-Use-raw_spinlock_t.patch | 8 +- ...to-change-rcu_normal_after_boot-on-R.patch | 8 +- ...chtec-fix-stream_open.cocci-warnings.patch | 12 +- ...-Drop-a-preempt_disable_rt-statement.patch | 8 +- ...notification-of-canceling-timers-on-.patch | 12 +- ...ure-lock-unlock-symetry-versus-pi_lo.patch | 8 +- ...-bug-on-when-a-requeued-RT-task-time.patch | 8 +- ...andle-the-various-new-futex-race-con.patch | 8 +- ...karound-migrate_disable-enable-in-di.patch | 8 +- ...-Make-the-futex_hash_bucket-lock-raw.patch | 8 +- ...futex-Delay-deallocation-of-pi_state.patch | 8 +- ...disable-preemption-in-zswap_frontswa.patch | 8 +- debian/patches-rt/0283-revert-aio.patch | 10 +- .../0284-fs-aio-simple-simple-work.patch | 10 +- debian/patches-rt/0285-revert-thermal.patch | 8 +- ...rmal-Defer-thermal-wakups-to-threads.patch | 8 +- debian/patches-rt/0287-revert-block.patch | 10 +- ...e-blk_queue_usage_counter_release-in.patch | 10 +- debian/patches-rt/0289-workqueue-rework.patch | 12 +- .../0290-Linux-4.19.94-rt38-REBASE.patch | 20 - ...0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch | 48 ++ ...0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch | 42 + ...nsure-inactive_timer-runs-in-hardirq.patch | 50 ++ ...temp-make-pkg_temp_lock-a-raw-spinlo.patch | 120 +++ ...lock_t-instread-disabling-preemption.patch | 296 +++++++ ...t-the-timer-expire-in-hardirq-contex.patch | 54 ++ ...k-preemption-level-before-looking-at.patch | 34 + ..._ONCE-to-access-timer-base-in-hrimer.patch | 42 + ...ab-the-expiry-lock-for-non-soft-hrti.patch | 41 + ...using-hrtimer_grab_expiry_lock-on-mi.patch | 43 + ...ssing-bracket-and-hide-migration_bas.patch | 75 ++ ...lock-expiry-lock-in-the-early-return.patch | 41 + ...s-enable-Use-sleeping_lock-to-annota.patch | 58 ++ ..._allowed_ptr-Check-cpus_mask-not-cpu.patch | 39 + ...Remove-dead-__migrate_disabled-check.patch | 39 + ...e-disable-Protect-cpus_ptr-with-lock.patch | 47 ++ ...processor_id-Don-t-use-cpumask_equal.patch | 45 ++ ...utex_hash_bucket-spinlock_t-again-an.patch | 738 ++++++++++++++++++ ...Clean-pi_blocked_on-in-the-error-cas.patch | 102 +++ ...ib-ubsan-Don-t-seralize-UBSAN-report.patch | 308 ++++++++ ...the-lock-of-kmemleak_object-to-raw_s.patch | 293 +++++++ ...igrate_enable-Use-select_fallback_rq.patch | 66 ++ ...ched-Lazy-migrate_disable-processing.patch | 616 +++++++++++++++ ...grate_enable-Use-stop_one_cpu_nowait.patch | 121 +++ ...alize-split-page-table-locks-for-vec.patch | 87 +++ ...nlock_t-and-rwlock_t-a-RCU-section-o.patch | 129 +++ ...te_enable-must-access-takedown_cpu_t.patch | 55 ++ ...r_id-Adjust-check_preemption_disable.patch | 41 + ...able-Busy-loop-until-the-migration-r.patch | 57 ++ .../0319-Linux-4.19.103-rt42-REBASE.patch | 20 + debian/patches-rt/series | 31 +- 322 files changed, 5172 insertions(+), 1404 deletions(-) delete mode 100644 debian/patches-rt/0290-Linux-4.19.94-rt38-REBASE.patch create mode 100644 debian/patches-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch create mode 100644 debian/patches-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch create mode 100644 debian/patches-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch create mode 100644 debian/patches-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch 
 create mode 100644 debian/patches-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
 create mode 100644 debian/patches-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch
 create mode 100644 debian/patches-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch
 create mode 100644 debian/patches-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch
 create mode 100644 debian/patches-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch
 create mode 100644 debian/patches-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch
 create mode 100644 debian/patches-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch
 create mode 100644 debian/patches-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch
 create mode 100644 debian/patches-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch
 create mode 100644 debian/patches-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch
 create mode 100644 debian/patches-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch
 create mode 100644 debian/patches-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch
 create mode 100644 debian/patches-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch
 create mode 100644 debian/patches-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch
 create mode 100644 debian/patches-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
 create mode 100644 debian/patches-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch
 create mode 100644 debian/patches-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch
 create mode 100644 debian/patches-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch
 create mode 100644 debian/patches-rt/0312-sched-Lazy-migrate_disable-processing.patch
 create mode 100644 debian/patches-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch
 create mode 100644 debian/patches-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch
 create mode 100644 debian/patches-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
 create mode 100644 debian/patches-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch
 create mode 100644 debian/patches-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch
 create mode 100644 debian/patches-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch
 create mode 100644 debian/patches-rt/0319-Linux-4.19.103-rt42-REBASE.patch

diff --git a/debian/changelog b/debian/changelog
index 1620f547b..86a8fc429 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -813,6 +813,7 @@ linux (4.19.103-1) UNRELEASED; urgency=medium
     out-of-tree modules"" for context changes in 4.19.99
   * Refresh "ARM: dts: bcm283x: Correct vchiq compatible string" for context
     changes in 4.19.99
+  * [rt] Update to 4.19.103-rt42
 
   [ Ben Hutchings ]
   * [x86] Drop "Add a SysRq option to lift kernel lockdown" (Closes: #947021)
diff --git a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch
index 4206e8cae..f04322218 100644
--- a/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch
+++ b/debian/patches-rt/0001-ARM-at91-add-TCB-registers-definitions.patch
@@ -1,8 +1,8 @@
-From b08620f77bc1fbffba40e033f492a241bc2c62d6 Mon Sep 17 00:00:00 2001
+From adb82ae54a91c725fe773b745ff424807228c874 Mon Sep 17 00:00:00 2001
 From: Alexandre Belloni
 Date: Thu, 13 Sep 2018 13:30:18 +0200
-Subject: [PATCH 001/290] ARM: at91: add TCB registers definitions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 001/319] ARM: at91: add TCB registers definitions +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add registers and bits definitions for the timer counter blocks found on Atmel ARM SoCs. @@ -206,5 +206,5 @@ index 000000000000..657e234b1483 + +#endif /* __SOC_ATMEL_TCB_H */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch index f0556e179..da9fe47b6 100644 --- a/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch +++ b/debian/patches-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch @@ -1,9 +1,9 @@ -From bc9f46c6ecb0be8a5a14b31b032740f3a1d74761 Mon Sep 17 00:00:00 2001 +From 16aff6aa0f31ed7dc6a68735bd65e263810395bc Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:19 +0200 -Subject: [PATCH 002/290] clocksource/drivers: Add a new driver for the Atmel +Subject: [PATCH 002/319] clocksource/drivers: Add a new driver for the Atmel ARM TC blocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add a driver for the Atmel Timer Counter Blocks. This driver provides a clocksource and two clockevent devices. @@ -481,5 +481,5 @@ index 000000000000..21fbe430f91b +} +TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch b/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch index be41e3f45..18ec4aedb 100644 --- a/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch +++ b/debian/patches-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch @@ -1,9 +1,9 @@ -From bfee0cb2aad9baa607add364cd5a6c05d6782641 Mon Sep 17 00:00:00 2001 +From 11cf60ef1efd769bc725a511c7a4bcdc2f5256cf Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:20 +0200 -Subject: [PATCH 003/290] clocksource/drivers: timer-atmel-tcb: add clockevent +Subject: [PATCH 003/319] clocksource/drivers: timer-atmel-tcb: add clockevent device on separate channel -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add an other clockevent device that uses a separate TCB channel when available. 
@@ -267,5 +267,5 @@ index 21fbe430f91b..63ce3b69338a 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch index 3f6100635..333cff292 100644 --- a/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch +++ b/debian/patches-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch @@ -1,8 +1,8 @@ -From 00029cfcfece0c114d6428fcb7e52545a88cf2c1 Mon Sep 17 00:00:00 2001 +From 7e01ab45bcd7a5cdfeb78da4a89a02fe78225b87 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:21 +0200 -Subject: [PATCH 004/290] clocksource/drivers: atmel-pit: make option silent -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 004/319] clocksource/drivers: atmel-pit: make option silent +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz To conform with the other option, make the ATMEL_PIT option silent so it can be selected from the platform @@ -32,5 +32,5 @@ index 0ab22e7037f4..34b07047b91f 100644 config ATMEL_ST bool "Atmel ST timer support" if COMPILE_TEST -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch b/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch index b35117d9b..2aef5ab70 100644 --- a/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch +++ b/debian/patches-rt/0005-ARM-at91-Implement-clocksource-selection.patch @@ -1,8 +1,8 @@ -From d6f57849d568f7bad4a562fc3466f773384c5af7 Mon Sep 17 00:00:00 2001 +From 1f8b901e987ee6fbb0a34495e4095bd064af77d7 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:22 +0200 -Subject: [PATCH 005/290] ARM: at91: Implement clocksource selection -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 005/319] ARM: at91: Implement clocksource selection +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Allow selecting and unselecting the PIT clocksource driver so it doesn't have to be compile when unused. @@ -51,5 +51,5 @@ index 903f23c309df..fa493a86e2bb 100644 bool -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch b/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch index 3dcc1d582..2bd154887 100644 --- a/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch +++ b/debian/patches-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch @@ -1,8 +1,8 @@ -From 218e2c7678889a89128237d82a61bf1f23d9cc77 Mon Sep 17 00:00:00 2001 +From ba834da858cfeffa02be54b46249bdccbf9fd309 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:23 +0200 -Subject: [PATCH 006/290] ARM: configs: at91: use new TCB timer driver -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 006/319] ARM: configs: at91: use new TCB timer driver +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to timer-atmel-tcb. 
@@ -39,5 +39,5 @@ index 2080025556b5..f2bbc6339ca6 100644 CONFIG_EEPROM_AT24=y CONFIG_SCSI=y -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch b/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch index 1abc8cd1a..933020194 100644 --- a/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch +++ b/debian/patches-rt/0007-ARM-configs-at91-unselect-PIT.patch @@ -1,8 +1,8 @@ -From 4773aa4fa5a18ca0b25a456fc06be41464775211 Mon Sep 17 00:00:00 2001 +From e3b88dcec86a917afae928ccc22d054e19d7f2a1 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 13 Sep 2018 13:30:24 +0200 -Subject: [PATCH 007/290] ARM: configs: at91: unselect PIT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 007/319] ARM: configs: at91: unselect PIT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The PIT is not required anymore to successfully boot and may actually harm in case preempt-rt is used because the PIT interrupt is shared. @@ -40,5 +40,5 @@ index f2bbc6339ca6..be92871ab155 100644 CONFIG_UACCESS_WITH_MEMCPY=y CONFIG_ZBOOT_ROM_TEXT=0x0 -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch index 9dbf3a7fb..74089219f 100644 --- a/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ b/debian/patches-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch @@ -1,9 +1,9 @@ -From 5322b48ab45dcd7e238e7a92c17746b904092021 Mon Sep 17 00:00:00 2001 +From 6f4c4902f7a8db4d12083c42bc3915b6e85140c7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Jul 2018 13:38:54 +0100 -Subject: [PATCH 008/290] irqchip/gic-v3-its: Move pending table allocation to +Subject: [PATCH 008/319] irqchip/gic-v3-its: Move pending table allocation to init time -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Marc Zyngier Signed-off-by: Sebastian Andrzej Siewior @@ -155,7 +155,7 @@ index 050d6e040128..fc4c319ee1d7 100644 return err; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h -index 3188c0bef3e7..5b57501fd2e7 100644 +index 1d21e98d6854..fdddead7e307 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -585,6 +585,7 @@ struct rdists { @@ -167,5 +167,5 @@ index 3188c0bef3e7..5b57501fd2e7 100644 struct page *prop_page; u64 flags; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch index 97995ac58..6d3df8280 100644 --- a/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch +++ b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch @@ -1,8 +1,8 @@ -From e2fc3df409c6764dc2c394061aae000a86437a7c Mon Sep 17 00:00:00 2001 +From ee9a7655a308cca866723c29779c21d2d6cdd5d7 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 009/290] kthread: convert worker lock to raw spinlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 009/319] kthread: convert worker lock to raw spinlock 
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In order to enable the queuing of kthread work items from hardirq context even when PREEMPT_RT_FULL is enabled, convert the worker @@ -199,5 +199,5 @@ index 087d18d771b5..5641b55783a6 100644 return ret; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch b/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch index 002204ded..9c49bb226 100644 --- a/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch +++ b/debian/patches-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch @@ -1,11 +1,11 @@ -From ba865f26206ec12d7d6a140c2a9f6871c7be3d5f Mon Sep 17 00:00:00 2001 +From 4a1b145c2c26fba4753a539feef323c721bc37b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 8 Oct 2018 14:09:37 +0300 -Subject: [PATCH 010/290] crypto: caam/qi - simplify CGR allocation, freeing +Subject: [PATCH 010/319] crypto: caam/qi - simplify CGR allocation, freeing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [Upstream commit 29e83c757006fd751966bdc53392bb22d74179c6] @@ -136,5 +136,5 @@ index 357b69f57072..b6c8acc30853 100644 /** * qi_cache_alloc - Allocate buffers from CAAM-QI cache -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch index 6ef84bf95..96da2ec57 100644 --- a/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch +++ b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch @@ -1,8 +1,8 @@ -From 147b5bb1f8cd2975bdfa1368ba792f28df1ab8a6 Mon Sep 17 00:00:00 2001 +From b8bc45a342f3e594019e206b4d7a4a0a740b2b4d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 7 Jan 2019 13:52:31 +0100 -Subject: [PATCH 011/290] sched/fair: Robustify CFS-bandwidth timer locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 011/319] sched/fair: Robustify CFS-bandwidth timer locking +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Traditionally hrtimer callbacks were run with IRQs disabled, but with the introduction of HRTIMER_MODE_SOFT it is possible they run from @@ -30,10 +30,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index f0abb8fe0ae9..b5e03fc56d65 100644 +index 7f4f4ab5bfef..0f1ba3d72336 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4560,7 +4560,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) +@@ -4576,7 +4576,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; @@ -42,7 +42,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 if (!cfs_rq_throttled(cfs_rq)) goto next; -@@ -4579,7 +4579,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) +@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) unthrottle_cfs_rq(cfs_rq); next: @@ -51,7 +51,7 
@@ index f0abb8fe0ae9..b5e03fc56d65 100644 if (!remaining) break; -@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) +@@ -4611,7 +4611,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. */ @@ -60,7 +60,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 { u64 runtime; int throttled; -@@ -4635,10 +4635,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) +@@ -4651,10 +4651,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; @@ -73,7 +73,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); -@@ -4746,16 +4746,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) +@@ -4762,16 +4762,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); @@ -94,7 +94,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 return; } -@@ -4765,17 +4766,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) +@@ -4781,17 +4782,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) if (runtime) cfs_b->distribute_running = 1; @@ -115,7 +115,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 } /* -@@ -4855,11 +4856,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4871,11 +4872,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); @@ -129,7 +129,7 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) -@@ -4895,11 +4897,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +@@ -4911,11 +4913,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) count = 0; } @@ -144,5 +144,5 @@ index f0abb8fe0ae9..b5e03fc56d65 100644 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch b/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch index 2da3751a9..db2ce8643 100644 --- a/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch +++ b/debian/patches-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch @@ -1,8 +1,8 @@ -From fd46cb23c2b2f9fe3514d2e773f289e0e2f46d73 Mon Sep 17 00:00:00 2001 +From 852931124f107be1f8224e3c3ae7a3906e8bce28 Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Mon, 19 Sep 2011 14:51:14 -0700 -Subject: [PATCH 012/290] arm: Convert arm boot_lock to raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 012/319] arm: Convert arm boot_lock to raw +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The arm boot_lock is used by the secondary processor startup code. The locking task is the idle thread, which has idle->sched_class == &idle_sched_class. @@ -428,5 +428,5 @@ index c2366510187a..6b60f582b738 100644 return pen_release != -1 ? 
-ENOSYS : 0; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch index 4db56668c..13e1f7ec1 100644 --- a/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch +++ b/debian/patches-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch @@ -1,9 +1,9 @@ -From 2ab61b3cb2b66bd2515c8531da37be20f5cc14a9 Mon Sep 17 00:00:00 2001 +From 96c91f4fbcd59a2f307356e5204c81d846ba7968 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Jul 2018 18:25:31 +0200 -Subject: [PATCH 013/290] x86/ioapic: Don't let setaffinity unmask threaded EOI +Subject: [PATCH 013/319] x86/ioapic: Don't let setaffinity unmask threaded EOI interrupt too early -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There is an issue with threaded interrupts which are marked ONESHOT and using the fasteoi handler. @@ -97,5 +97,5 @@ index fa3b85b222e3..1bdad61a3ef7 100644 static void ioapic_ir_ack_level(struct irq_data *irq_data) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch index 5654b34a4..641219257 100644 --- a/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch +++ b/debian/patches-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch @@ -1,8 +1,8 @@ -From 56933f479ee6e504e58236259c2fdacd09bcdeb1 Mon Sep 17 00:00:00 2001 +From 606b2f0a914ecc6c0badb779e346538c22096ceb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 18:19:48 +0200 -Subject: [PATCH 014/290] cgroup: use irqsave in cgroup_rstat_flush_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 014/319] cgroup: use irqsave in cgroup_rstat_flush_locked() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock either with spin_lock_irq() or spin_lock_irqsave(). @@ -46,5 +46,5 @@ index bb95a35e8c2d..3266a9781b4e 100644 /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch index cea1a1723..9aee2698c 100644 --- a/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ b/debian/patches-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch @@ -1,8 +1,8 @@ -From 11a5fe1b2f98fc37eac8919e99282c1b91f5bb05 Mon Sep 17 00:00:00 2001 +From 56cec20764014ac10973058dabf6944a533ee8e7 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 3 Jul 2018 13:34:30 -0500 -Subject: [PATCH 015/290] fscache: initialize cookie hash table raw spinlocks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 015/319] fscache: initialize cookie hash table raw spinlocks +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The fscache cookie mechanism uses a hash table of hlist_bl_head structures. 
The PREEMPT_RT patcheset adds a raw spinlock to this structure and so on PREEMPT_RT @@ -60,5 +60,5 @@ index 84b90a79d75a..87a9330eafa2 100644 /** * fscache_register_netfs - Register a filesystem as desiring caching services -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch index c9078f6cd..03c1e8c71 100644 --- a/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +++ b/debian/patches-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch @@ -1,11 +1,11 @@ -From 42e5ab6924b31025c97846b749ebe188ee4b5fdd Mon Sep 17 00:00:00 2001 +From 0f9c9b3757dc490f5d7f8f37432eb2fe10f9c325 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 Aug 2018 21:59:04 +0200 -Subject: [PATCH 016/290] Drivers: hv: vmbus: include header for get_irq_regs() +Subject: [PATCH 016/319] Drivers: hv: vmbus: include header for get_irq_regs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On !RT the header file get_irq_regs() gets pulled in via other header files. On RT it does not and the build fails: @@ -36,5 +36,5 @@ index 87d3d7da78f8..1d2d8a4b837d 100644 #include "hv_trace.h" -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch b/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch index 3cb5b2836..cda59d942 100644 --- a/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch +++ b/debian/patches-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch @@ -1,8 +1,8 @@ -From bc0b4a3ef268cec82163752ddaa730d8ec193d7c Mon Sep 17 00:00:00 2001 +From 7703034d300cc9f80305b491270093af4772c351 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 11 Oct 2018 16:39:59 +0200 -Subject: [PATCH 017/290] percpu: include irqflags.h for raw_local_irq_save() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 017/319] percpu: include irqflags.h for raw_local_irq_save() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The header percpu.h header file is using raw_local_irq_save() but does not include irqflags.h for its definition. 
It compiles because the @@ -29,5 +29,5 @@ index 1817a8415a5e..942d64c0476e 100644 #ifdef CONFIG_SMP -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0018-efi-Allow-efi-runtime.patch b/debian/patches-rt/0018-efi-Allow-efi-runtime.patch index 4bf055f03..6199c665d 100644 --- a/debian/patches-rt/0018-efi-Allow-efi-runtime.patch +++ b/debian/patches-rt/0018-efi-Allow-efi-runtime.patch @@ -1,8 +1,8 @@ -From da834999f8c566ac722fd157de36dd3f9335c2ad Mon Sep 17 00:00:00 2001 +From 4cf70c2e7df43e00fae3048cb79a7f525d8ac3aa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:06:10 +0200 -Subject: [PATCH 018/290] efi: Allow efi=runtime -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 018/319] efi: Allow efi=runtime +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In case the option "efi=noruntime" is default at built-time, the user could overwrite its sate by `efi=runtime' and allow it again. @@ -28,5 +28,5 @@ index d54fca902e64..5db20908aa9c 100644 } early_param("efi", parse_efi_cmdline); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch b/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch index b6499b28e..19af2f3bb 100644 --- a/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch +++ b/debian/patches-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch @@ -1,8 +1,8 @@ -From d24d03df7b0c0b6a0975a373a815487a220ee93b Mon Sep 17 00:00:00 2001 +From c15f388fcff8ec127288af90687c7cf8de854553 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 24 Jul 2018 14:48:55 +0200 -Subject: [PATCH 019/290] x86/efi: drop task_lock() from efi_switch_mm() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 019/319] x86/efi: drop task_lock() from efi_switch_mm() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz efi_switch_mm() is a wrapper around switch_mm() which saves current's ->active_mm, sets the requests mm as ->active_mm and invokes @@ -51,5 +51,5 @@ index ee5d08f25ce4..e8da7f492970 100644 #ifdef CONFIG_EFI_MIXED -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch index 6b663ec42..41bbbed96 100644 --- a/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch @@ -1,9 +1,9 @@ -From 057073be1e10190ea715360a6fde5478df36b563 Mon Sep 17 00:00:00 2001 +From 9907d44a7495afb6c6888dab406398a280788ab4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 09:13:42 +0200 -Subject: [PATCH 020/290] arm64: KVM: compute_layout before altenates are +Subject: [PATCH 020/319] arm64: KVM: compute_layout before altenates are applied -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz compute_layout() is invoked as part of an alternative fixup under stop_machine() and needs a sleeping lock as part of get_random_long(). 
@@ -79,5 +79,5 @@ index c712a7376bc1..792da0e125de 100644 * Compute HYP VA by using the same computation as kern_hyp_va() */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch index 42fed655b..bd357ad08 100644 --- a/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch @@ -1,9 +1,9 @@ -From af2b5f6fa1369ed12974a97865df6125bbede492 Mon Sep 17 00:00:00 2001 +From 7a8711714cf15774878d639964a40203688b74db Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 31 Aug 2018 14:16:30 +0200 -Subject: [PATCH 021/290] of: allocate / free phandle cache outside of the +Subject: [PATCH 021/319] of: allocate / free phandle cache outside of the devtree_lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The phandle cache code allocates memory while holding devtree_lock which is a raw_spinlock_t. Memory allocation (and free()) is not possible on @@ -99,5 +99,5 @@ index f0dbb7ad88cf..c59b30bab0e0 100644 void __init of_core_init(void) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch index 2d140c539..117ab09a9 100644 --- a/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch +++ b/debian/patches-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch @@ -1,8 +1,8 @@ -From 90ac218590a6d692a0dafeeef4bfafea30811be3 Mon Sep 17 00:00:00 2001 +From 4434befd6ff3790ce40983f39d70df4585266ad8 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Tue, 18 Sep 2018 10:29:31 -0500 -Subject: [PATCH 022/290] mm/kasan: make quarantine_lock a raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 022/319] mm/kasan: make quarantine_lock a raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The static lock quarantine_lock is used in quarantine.c to protect the quarantine queue datastructures. It is taken inside quarantine queue @@ -94,5 +94,5 @@ index 3a8ddf8baf7d..b209dbaefde8 100644 qlist_free_all(&to_free, cache); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch index b022cda37..8ab2f79f6 100644 --- a/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch +++ b/debian/patches-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch @@ -1,9 +1,9 @@ -From 37d23bb703b59574c9eeebba86f8c8b9cf382d15 Mon Sep 17 00:00:00 2001 +From 546655545a0342a811c432b3fee50ef790f47974 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 29 Oct 2018 11:53:01 +0100 -Subject: [PATCH 023/290] EXP rcu: Revert expedited GP parallelization +Subject: [PATCH 023/319] EXP rcu: Revert expedited GP parallelization cleverness -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz (Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu) @@ -47,5 +47,5 @@ index 0b2c2ad69629..a0486414edb4 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch b/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch index 00308a357..6be4fd38a 100644 --- a/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch +++ b/debian/patches-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch @@ -1,11 +1,11 @@ -From 5a62bb92ae6df828a0b6cf14c0e1d55558f0002e Mon Sep 17 00:00:00 2001 +From 26a8d93db88cf4afa5468006c102755a94658ffd Mon Sep 17 00:00:00 2001 From: He Zhe Date: Wed, 19 Dec 2018 16:30:57 +0100 -Subject: [PATCH 024/290] kmemleak: Turn kmemleak_lock to raw spinlock on RT +Subject: [PATCH 024/319] kmemleak: Turn kmemleak_lock to raw spinlock on RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz kmemleak_lock, as a rwlock on RT, can possibly be held in atomic context and causes the follow BUG. @@ -165,5 +165,5 @@ index 5eeabece0c17..92ce99b15f2b 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch index 6435bc90d..1ff878267 100644 --- a/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch +++ b/debian/patches-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch @@ -1,8 +1,8 @@ -From 78cfba1f8655c35ecd538b02a09000d4d7d18af9 Mon Sep 17 00:00:00 2001 +From 72ee900a2ae6252f72561df983c8dd464ccadb3c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 28 Oct 2016 23:05:11 +0200 -Subject: [PATCH 025/290] NFSv4: replace seqcount_t with a seqlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 025/319] NFSv4: replace seqcount_t with a seqlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me because it maps to preempt_disable() in -RT which I can't have at this @@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c -index 74ff459b75ef..6b422d1b5ae1 100644 +index b0c0c2fc2fba..26565ba05dc1 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode, @@ -58,7 +58,7 @@ index 5b61520dce88..2771aafaca19 100644 }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 792f8821b5d6..041d235cf5ef 100644 +index 7834b325394f..026164678f37 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2870,7 +2870,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, @@ -132,5 +132,5 @@ index 
b3086e99420c..c9bf1eb7e1b2 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index cdedd1420..93e7d070c 100644 --- a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -1,12 +1,12 @@ -From f8da0dd0f5a6dd0097a13f5f59567b9d7a91d0f4 Mon Sep 17 00:00:00 2001 +From 489a86a9a99c1385e72e12322642def91e35cadb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 4 Apr 2017 12:50:16 +0200 -Subject: [PATCH 026/290] kernel: sched: Provide a pointer to the valid CPU +Subject: [PATCH 026/319] kernel: sched: Provide a pointer to the valid CPU mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not @@ -271,7 +271,7 @@ index 9eb99a43f849..e4d0cfebaac5 100644 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 20f5ba262cc0..68e673278301 100644 +index 0530de9a4efc..4298a87b9de6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -660,7 +660,8 @@ struct task_struct { @@ -321,10 +321,10 @@ index ff956ccbb6df..7bb129c5b412 100644 } diff --git a/kernel/fork.c b/kernel/fork.c -index 8cb5cd7c97e1..8c285876eb52 100644 +index 1a2d18e98bf9..bc182d6fa2a9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -840,6 +840,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) +@@ -850,6 +850,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) #ifdef CONFIG_STACKPROTECTOR tsk->stack_canary = get_random_canary(); #endif @@ -573,10 +573,10 @@ index ebec37cb3be9..4b13df38c069 100644 !dl_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index b5e03fc56d65..6e6d9e999814 100644 +index 0f1ba3d72336..27f9f9a785c1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -1662,7 +1662,7 @@ static void task_numa_compare(struct task_numa_env *env, +@@ -1678,7 +1678,7 @@ static void task_numa_compare(struct task_numa_env *env, * be incurred if the tasks were swapped. 
*/ /* Skip this swap candidate if cannot move to the source cpu */ @@ -585,7 +585,7 @@ index b5e03fc56d65..6e6d9e999814 100644 goto unlock; /* -@@ -1760,7 +1760,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, +@@ -1776,7 +1776,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ @@ -594,7 +594,7 @@ index b5e03fc56d65..6e6d9e999814 100644 continue; env->dst_cpu = cpu; -@@ -5743,7 +5743,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, +@@ -5782,7 +5782,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -603,7 +603,7 @@ index b5e03fc56d65..6e6d9e999814 100644 continue; local_group = cpumask_test_cpu(this_cpu, -@@ -5875,7 +5875,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this +@@ -5914,7 +5914,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -612,7 +612,7 @@ index b5e03fc56d65..6e6d9e999814 100644 if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -5915,7 +5915,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p +@@ -5954,7 +5954,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p { int new_cpu = cpu; @@ -621,7 +621,7 @@ index b5e03fc56d65..6e6d9e999814 100644 return prev_cpu; /* -@@ -6032,7 +6032,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int +@@ -6071,7 +6071,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; @@ -630,7 +630,7 @@ index b5e03fc56d65..6e6d9e999814 100644 for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6066,7 +6066,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t +@@ -6105,7 +6105,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -639,7 +639,7 @@ index b5e03fc56d65..6e6d9e999814 100644 continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6129,7 +6129,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t +@@ -6168,7 +6168,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; @@ -648,7 +648,7 @@ index b5e03fc56d65..6e6d9e999814 100644 continue; if (available_idle_cpu(cpu)) break; -@@ -6166,7 +6166,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) +@@ -6205,7 +6205,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -657,7 +657,7 @@ index b5e03fc56d65..6e6d9e999814 100644 /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6384,7 +6384,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f +@@ -6423,7 +6423,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && 
!wake_cap(p, cpu, prev_cpu) @@ -666,7 +666,7 @@ index b5e03fc56d65..6e6d9e999814 100644 } rcu_read_lock(); -@@ -7123,14 +7123,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7162,14 +7162,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -683,7 +683,7 @@ index b5e03fc56d65..6e6d9e999814 100644 int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7150,7 +7150,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7189,7 +7189,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -692,7 +692,7 @@ index b5e03fc56d65..6e6d9e999814 100644 env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7747,7 +7747,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) +@@ -7786,7 +7786,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -701,7 +701,7 @@ index b5e03fc56d65..6e6d9e999814 100644 * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. -@@ -8362,7 +8362,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) +@@ -8401,7 +8401,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -710,7 +710,7 @@ index b5e03fc56d65..6e6d9e999814 100644 */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8758,7 +8758,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, +@@ -8797,7 +8797,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest CPU can't be * moved to this_cpu: */ @@ -781,5 +781,5 @@ index 5522692100ba..8b4be8e1802a 100644 trace_foo_with_template_simple("HELLO", cnt); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch index b36c46001..b7a3bab6b 100644 --- a/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch +++ b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch @@ -1,8 +1,8 @@ -From a73bbe4b98f56b8c69e3932df7e083d1d94f5109 Mon Sep 17 00:00:00 2001 +From c336aaa1bcc073572073c3ce1629aafd37220d87 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 027/290] kernel/sched/core: add migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 027/319] kernel/sched/core: add migrate_disable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz --- include/linux/preempt.h | 23 +++++++ @@ -54,7 +54,7 @@ index c01813c3fbe9..3196d0e76719 100644 #ifdef MODULE diff --git a/include/linux/sched.h b/include/linux/sched.h -index 68e673278301..038d0faaa1d5 100644 +index 4298a87b9de6..0489d3e0e78c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -662,6 +662,13 @@ struct task_struct { @@ -262,5 +262,5 @@ index 78fadf0438ea..5027158d3908 100644 #undef PN #undef __PN -- -2.24.1 +2.25.0 diff --git 
a/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch index 4090a1b99..cb042f9ba 100644 --- a/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch +++ b/debian/patches-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch @@ -1,9 +1,9 @@ -From ac059eda6d9782325d4d8432b5bee7638eafd90a Mon Sep 17 00:00:00 2001 +From d3a1cee03ad56c7b66d8dfab844cff48e8050fd7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 9 Oct 2018 17:34:50 +0200 -Subject: [PATCH 028/290] sched/migrate_disable: Add export_symbol_gpl for +Subject: [PATCH 028/319] sched/migrate_disable: Add export_symbol_gpl for __migrate_disabled -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Jonathan reported that lttng/modules can't use __migrate_disabled(). This function is only used by sched/core itself and the tracing @@ -34,5 +34,5 @@ index d0450f06612c..e6022cc2605b 100644 static void __do_set_cpus_allowed_tail(struct task_struct *p, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch b/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch index 920978aff..7ba86bf82 100644 --- a/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch +++ b/debian/patches-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch @@ -1,8 +1,8 @@ -From b4f795e23aa449ae7116c445646a5ee51f90c022 Mon Sep 17 00:00:00 2001 +From 0ba8ac28e3e8714926e40651b2f898178bdbeec7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Mar 2016 10:51:06 +0100 -Subject: [PATCH 029/290] arm: at91: do not disable/enable clocks in a row -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 029/319] arm: at91: do not disable/enable clocks in a row +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Currently the driver will disable the clock and enable it one line later if it is switching from periodic mode into one shot. 
@@ -94,5 +94,5 @@ index 43f4d5c4d6fa..de6baf564dfe 100644 .set_state_oneshot = tc_set_oneshot, }, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch b/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch index e7b45d9af..3507be01f 100644 --- a/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch +++ b/debian/patches-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch @@ -1,12 +1,12 @@ -From 45b7375a7348d45d17515da0b294d479805fd855 Mon Sep 17 00:00:00 2001 +From a848e5afd3ed780d117cbc41dd8e6656ec93218d Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Mon, 8 Mar 2010 18:57:04 +0100 -Subject: [PATCH 030/290] clocksource: TCLIB: Allow higher clock rates for +Subject: [PATCH 030/319] clocksource: TCLIB: Allow higher clock rates for clock events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz As default the TCLIB uses the 32KiHz base clock rate for clock events. Add a compile time selection to allow higher clock resulution. @@ -166,5 +166,5 @@ index 3726eacdf65d..0900dec7ec04 100644 tristate "Dummy IRQ handler" default n -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch b/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch index 371719dbc..52195b294 100644 --- a/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch +++ b/debian/patches-rt/0031-timekeeping-Split-jiffies-seqlock.patch @@ -1,8 +1,8 @@ -From fe98a4f2036225a3ccb28c230f5245fa0eddec4c Mon Sep 17 00:00:00 2001 +From e63f98f50f1d848783a9b8612216ecb52f2f4f2d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Feb 2013 22:36:59 +0100 -Subject: [PATCH 031/290] timekeeping: Split jiffies seqlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 031/319] timekeeping: Split jiffies seqlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so it can be taken in atomic context on RT. 
@@ -77,10 +77,10 @@ index a02e0f6b287c..32f5101f07ce 100644 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 5b33e2f5c0ed..54fd344ef973 100644 +index 48403fb653c2..e774a49176cc 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -67,7 +67,8 @@ static void tick_do_update_jiffies64(ktime_t now) +@@ -68,7 +68,8 @@ static void tick_do_update_jiffies64(ktime_t now) return; /* Reevaluate with jiffies_lock held */ @@ -90,7 +90,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644 delta = ktime_sub(now, last_jiffies_update); if (delta >= tick_period) { -@@ -90,10 +91,12 @@ static void tick_do_update_jiffies64(ktime_t now) +@@ -94,10 +95,12 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } else { @@ -105,7 +105,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644 update_wall_time(); } -@@ -104,12 +107,14 @@ static ktime_t tick_init_jiffy_update(void) +@@ -108,12 +111,14 @@ static ktime_t tick_init_jiffy_update(void) { ktime_t period; @@ -122,7 +122,7 @@ index 5b33e2f5c0ed..54fd344ef973 100644 return period; } -@@ -652,10 +657,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) +@@ -656,10 +661,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) /* Read jiffies and the time when jiffies were updated last */ do { @@ -167,5 +167,5 @@ index 141ab3ab0354..099737f6f10c 100644 #define CS_NAME_LEN 32 -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch b/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch index 44cd95f22..3d90ead44 100644 --- a/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch +++ b/debian/patches-rt/0032-signal-Revert-ptrace-preempt-magic.patch @@ -1,8 +1,8 @@ -From fc38cdec8c7fa26cc2b0520c1cd0eb9ff96c969a Mon Sep 17 00:00:00 2001 +From 39f1c01a90cdd97225664c5820f5a8032aeff563 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 21 Sep 2011 19:57:12 +0200 -Subject: [PATCH 032/290] signal: Revert ptrace preempt magic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 032/319] signal: Revert ptrace preempt magic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more than a bandaid around the ptrace design trainwreck. 
It's not a @@ -14,10 +14,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 8 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c -index 7278302e3485..6b9d4bbfa9df 100644 +index 08911bb6fe9a..5e278f1540ad 100644 --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -2098,15 +2098,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) +@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); @@ -34,5 +34,5 @@ index 7278302e3485..6b9d4bbfa9df 100644 } else { /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch b/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch index 54fc9ba11..90b43fb74 100644 --- a/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch +++ b/debian/patches-rt/0033-net-sched-Use-msleep-instead-of-yield.patch @@ -1,8 +1,8 @@ -From 280627bfccdc36087f174418d56e57d027552a14 Mon Sep 17 00:00:00 2001 +From ad9cabe151325834cb53c0535b2954637e40049b Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 5 Mar 2014 00:49:47 +0100 -Subject: [PATCH 033/290] net: sched: Use msleep() instead of yield() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 033/319] net: sched: Use msleep() instead of yield() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 (by default). If a high priority userspace process tries to shut down a busy @@ -60,5 +60,5 @@ index 8a4d01e427a2..4ab20f1138fd 100644 * unwind stale skb lists and qdisc statistics */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch b/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch index cad53c9b4..5221f27d2 100644 --- a/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch +++ b/debian/patches-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch @@ -1,8 +1,8 @@ -From ec6350958b69d6c98248ee0871b3a7c00c5949c5 Mon Sep 17 00:00:00 2001 +From ecafcc326d4a6bde4dbc2cf1a909644d8d47d63e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 27 Mar 2018 16:24:15 +0200 -Subject: [PATCH 034/290] dm rq: remove BUG_ON(!irqs_disabled) check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 034/319] dm rq: remove BUG_ON(!irqs_disabled) check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In commit 052189a2ec95 ("dm: remove superfluous irq disablement in dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a @@ -33,5 +33,5 @@ index 4d36373e1c0f..12ed08245130 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch b/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch index 424d8e480..acaa7b264 100644 --- a/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch +++ b/debian/patches-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch @@ -1,8 +1,8 @@ -From c5248048f310926cc88f9d625126ebb980024a51 Mon Sep 17 00:00:00 2001 +From e9a9bd7f3e0bb4a100f16997d9c754fe1de910b6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 8 Nov 2013 17:34:54 +0100 -Subject: [PATCH 035/290] usb: 
do no disable interrupts in giveback -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 035/319] usb: do no disable interrupts in giveback +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet context") the USB code disables interrupts before invoking the complete @@ -42,5 +42,5 @@ index b82a7d787add..2f3015356124 100644 usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch b/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch index 2247734ad..1b8132c43 100644 --- a/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch +++ b/debian/patches-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch @@ -1,8 +1,8 @@ -From 4bfca780bcff671d348be592531816808005330c Mon Sep 17 00:00:00 2001 +From beb5f2438466a5a0c2b7391e3e397a7d2cfc1bee Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 12:39:57 +0200 -Subject: [PATCH 036/290] rt: Provide PREEMPT_RT_BASE config switch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 036/319] rt: Provide PREEMPT_RT_BASE config switch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Introduce PREEMPT_RT_BASE which enables parts of PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT @@ -60,5 +60,5 @@ index cd1655122ec0..027db5976c2f 100644 \ No newline at end of file + bool -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch b/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch index a4444d160..207759f0d 100644 --- a/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch +++ b/debian/patches-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch @@ -1,8 +1,8 @@ -From 6ea70c3ef4fff78246212f51f67a6e48b159bc50 Mon Sep 17 00:00:00 2001 +From 9e4e9344993ae7d8b043f4944b155e9db050c2e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 14 Dec 2011 01:03:49 +0100 -Subject: [PATCH 037/290] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 037/319] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There are "valid" GFP_ATOMIC allocations such as @@ -72,5 +72,5 @@ index a3928d4438b5..a50b2158f7cd 100644 Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. 
This is a bit more expensive, but avoids -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch b/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch index c9a397bf7..4c573d122 100644 --- a/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch +++ b/debian/patches-rt/0038-jump-label-disable-if-stop_machine-is-used.patch @@ -1,8 +1,8 @@ -From cded0d9563b5948a2aa3bddb86f632ea0ec38f0e Mon Sep 17 00:00:00 2001 +From bc3467535e0c999c0d5478bf7244b540c3b2f53a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Jul 2015 17:14:48 +0200 -Subject: [PATCH 038/290] jump-label: disable if stop_machine() is used -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 038/319] jump-label: disable if stop_machine() is used +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Some architectures are using stop_machine() while switching the opcode which leads to latency spikes. @@ -38,5 +38,5 @@ index 185e552f1461..84f36e47e3ab 100644 select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch b/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch index b0b1e02fe..0f827c6b7 100644 --- a/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch +++ b/debian/patches-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch @@ -1,9 +1,9 @@ -From dce06dcc7c4046de51f2d0b0aa53995461a65b82 Mon Sep 17 00:00:00 2001 +From a4131809e22423611cf14841a656dec4a8d31d8c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 24 Jul 2011 12:11:43 +0200 -Subject: [PATCH 039/290] kconfig: Disable config options which are not RT +Subject: [PATCH 039/319] kconfig: Disable config options which are not RT compatible -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Disable stuff which is known to have issues on RT @@ -39,5 +39,5 @@ index b457e94ae618..0dddbb2a3282 100644 select RADIX_TREE_MULTIORDER help -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0040-lockdep-disable-self-test.patch b/debian/patches-rt/0040-lockdep-disable-self-test.patch index 2a640e1ca..9df4cbeb6 100644 --- a/debian/patches-rt/0040-lockdep-disable-self-test.patch +++ b/debian/patches-rt/0040-lockdep-disable-self-test.patch @@ -1,11 +1,11 @@ -From 895584437f1a9e0847ff7ca40873fb3b8bd4be0c Mon Sep 17 00:00:00 2001 +From 3f0f41efff4db706c685f8fdcf1ce963c7ac6546 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 17 Oct 2017 16:36:18 +0200 -Subject: [PATCH 040/290] lockdep: disable self-test +Subject: [PATCH 040/319] lockdep: disable self-test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The self-test wasn't always 100% accurate for RT. We disabled a few tests which failed because they had a different semantic for RT. 
Some @@ -31,5 +31,5 @@ index 46a910acce3f..38cf7f81daa7 100644 Say Y here if you want the kernel to run a short self-test during bootup. The self-test checks whether common types of locking bugs -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch b/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch index 51feadb4e..8d57bd116 100644 --- a/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch +++ b/debian/patches-rt/0041-mm-Allow-only-slub-on-RT.patch @@ -1,8 +1,8 @@ -From dcf8f258f572870f67b12f9d9082eebd0561d085 Mon Sep 17 00:00:00 2001 +From 689ef9b6a8d84ed7d24cc353402b0a03d92ece39 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:03 -0500 -Subject: [PATCH 041/290] mm: Allow only slub on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 041/319] mm: Allow only slub on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. @@ -33,5 +33,5 @@ index 47035b5a46f6..ae9a0113a699 100644 SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch b/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch index 768d7200f..530945037 100644 --- a/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch +++ b/debian/patches-rt/0042-locking-Disable-spin-on-owner-for-RT.patch @@ -1,11 +1,11 @@ -From 4aaf38cf9c2cd085bbee6aec83c66b52b33c9a32 Mon Sep 17 00:00:00 2001 +From 08d750263a76c7ab09af4aeda1ab3b8b3986edff Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:51:45 +0200 -Subject: [PATCH 042/290] locking: Disable spin on owner for RT +Subject: [PATCH 042/319] locking: Disable spin on owner for RT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Drop spin on owner for mutex / rwsem. We are most likely not using it but… @@ -34,5 +34,5 @@ index 84d882f3e299..af27c4000812 100644 config LOCK_SPIN_ON_OWNER def_bool y -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch b/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch index 903ae3ad1..833721a16 100644 --- a/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch +++ b/debian/patches-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch @@ -1,8 +1,8 @@ -From 5f1bd47909aca86dfc97df6f5ceecd67c755b2e8 Mon Sep 17 00:00:00 2001 +From b17f01bc403bbe89565569ee6907af8b7b6f210b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:26:09 +0000 -Subject: [PATCH 043/290] rcu: Disable RCU_FAST_NO_HZ on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 043/319] rcu: Disable RCU_FAST_NO_HZ on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This uses a timer_list timer from the irq disabled guts of the idle code. Disable it for now to prevent wreckage. 
@@ -26,5 +26,5 @@ index 9210379c0353..644264be90f0 100644 help This option permits CPUs to enter dynticks-idle state even if -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch index 73cf39f76..a182515cd 100644 --- a/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch +++ b/debian/patches-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch @@ -1,8 +1,8 @@ -From 980f69b6bec3356146f6c2d4e28405b3c4de4a87 Mon Sep 17 00:00:00 2001 +From a4b5d5ab4d9ebff99f7e0a302a777d94c3087117 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Mar 2014 20:19:05 +0100 -Subject: [PATCH 044/290] rcu: make RCU_BOOST default on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 044/319] rcu: make RCU_BOOST default on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Since it is no longer invoked from the softirq people run into OOM more often if the priority of the RCU thread is too low. Making boosting @@ -30,5 +30,5 @@ index 644264be90f0..a243a78ff38c 100644 This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch b/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch index 38edf1175..d2bccb7be 100644 --- a/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch +++ b/debian/patches-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch @@ -1,8 +1,8 @@ -From 6c16852b3700b4b98d7ea4b6dffbfbb5e1118c01 Mon Sep 17 00:00:00 2001 +From f43d91c85deca5818a818f255e811f30f66c887b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:03:52 +0200 -Subject: [PATCH 045/290] sched: Disable CONFIG_RT_GROUP_SCHED on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 045/319] sched: Disable CONFIG_RT_GROUP_SCHED on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Carsten reported problems when running: @@ -31,5 +31,5 @@ index ae9a0113a699..61e8b531649b 100644 help This feature lets you explicitly allocate real CPU bandwidth -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch b/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch index 8be99d1b4..342c3b700 100644 --- a/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch +++ b/debian/patches-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch @@ -1,11 +1,11 @@ -From 9822fde0d385af32e0b8550600e3961cc9bb96aa Mon Sep 17 00:00:00 2001 +From 45e0de2a7b8ff0c0873ab1943f39c63967714de4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 046/290] net/core: disable NET_RX_BUSY_POLL +Subject: [PATCH 046/319] net/core: disable NET_RX_BUSY_POLL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz sk_busy_loop() does preempt_disable() followed by a few operations which can take sleeping locks and may get long. 
@@ -34,5 +34,5 @@ index 228dfa382eec..bc8d01996f22 100644 config BQL bool -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch b/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch index 947b149a8..eee89e0d6 100644 --- a/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch +++ b/debian/patches-rt/0047-arm-disable-NEON-in-kernel-mode.patch @@ -1,8 +1,8 @@ -From c628460359a533e7547887b5212d2291117eeacb Mon Sep 17 00:00:00 2001 +From a0fe53af60c195dcff47d175d212866c18ebace5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 1 Dec 2017 10:42:03 +0100 -Subject: [PATCH 047/290] arm*: disable NEON in kernel mode -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 047/319] arm*: disable NEON in kernel mode +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz NEON in kernel mode is used by the crypto algorithms and raid6 code. While the raid6 code looks okay, the crypto algorithms do not: NEON @@ -162,5 +162,5 @@ index 34b4e3d46aab..ae055cdad8cf 100644 crc32_pmull_algs[1].update = crc32c_pmull_update; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch b/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch index 58cf7cc60..4abdcb746 100644 --- a/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch +++ b/debian/patches-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch @@ -1,8 +1,8 @@ -From 2b0792d20032fb077f028c433ce0575e0b2af952 Mon Sep 17 00:00:00 2001 +From 6a741e6ad17fc25a2c028c19f37ddd728d93eedb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 048/290] powerpc: Use generic rwsem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 048/319] powerpc: Use generic rwsem on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Use generic code which uses rtmutex @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index a80669209155..9952764db9c5 100644 +index 6f475dc5829b..3d5c86336072 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT @@ -29,5 +29,5 @@ index a80669209155..9952764db9c5 100644 config GENERIC_LOCKBREAK bool -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch index 78a1861b8..b2dd55599 100644 --- a/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ b/debian/patches-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch @@ -1,9 +1,9 @@ -From 59e8b4257278a94fecf207ad131920b7e1364aa7 Mon Sep 17 00:00:00 2001 +From 120bdd703c8752d1334c87dc7be94ba631a0e669 Mon Sep 17 00:00:00 2001 From: Bogdan Purcareata Date: Fri, 24 Apr 2015 15:53:13 +0000 -Subject: [PATCH 049/290] powerpc/kvm: Disable in-kernel MPIC emulation for +Subject: [PATCH 049/319] powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz While converting the openpic emulation 
code to use a raw_spinlock_t enables guests to run on RT, there's still a performance issue. For interrupts sent in @@ -41,5 +41,5 @@ index 68a0e9d5b440..6f4d5d7615af 100644 select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch b/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch index 72a3bbe14..f064c8921 100644 --- a/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch +++ b/debian/patches-rt/0050-powerpc-Disable-highmem-on-RT.patch @@ -1,8 +1,8 @@ -From da3f221553e2c41925af06392f6ed660d68543a7 Mon Sep 17 00:00:00 2001 +From 44f749f4e2ae0932e5919147ad1d62ff0b2a30cb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:08:34 +0200 -Subject: [PATCH 050/290] powerpc: Disable highmem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 050/319] powerpc: Disable highmem on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The current highmem handling on -RT is not compatible and needs fixups. @@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index 9952764db9c5..1563820a37e8 100644 +index 3d5c86336072..1b332f69dd36 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -398,7 +398,7 @@ menu "Kernel options" +@@ -399,7 +399,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" @@ -25,5 +25,5 @@ index 9952764db9c5..1563820a37e8 100644 source kernel/Kconfig.hz -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch b/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch index 0e92ea60c..006abb89c 100644 --- a/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch +++ b/debian/patches-rt/0051-mips-Disable-highmem-on-RT.patch @@ -1,8 +1,8 @@ -From 33a7e9d57de8e82532d8cdec33795907685b22e7 Mon Sep 17 00:00:00 2001 +From 89616a90372ba2105b30f5d698efde918d0ee972 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:10:12 +0200 -Subject: [PATCH 051/290] mips: Disable highmem on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 051/319] mips: Disable highmem on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The current highmem handling on -RT is not compatible and needs fixups. 
@@ -25,5 +25,5 @@ index a830a9701e50..3d5fae3891be 100644 config CPU_SUPPORTS_HIGHMEM bool -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch b/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch index 2caa4d36d..5d32b08f8 100644 --- a/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch +++ b/debian/patches-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch @@ -1,8 +1,8 @@ -From 2aeeb18ed2650e5c8d35e52ed37e51f4f112e87e Mon Sep 17 00:00:00 2001 +From 5e58540bb847f41f94980f2b5b62c2ff3cb8e0e7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2009 02:21:32 +0200 -Subject: [PATCH 052/290] x86: Use generic rwsem_spinlocks on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 052/319] x86: Use generic rwsem_spinlocks on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Simplifies the separation of anon_rw_semaphores and rw_semaphores for -rt. @@ -30,5 +30,5 @@ index e40ba59efe7f..f22e787329cf 100644 config GENERIC_CALIBRATE_DELAY def_bool y -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch index 65923005b..77e78a169 100644 --- a/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch +++ b/debian/patches-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch @@ -1,8 +1,8 @@ -From d438ee6c55b0969d2490f97a6fc897f462f017b0 Mon Sep 17 00:00:00 2001 +From 328a1184b2668305e4ea7b75ae1c3105f62d1f49 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 23 Jan 2014 14:45:59 +0100 -Subject: [PATCH 053/290] leds: trigger: disable CPU trigger on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 053/319] leds: trigger: disable CPU trigger on -RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz as it triggers: |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 @@ -37,5 +37,5 @@ index 4018af769969..b4ce8c115949 100644 This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch index c3e626b68..e339d36d5 100644 --- a/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ b/debian/patches-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch @@ -1,8 +1,8 @@ -From 24a05d9b26a5e6f52e0e35f401fd16a50bc3cc2d Mon Sep 17 00:00:00 2001 +From b01df30728ba058ec9fc4cd8fbd97d855c475e02 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 9 Apr 2015 15:23:01 +0200 -Subject: [PATCH 054/290] cpufreq: drop K8's driver from beeing selected -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 054/319] cpufreq: drop K8's driver from beeing selected +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Ralf posted a picture of a backtrace from @@ -35,5 +35,5 @@ index 35f71825b7f3..bb4a6160d0f7 100644 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. Support for K10 and newer processors is now in acpi-cpufreq. 
-- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0055-md-disable-bcache.patch b/debian/patches-rt/0055-md-disable-bcache.patch index 10ba898d1..ae72def2c 100644 --- a/debian/patches-rt/0055-md-disable-bcache.patch +++ b/debian/patches-rt/0055-md-disable-bcache.patch @@ -1,11 +1,11 @@ -From 00fd6babe97f1ea784554baabb1bc74d7f11be16 Mon Sep 17 00:00:00 2001 +From 33f359182904fdbf350968e52fa48df003cba8bc Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 11:48:57 +0200 -Subject: [PATCH 055/290] md: disable bcache +Subject: [PATCH 055/319] md: disable bcache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz It uses anon semaphores |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: @@ -37,5 +37,5 @@ index f6e0a8b3a61e..18c03d79a442 100644 help Allows a block device to be used as cache for other devices; uses -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch b/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch index 0e69cf027..63a405de9 100644 --- a/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch +++ b/debian/patches-rt/0056-efi-Disable-runtime-services-on-RT.patch @@ -1,8 +1,8 @@ -From e2d68290b3efe18e50ef23302fda76bb471d7493 Mon Sep 17 00:00:00 2001 +From 0bb85517c3d651d90da560add0e696ace91255a8 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 15:03:16 +0200 -Subject: [PATCH 056/290] efi: Disable runtime services on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 056/319] efi: Disable runtime services on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Based on meassurements the EFI functions get_variable / get_next_variable take up to 2us which looks okay. @@ -42,5 +42,5 @@ index 5db20908aa9c..1708505fdf5d 100644 { disable_runtime = true; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch b/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch index acebca7ec..a57d8403b 100644 --- a/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch +++ b/debian/patches-rt/0057-printk-Add-a-printk-kill-switch.patch @@ -1,8 +1,8 @@ -From 3d59550d2bce945d0885bb16ee747edc40521c95 Mon Sep 17 00:00:00 2001 +From a3847095fefea218fb82623cdfd5a6b0335b093f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 Jul 2011 17:58:40 +0200 -Subject: [PATCH 057/290] printk: Add a printk kill switch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 057/319] printk: Add a printk kill switch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that it does not dead-lock with the early printk code. 
@@ -31,7 +31,7 @@ index cf3eccfe1543..30ebf5f82a7c 100644 #ifdef CONFIG_PRINTK_NMI diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 845efadaf7ec..0214d876c22d 100644 +index 7a2fdc097c8c..29838e532f46 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); @@ -170,5 +170,5 @@ index 71381168dede..685443375dc0 100644 nmi_panic(regs, "Hard LOCKUP"); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch b/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch index be435d342..87e330e65 100644 --- a/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch +++ b/debian/patches-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch @@ -1,9 +1,9 @@ -From ec95016b0067d4801b743959345d1d2b4897b4c7 Mon Sep 17 00:00:00 2001 +From 1dbd77d11f20fdbe5bf369d35834ee37a84a7e24 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Sep 2011 14:41:29 +0200 -Subject: [PATCH 058/290] printk: Add "force_early_printk" boot param to help +Subject: [PATCH 058/319] printk: Add "force_early_printk" boot param to help with debugging -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Gives me an option to screw printk and actually see what the machine says. @@ -17,7 +17,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org 1 file changed, 7 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 0214d876c22d..9a7f259dbb20 100644 +index 29838e532f46..f934baed564d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -435,6 +435,13 @@ asmlinkage void early_printk(const char *fmt, ...) @@ -35,5 +35,5 @@ index 0214d876c22d..9a7f259dbb20 100644 { printk_killswitch = true; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch b/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch index 03e3667ee..5d5f5d17f 100644 --- a/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch +++ b/debian/patches-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch @@ -1,8 +1,8 @@ -From 8cdff06e21bab2bdf3a7f633a9e22b890bc43f39 Mon Sep 17 00:00:00 2001 +From dbca42bca5dd88a1a80ce20b2b5d45b53abdbf71 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2009 12:38:56 +0200 -Subject: [PATCH 059/290] preempt: Provide preempt_*_(no)rt variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 059/319] preempt: Provide preempt_*_(no)rt variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz RT needs a few preempt_disable/enable points which are not necessary otherwise. Implement variants to avoid #ifdeffery. 
@@ -49,5 +49,5 @@ index 3196d0e76719..f7a17fcc3fec 100644 struct preempt_notifier; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch index 6ca359ffe..5ab72736d 100644 --- a/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch +++ b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch @@ -1,9 +1,9 @@ -From 4c36c4326b5f829aaabd0e2bd105c12797f16449 Mon Sep 17 00:00:00 2001 +From f0fe2fb34e1cf89fa0d726ee8f759af9338802df Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 8 Mar 2017 14:23:35 +0100 -Subject: [PATCH 060/290] futex: workaround migrate_disable/enable in different +Subject: [PATCH 060/319] futex: workaround migrate_disable/enable in different context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz migrate_disable()/migrate_enable() takes a different path in atomic() vs !atomic() context. These little hacks ensure that we don't underflow / overflow @@ -66,5 +66,5 @@ index e75ad30aa7bc..5c8053098fc8 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0061-rt-Add-local-irq-locks.patch b/debian/patches-rt/0061-rt-Add-local-irq-locks.patch index 15e4de7aa..bb2e24537 100644 --- a/debian/patches-rt/0061-rt-Add-local-irq-locks.patch +++ b/debian/patches-rt/0061-rt-Add-local-irq-locks.patch @@ -1,8 +1,8 @@ -From 2bd99b106c35f77357bcaea4546eb89085cc5ac8 Mon Sep 17 00:00:00 2001 +From f12f8a504f11df9667754f10354c4c74c38f5ec4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 20 Jun 2011 09:03:47 +0200 -Subject: [PATCH 061/290] rt: Add local irq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 061/319] rt: Add local irq locks +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Introduce locallock. For !RT this maps to preempt_disable()/ local_irq_disable() so there is not much that changes. For RT this will @@ -337,5 +337,5 @@ index 70b7123f38c7..24421bf8c4b3 100644 #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch b/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch index 0bb5b3c6d..3a89c3a28 100644 --- a/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch +++ b/debian/patches-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch @@ -1,8 +1,8 @@ -From 6b3038f8d30a5cef133ed3791a44e17377c0b052 Mon Sep 17 00:00:00 2001 +From 24b3666ecf5a5f8f454c429a10ffe9babeec190f Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:56 -0500 -Subject: [PATCH 062/290] locallock: provide {get,put}_locked_ptr() variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 062/319] locallock: provide {get,put}_locked_ptr() variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Provide a set of locallocked accessors for pointers to per-CPU data; this is useful for dynamically-allocated per-CPU regions, for example. 
@@ -45,5 +45,5 @@ index d658c2552601..921eab83cd34 100644 #define local_lock_cpu(lvar) get_cpu() #define local_unlock_cpu(lvar) put_cpu() -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch b/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch index 90c850061..7ecaa9fdd 100644 --- a/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch +++ b/debian/patches-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch @@ -1,8 +1,8 @@ -From ed4646fb9f4dde9fa5e97aadbd15073babb2fdb5 Mon Sep 17 00:00:00 2001 +From 41c74791c8608c55cf265e5dc237a224f0c68d9e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:34 -0500 -Subject: [PATCH 063/290] mm/scatterlist: Do not disable irqs on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 063/319] mm/scatterlist: Do not disable irqs on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz For -RT it is enough to keep pagefault disabled (which is currently handled by kmap_atomic()). @@ -26,5 +26,5 @@ index 8c3036c37ba0..336162c2813f 100644 } else kunmap(miter->page); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch b/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch index 317a2f95b..e53e871f9 100644 --- a/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch +++ b/debian/patches-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch @@ -1,8 +1,8 @@ -From 61d171cababe91bd799f750f2c88eca687b65d51 Mon Sep 17 00:00:00 2001 +From 0702d70f996b678c417b86961eb85c5f54258794 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 064/290] signal/x86: Delay calling signals in atomic -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 064/319] signal/x86: Delay calling signals in atomic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using @@ -80,7 +80,7 @@ index 33d3c88a7225..fb0438d06ca7 100644 typedef sigset_t compat_sigset_t; #endif diff --git a/include/linux/sched.h b/include/linux/sched.h -index 038d0faaa1d5..c87c11bfd9d9 100644 +index 0489d3e0e78c..e4af260f81c5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -881,6 +881,10 @@ struct task_struct { @@ -95,10 +95,10 @@ index 038d0faaa1d5..c87c11bfd9d9 100644 size_t sas_ss_size; unsigned int sas_ss_flags; diff --git a/kernel/signal.c b/kernel/signal.c -index 6b9d4bbfa9df..3565221b4fac 100644 +index 5e278f1540ad..d5e764bb2444 100644 --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -1272,8 +1272,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, +@@ -1277,8 +1277,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. 
*/ @@ -109,7 +109,7 @@ index 6b9d4bbfa9df..3565221b4fac 100644 { unsigned long int flags; int ret, blocked, ignored; -@@ -1302,6 +1302,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +@@ -1307,6 +1307,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) return ret; } @@ -150,5 +150,5 @@ index 6b9d4bbfa9df..3565221b4fac 100644 * Nuke all other threads in the group. */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch index e39d7d747..b988408c3 100644 --- a/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch +++ b/debian/patches-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch @@ -1,8 +1,8 @@ -From 159a99e63d62232f0de5a25fe8d92e4e31140c29 Mon Sep 17 00:00:00 2001 +From c67616e94cb3a4e226e6e2db6dec9ce3793f228d Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 10 Dec 2015 10:58:51 -0800 -Subject: [PATCH 065/290] x86/signal: delay calling signals on 32bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 065/319] x86/signal: delay calling signals on 32bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When running some ptrace single step tests on x86-32 machine, the below problem is triggered: @@ -45,5 +45,5 @@ index fb0438d06ca7..c00e27af2205 100644 #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch b/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch index ba90d0991..83f01921f 100644 --- a/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch +++ b/debian/patches-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch @@ -1,8 +1,8 @@ -From 5c50a0cef22d58bcda53aae4df00e10ce6e04903 Mon Sep 17 00:00:00 2001 +From 95d4bb35411f3e96e84b643295cd03c33e1029e8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 09:18:52 +0100 -Subject: [PATCH 066/290] buffer_head: Replace bh_uptodate_lock for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 066/319] buffer_head: Replace bh_uptodate_lock for -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Wrap the bit_spin_lock calls into a separate inline and add the RT replacements with a real spinlock. @@ -87,7 +87,7 @@ index a550e0d8e965..a5b3a456dbff 100644 __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c -index db7590178dfc..d76364124443 100644 +index 9cc79b7b0df1..3f4ba2011499 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio) @@ -193,5 +193,5 @@ index 96225a77c112..8a1bcfb145d7 100644 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. 
-- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch b/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch index 4906e5f32..a34ad5a4b 100644 --- a/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch +++ b/debian/patches-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch @@ -1,9 +1,9 @@ -From f3f66f83edae0ab6ff7569d6395c5ee3833e8fba Mon Sep 17 00:00:00 2001 +From 34bc88f1af622442a231c1e5bfc9af3ec2d859ac Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:11:25 +0100 -Subject: [PATCH 067/290] fs: jbd/jbd2: Make state lock and journal head lock +Subject: [PATCH 067/319] fs: jbd/jbd2: Make state lock and journal head lock rt safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz bit_spin_locks break under RT. @@ -106,5 +106,5 @@ index 268f3000d1b3..8f5d6ecb802e 100644 #define J_ASSERT(assert) BUG_ON(!(assert)) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch b/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch index f090db122..2f64bf2de 100644 --- a/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch +++ b/debian/patches-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch @@ -1,8 +1,8 @@ -From 3023eb220840f37bbc9d31e9959de567cb1aaaae Mon Sep 17 00:00:00 2001 +From 84d4197815de2e62162c70c090ac057d09be0c21 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 21 Jun 2013 15:07:25 -0400 -Subject: [PATCH 068/290] list_bl: Make list head locking RT safe -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 068/319] list_bl: Make list head locking RT safe +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal @@ -117,5 +117,5 @@ index 3fc2cc57ba1b..69b659259bac 100644 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch index 1c9edc623..e848375f1 100644 --- a/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch +++ b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch @@ -1,8 +1,8 @@ -From 819674251514107628d4f10edb7514ab08bb57a9 Mon Sep 17 00:00:00 2001 +From b9cf8dcace585942a12b3cc585dcd52f92b35d44 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 31 Mar 2016 00:04:25 -0500 -Subject: [PATCH 069/290] list_bl: fixup bogus lockdep warning -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 069/319] list_bl: fixup bogus lockdep warning +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz At first glance, the use of 'static inline' seems appropriate for INIT_HLIST_BL_HEAD(). 
@@ -100,5 +100,5 @@ index 69b659259bac..0b5de7d9ffcf 100644 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch b/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch index 90239ea40..8f18eedb6 100644 --- a/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch +++ b/debian/patches-rt/0070-genirq-Disable-irqpoll-on-rt.patch @@ -1,8 +1,8 @@ -From 672f4d8462083915084e695c9ed600d32fbfcd52 Mon Sep 17 00:00:00 2001 +From 84aead53ab8e5cce53db13bbfa02bf247630495a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:57 -0500 -Subject: [PATCH 070/290] genirq: Disable irqpoll on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 070/319] genirq: Disable irqpoll on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Creates long latencies for no value @@ -39,5 +39,5 @@ index d867d6ddafdd..cd12ee86c01e 100644 printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch b/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch index 8178de602..2582ccf18 100644 --- a/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch +++ b/debian/patches-rt/0071-genirq-Force-interrupt-thread-on-RT.patch @@ -1,8 +1,8 @@ -From a2049c3364eae5d13eaaa22d9212c631faf58236 Mon Sep 17 00:00:00 2001 +From c5684425630df09b12eb6028c30ec97718eea200 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 3 Apr 2011 11:57:29 +0200 -Subject: [PATCH 071/290] genirq: Force interrupt thread on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 071/319] genirq: Force interrupt thread on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Force threaded_irqs and optimize the code (force_irqthreads) in regard to this. @@ -50,5 +50,5 @@ index 23bcfa71077f..3c26d0708709 100644 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index d203ccd1d..50faa0d5f 100644 --- a/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,9 +1,9 @@ -From e8c4b5df495a54c8c5fdf5c34fcbb07a2780e3bd Mon Sep 17 00:00:00 2001 +From fe855c70c679d6aea443993cc20a7aa271241120 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:20 +0200 -Subject: [PATCH 072/290] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 072/319] Split IRQ-off and zone->lock while freeing pages from PCP list #1 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. 
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 52 insertions(+), 30 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 74fb5c338e8f..3b51ad4a6089 100644 +index e5c610d711f3..0cfcd42517a4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1095,7 +1095,7 @@ static inline void prefetch_buddy(struct page *page) @@ -169,5 +169,5 @@ index 74fb5c338e8f..3b51ad4a6089 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index 056c2e622..82776e528 100644 --- a/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/debian/patches-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -1,9 +1,9 @@ -From acc79cbb82bc621f748cafcab53e6038456ee194 Mon Sep 17 00:00:00 2001 +From 0b4508f1aaaf6bab53ee3f3b4293890a121a4d32 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 May 2018 15:24:21 +0200 -Subject: [PATCH 073/290] Split IRQ-off and zone->lock while freeing pages from +Subject: [PATCH 073/319] Split IRQ-off and zone->lock while freeing pages from PCP list #2 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Split the IRQ-off section while accessing the PCP list from zone->lock while freeing pages. @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 3b51ad4a6089..49f7bb170b4d 100644 +index 0cfcd42517a4..9a4d150ea5b7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1105,8 +1105,8 @@ static inline void prefetch_buddy(struct page *page) @@ -168,5 +168,5 @@ index 3b51ad4a6089..49f7bb170b4d 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch index 49407ecdd..7a3d370dc 100644 --- a/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch @@ -1,8 +1,8 @@ -From 110ed1923c52a4d9ec01080e38ec6a48b9254b32 Mon Sep 17 00:00:00 2001 +From 8b7223102188e3162b9ba728d28c2eee2b78a779 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 28 May 2018 15:24:22 +0200 -Subject: [PATCH 074/290] mm/SLxB: change list_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 074/319] mm/SLxB: change list_lock to raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t otherwise the interrupts won't be disabled on -RT. 
The locking rules remain @@ -615,5 +615,5 @@ index 9c3937c5ce38..ba20c68a9cfd 100644 for (i = 0; i < t.count; i++) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch index b1fc3a298..d2409f30e 100644 --- a/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch +++ b/debian/patches-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch @@ -1,9 +1,9 @@ -From 4a3bfadbedbdbba560abbf1073deacf963fcbc6a Mon Sep 17 00:00:00 2001 +From 6b0475b39641e91cc6ef93e8c330948b3b8c8e4a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jun 2018 17:29:19 +0200 -Subject: [PATCH 075/290] mm/SLUB: delay giving back empty slubs to IRQ enabled +Subject: [PATCH 075/319] mm/SLUB: delay giving back empty slubs to IRQ enabled regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __free_slab() is invoked with disabled interrupts which increases the irq-off time while __free_pages() is doing the work. @@ -219,5 +219,5 @@ index ba20c68a9cfd..224663e20772 100644 if (debug_guardpage_minorder()) slub_max_order = 0; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch index c2c2cd1f0..26180603a 100644 --- a/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/debian/patches-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -1,8 +1,8 @@ -From b2dcde8f1c9f1175f30723fc2d3fe6aaabe087ea Mon Sep 17 00:00:00 2001 +From 0199b47ec0c654066fd33edf7f08b5266817b8b4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:37 -0500 -Subject: [PATCH 076/290] mm: page_alloc: rt-friendly per-cpu pages -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 076/319] mm: page_alloc: rt-friendly per-cpu pages +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. 
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 49f7bb170b4d..bff6c04eb359 100644 +index 9a4d150ea5b7..d6f9be9c6635 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -60,6 +60,7 @@ @@ -216,7 +216,7 @@ index 49f7bb170b4d..bff6c04eb359 100644 return NULL; } -@@ -8099,7 +8122,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8129,7 +8152,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -225,7 +225,7 @@ index 49f7bb170b4d..bff6c04eb359 100644 if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -8108,7 +8131,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8138,7 +8161,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } @@ -235,5 +235,5 @@ index 49f7bb170b4d..bff6c04eb359 100644 #ifdef CONFIG_MEMORY_HOTREMOVE -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch index ace9c4afa..19c2ee0c9 100644 --- a/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch +++ b/debian/patches-rt/0077-mm-swap-Convert-to-percpu-locked.patch @@ -1,8 +1,8 @@ -From 3350882474566687c3916a408fd9097ef4656f0b Mon Sep 17 00:00:00 2001 +From dbcd0cb860fd4d945f06f64b9ec092579871bf5f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:51 -0500 -Subject: [PATCH 077/290] mm/swap: Convert to percpu locked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 077/319] mm/swap: Convert to percpu locked +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Replace global locks (get_cpu + local_irq_save) with "local_locks()". Currently there is one of for "rotate" and one for "swap". 
@@ -56,10 +56,10 @@ index 5079ddbec8f9..c40d3a13cbbd 100644 cc->last_migrated_pfn = 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index bff6c04eb359..f7d30f995bc5 100644 +index d6f9be9c6635..a1547f1be42c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -7212,8 +7212,9 @@ void __init free_area_init(unsigned long *zones_size) +@@ -7242,8 +7242,9 @@ void __init free_area_init(unsigned long *zones_size) static int page_alloc_cpu_dead(unsigned int cpu) { @@ -207,5 +207,5 @@ index 45fdbfb6b2a6..92f994b962f0 100644 #ifdef CONFIG_SMP -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch index a85d27997..d46165a8e 100644 --- a/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch +++ b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch @@ -1,8 +1,8 @@ -From 86cb58e9a8ac63e967bb1b1cfce05718c20cacb8 Mon Sep 17 00:00:00 2001 +From b615a51c11738fcd8a09c66740fb603e386875bc Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Fri, 27 May 2016 15:03:28 +0200 -Subject: [PATCH 078/290] mm: perform lru_add_drain_all() remotely -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 078/319] mm: perform lru_add_drain_all() remotely +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run on all CPUs that have non-empty LRU pagevecs and then waiting for @@ -105,5 +105,5 @@ index 92f994b962f0..3885645a45ce 100644 mutex_unlock(&lock); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch index e77cf055d..84f6e17b0 100644 --- a/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch +++ b/debian/patches-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch @@ -1,9 +1,9 @@ -From 23db125cbdadfa2ec3477b09b2735a77aa683c64 Mon Sep 17 00:00:00 2001 +From 389837345d64c1c381eca305e828b264327f3ebe Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:13 -0500 -Subject: [PATCH 079/290] mm/vmstat: Protect per cpu variables with preempt +Subject: [PATCH 079/319] mm/vmstat: Protect per cpu variables with preempt disable on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Disable preemption on -RT for the vmstat code. On vanila the code runs in IRQ-off regions while on -RT it is not. 
"preempt_disable" ensures that the @@ -141,5 +141,5 @@ index ce81b0a7d018..cfa2a3bbdf91 100644 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch b/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch index 091e10b75..1c7a3b0eb 100644 --- a/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch +++ b/debian/patches-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch @@ -1,9 +1,9 @@ -From 361d54ae02b6b48ee93c86c2abdeee149a8d04be Mon Sep 17 00:00:00 2001 +From dc3c6ccd34f3c8de681a9255ed8c2bfbaed18f5e Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Sat, 1 Oct 2011 18:58:13 -0700 -Subject: [PATCH 080/290] ARM: Initialize split page table locks for vector +Subject: [PATCH 080/319] ARM: Initialize split page table locks for vector page -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if PREEMPT_RT_FULL=y because vectors_user_mapping() creates a @@ -72,5 +72,5 @@ index 82ab015bf42b..8d3c7ce34c24 100644 /* * The vectors page is always readable from user space for the -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch b/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch index 29d257fd2..2e901a9c8 100644 --- a/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch +++ b/debian/patches-rt/0081-mm-Enable-SLUB-for-RT.patch @@ -1,8 +1,8 @@ -From 4c7a8a89b8b16cb2733dc45aae256c0fda1c34c8 Mon Sep 17 00:00:00 2001 +From 192ae1c9b15aa4c895b203333d6d51d4719fb9f7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 Oct 2012 10:32:35 +0100 -Subject: [PATCH 081/290] mm: Enable SLUB for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 081/319] mm: Enable SLUB for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Avoid the memory allocation in IRQ section @@ -38,5 +38,5 @@ index 224663e20772..cbe47408c6eb 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch b/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch index 4aaed772b..580006555 100644 --- a/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch +++ b/debian/patches-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch @@ -1,8 +1,8 @@ -From 2d3739e2e7cae9db7fe6affbc747b7f06dc42613 Mon Sep 17 00:00:00 2001 +From 5b3608b4f8a4fa9dcdab2eca0b36234658cc1f08 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 12:08:15 +0100 -Subject: [PATCH 082/290] slub: Enable irqs for __GFP_WAIT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 082/319] slub: Enable irqs for __GFP_WAIT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz SYSTEM_RUNNING might be too late for enabling interrupts. Allocations with GFP_WAIT can happen before that. So use this as an indicator. 
@@ -44,5 +44,5 @@ index cbe47408c6eb..81c32ceab228 100644 if (!page) return NULL; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch b/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch index 46eee9c36..a8e3d2a17 100644 --- a/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch +++ b/debian/patches-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch @@ -1,8 +1,8 @@ -From d4993fa5aea9eed8f9fce53e94e226f2504eda18 Mon Sep 17 00:00:00 2001 +From 532e68352d09b125582eb796c2366bcef46c0f44 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Apr 2015 19:00:47 +0200 -Subject: [PATCH 083/290] slub: Disable SLUB_CPU_PARTIAL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 083/319] slub: Disable SLUB_CPU_PARTIAL +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7 @@ -50,5 +50,5 @@ index 61e8b531649b..b4e88fb19c26 100644 help Per cpu partial caches accellerate objects allocation and freeing -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch index b98b76910..9c48f2b40 100644 --- a/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ b/debian/patches-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -1,9 +1,9 @@ -From 17e8b2696494bfeee71425d3acac4608ac97b45d Mon Sep 17 00:00:00 2001 +From 24bbef6004d0d445a228dcc3b159314c3e5426b6 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 30 Oct 2013 11:48:33 -0700 -Subject: [PATCH 084/290] mm/memcontrol: Don't call schedule_work_on in +Subject: [PATCH 084/319] mm/memcontrol: Don't call schedule_work_on in preemption disabled context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The following trace is triggered when running ltp oom test cases: @@ -71,5 +71,5 @@ index 3a3d109dce21..cf9e81fb342d 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch index 565cf0f74..2f2f4a979 100644 --- a/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch +++ b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch @@ -1,9 +1,9 @@ -From 43548b6188b27097898381e2f1548ff656f4d127 Mon Sep 17 00:00:00 2001 +From f8036a517754394a4ab63db3ad57b3ab574ea19b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 28 Jan 2015 17:14:16 +0100 -Subject: [PATCH 085/290] mm/memcontrol: Replace local_irq_disable with local +Subject: [PATCH 085/319] mm/memcontrol: Replace local_irq_disable with local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There are a few local_irq_disable() which then take sleeping locks. This patch converts them local locks. 
@@ -120,5 +120,5 @@ index cf9e81fb342d..421ac74450f6 100644 /** -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch index f2c6fec00..9ee76cbee 100644 --- a/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch +++ b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch @@ -1,8 +1,8 @@ -From 6b63a10dc279d7aa3134caeb249969ae7af3f010 Mon Sep 17 00:00:00 2001 +From 602b3755a425bfb42f9f1b86f8e5e05f9d2b86bb Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 22 Mar 2016 11:16:09 +0100 -Subject: [PATCH 086/290] mm/zsmalloc: copy with get_cpu_var() and locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 086/319] mm/zsmalloc: copy with get_cpu_var() and locking +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz get_cpu_var() disables preemption and triggers a might_sleep() splat later. This is replaced with get_locked_var(). @@ -199,5 +199,5 @@ index 85cc29c93d93..63e83b47fa99 100644 migrate_read_unlock(zspage); unpin_tag(handle); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch b/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch index 2a70f2045..7f3a5eab9 100644 --- a/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch +++ b/debian/patches-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch @@ -1,9 +1,9 @@ -From 3596d839ec05c759a409ec3c9fb058b89bc43de8 Mon Sep 17 00:00:00 2001 +From 25f3aa370d7aec7ec346e563665a523bb244bc9f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 21:53:43 +0100 -Subject: [PATCH 087/290] x86/mm/pat: disable preemption __split_large_page() +Subject: [PATCH 087/319] x86/mm/pat: disable preemption __split_large_page() after spin_lock() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a warning if __flush_tlb_all() is invoked in preemptible context. On !RT @@ -58,5 +58,5 @@ index e2d4b25c7aa4..9626ebb9e3c8 100644 return 0; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0088-radix-tree-use-local-locks.patch b/debian/patches-rt/0088-radix-tree-use-local-locks.patch index 96901c9ae..6019f6867 100644 --- a/debian/patches-rt/0088-radix-tree-use-local-locks.patch +++ b/debian/patches-rt/0088-radix-tree-use-local-locks.patch @@ -1,8 +1,8 @@ -From 2ca56bb81f83515755165c1c638655a4120b1b06 Mon Sep 17 00:00:00 2001 +From bb781f6555553a52a39b4d589c88d1a4a1721164 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jan 2017 16:34:27 +0100 -Subject: [PATCH 088/290] radix-tree: use local locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 088/319] radix-tree: use local locks +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The preload functionality uses per-CPU variables and preempt-disable to ensure that it does not switch CPUs during its usage. 
This patch adds @@ -172,5 +172,5 @@ index e5cab5c4e383..9309e813bc1f 100644 if (!this_cpu_read(ida_bitmap)) { struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch index a2bfc31ae..c023fb55e 100644 --- a/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch +++ b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch @@ -1,8 +1,8 @@ -From c87cd9494b5ba0149620db6bc1655edbe04f9c9d Mon Sep 17 00:00:00 2001 +From cef1d4f498e86d36b7c7b5290432ec0c089b842a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 089/290] timers: Prepare for full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 089/319] timers: Prepare for full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When softirqs can be preempted we need to make sure that cancelling the timer from the active thread can not deadlock vs. a running timer @@ -172,5 +172,5 @@ index ae64cb819a9a..9019c9caf146 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch b/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch index 5e23b80e9..708323a6c 100644 --- a/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch +++ b/debian/patches-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch @@ -1,8 +1,8 @@ -From 70c79c38cb1e81d8cffa19dc562b629dcde98c6c Mon Sep 17 00:00:00 2001 +From 0b5b5f3f20cd3eafe3b93c1ef1d18b9306d0ed78 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 6 Nov 2011 12:26:18 +0100 -Subject: [PATCH 090/290] x86: kvm Require const tsc for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 090/319] x86: kvm Require const tsc for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Non constant TSC is a nightmare on bare metal already, but with virtualization it becomes a complete disaster because the workarounds @@ -15,10 +15,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 353f63f3b262..2611898419ff 100644 +index ade694f94a49..2dfb7c81743e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -6832,6 +6832,13 @@ int kvm_arch_init(void *opaque) +@@ -6873,6 +6873,13 @@ int kvm_arch_init(void *opaque) goto out; } @@ -33,5 +33,5 @@ index 353f63f3b262..2611898419ff 100644 if (r) goto out_free_percpu; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch index 0dac9fe70..a3bf27ede 100644 --- a/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch +++ b/debian/patches-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch @@ -1,8 +1,8 @@ -From cffa5c3f97fb6ad83cf3d770f5297f879f6efc36 Mon Sep 17 00:00:00 2001 +From 2b09f96c8a4fe11260aa67ee49b63d7698fafe42 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 4 Oct 2017 10:24:23 +0200 -Subject: [PATCH 091/290] pci/switchtec: Don't use completion's wait queue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 091/319] 
pci/switchtec: Don't use completion's wait queue +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The poll callback is using completion's wait_queue_head_t member and puts it in poll_wait() so the poll() caller gets a wakeup after command @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c -index 72db2e0ebced..77d4fb86d05b 100644 +index ceb7ab3ba3d0..f4c39feb5c04 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -43,10 +43,11 @@ struct switchtec_user { @@ -49,7 +49,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 stuser->event_cnt = atomic_read(&stdev->event_cnt); dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); -@@ -151,7 +152,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) +@@ -147,7 +148,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); @@ -58,7 +58,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); -@@ -188,7 +189,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) +@@ -184,7 +185,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) stuser->read_len); out: @@ -68,7 +68,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 list_del_init(&stuser->list); stuser_put(stuser); stdev->mrpc_busy = 0; -@@ -458,10 +460,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, +@@ -454,10 +456,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, mutex_unlock(&stdev->mrpc_mutex); if (filp->f_flags & O_NONBLOCK) { @@ -82,7 +82,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 if (rc < 0) return rc; } -@@ -509,7 +512,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) +@@ -505,7 +508,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) struct switchtec_dev *stdev = stuser->stdev; __poll_t ret = 0; @@ -91,7 +91,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 poll_wait(filp, &stdev->event_wq, wait); if (lock_mutex_and_test_alive(stdev)) -@@ -517,7 +520,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) +@@ -513,7 +516,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) mutex_unlock(&stdev->mrpc_mutex); @@ -100,7 +100,7 @@ index 72db2e0ebced..77d4fb86d05b 100644 ret |= EPOLLIN | EPOLLRDNORM; if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) -@@ -1041,7 +1044,8 @@ static void stdev_kill(struct switchtec_dev *stdev) +@@ -1037,7 +1040,8 @@ static void stdev_kill(struct switchtec_dev *stdev) /* Wake up and kill any users waiting on an MRPC request */ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { @@ -111,5 +111,5 @@ index 72db2e0ebced..77d4fb86d05b 100644 stuser_put(stuser); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0092-wait.h-include-atomic.h.patch b/debian/patches-rt/0092-wait.h-include-atomic.h.patch index 3dc80ace6..dc28321f2 100644 --- a/debian/patches-rt/0092-wait.h-include-atomic.h.patch +++ b/debian/patches-rt/0092-wait.h-include-atomic.h.patch @@ -1,11 +1,11 @@ -From 0aafaa8df18cf87cf491f2592b9c378667766a62 Mon Sep 17 00:00:00 2001 +From 87862c5a8832825e1b19b5c84d8468beb37d38d7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 12:19:57 +0100 -Subject: [PATCH 092/290] 
wait.h: include atomic.h +Subject: [PATCH 092/319] wait.h: include atomic.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz | CC init/main.o |In file included from include/linux/mmzone.h:9:0, @@ -38,5 +38,5 @@ index ed7c122cb31f..2b5ef8e94d19 100644 typedef struct wait_queue_entry wait_queue_entry_t; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch index cc44b5f9f..763d1de6f 100644 --- a/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch +++ b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch @@ -1,8 +1,8 @@ -From eccea3070c1d002aaa2a9cbdaaee36e65f5d06cf Mon Sep 17 00:00:00 2001 +From 58fa131cdf3f847c6724a688b161843bf3cbb2b2 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Fri, 11 Jul 2014 15:26:11 +0200 -Subject: [PATCH 093/290] work-simple: Simple work queue implemenation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 093/319] work-simple: Simple work queue implemenation +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Provides a framework for enqueuing callbacks from irq context PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. @@ -242,5 +242,5 @@ index 000000000000..a5b89fdacf19 +} +EXPORT_SYMBOL_GPL(swork_put); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch b/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch index 36bf7196e..46a78d361 100644 --- a/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch +++ b/debian/patches-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch @@ -1,9 +1,9 @@ -From cb2283a50926db7e39bf3643c28a9c8c73a3263a Mon Sep 17 00:00:00 2001 +From 56f5a7f5be4f4eab4701de1fad53f7d06a347549 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 10 Sep 2018 18:00:31 +0200 -Subject: [PATCH 094/290] work-simple: drop a shit statement in +Subject: [PATCH 094/319] work-simple: drop a shit statement in SWORK_EVENT_PENDING -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Dan Carpenter reported | smatch warnings: @@ -33,5 +33,5 @@ index a5b89fdacf19..c90d14b9b126 100644 static DEFINE_MUTEX(worker_mutex); static struct sworker *glob_worker; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch b/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch index 0c97b3f2e..a8b930eca 100644 --- a/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch +++ b/debian/patches-rt/0095-completion-Use-simple-wait-queues.patch @@ -1,8 +1,8 @@ -From 93e59514ca04f36ad3f87b3c0da5be1e0b9d238f Mon Sep 17 00:00:00 2001 +From fdcca4158ad138c54f6be04b6843aa4116876cee Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 11 Jan 2013 11:23:51 +0100 -Subject: [PATCH 095/290] completion: Use simple wait queues -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 095/319] completion: Use simple wait queues +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Completions have no long lasting callbacks and therefor do not need the complex waitqueue variant. Use simple waitqueues which reduces the @@ -40,7 +40,7 @@ index e7075aaff1bb..1580464a9d5b 100644 res = -EINTR; if (res) { diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -index 94ad6fe29e69..52a49f0bbc19 100644 +index 2c7dd2a7350c..0e5cde31d9ac 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, @@ -387,5 +387,5 @@ index 66b59ac77c22..c7cb30cdd1b7 100644 wait->task = current; if (list_empty(&wait->task_list)) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0096-fs-aio-simple-simple-work.patch b/debian/patches-rt/0096-fs-aio-simple-simple-work.patch index 38f660b93..1b369a711 100644 --- a/debian/patches-rt/0096-fs-aio-simple-simple-work.patch +++ b/debian/patches-rt/0096-fs-aio-simple-simple-work.patch @@ -1,8 +1,8 @@ -From 864971842ede76748f6c198f112d6857b19196fa Mon Sep 17 00:00:00 2001 +From 1f99427b2635d8a0157544e26dde28cca81c5b84 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 096/290] fs/aio: simple simple work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 096/319] fs/aio: simple simple work +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 |in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/fs/aio.c b/fs/aio.c -index 911e23087dfb..16dcf8521c2c 100644 +index b5fbf2061868..93f8cf7fdeab 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -42,6 +42,7 @@ @@ -85,5 +85,5 @@ index 911e23087dfb..16dcf8521c2c 100644 { unsigned i, new_nr; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch index 27a51b3b6..ab56b69fe 100644 --- a/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/debian/patches-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,9 +1,9 @@ -From 06003e1c96291d4d372f2e31c988999c2ef34f31 Mon Sep 17 00:00:00 2001 +From 252be885793422121dda4aec9e2219180d14e18e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 097/290] genirq: Do not invoke the affinity callback via a +Subject: [PATCH 097/319] genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Joe Korty reported, that __irq_set_affinity_locked() schedules a workqueue while holding a rawlock which results in a might_sleep() @@ -138,5 +138,5 @@ index 3c26d0708709..eadcbfbd434a 100644 } -- -2.24.1 +2.25.0 diff --git 
a/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch index e827aa9b0..46b89e7b4 100644 --- a/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch +++ b/debian/patches-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch @@ -1,9 +1,9 @@ -From cd74856820b82cb80ddd3b12dde3caad12642b3c Mon Sep 17 00:00:00 2001 +From 1360063740900a626633ffbd74f1ecc99cc7111f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 15 Nov 2017 17:29:51 +0100 -Subject: [PATCH 098/290] time/hrtimer: avoid schedule_work() with interrupts +Subject: [PATCH 098/319] time/hrtimer: avoid schedule_work() with interrupts disabled -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The NOHZ code tries to schedule a workqueue with interrupts disabled. Since this does not work -RT I am switching it to swork instead. @@ -56,5 +56,5 @@ index 9019c9caf146..3fab1c50bf1b 100644 void __user *buffer, size_t *lenp, loff_t *ppos) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch index 597487347..de17f4a65 100644 --- a/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ b/debian/patches-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch @@ -1,9 +1,9 @@ -From 8553725681eb8c0f78dad5fb1ef542301586e9ff Mon Sep 17 00:00:00 2001 +From 71c999b8882a041039be2f3dcdaa16c217ddfcfb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Jul 2018 11:25:41 +0200 -Subject: [PATCH 099/290] hrtimer: consolidate hrtimer_init() + +Subject: [PATCH 099/319] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper() calls -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz hrtimer_init_sleeper() calls require a prior initialisation of the hrtimer object with hrtimer_init(). 
Lets make the initialisation of @@ -44,10 +44,10 @@ index 684acaa96db7..4aa3284874f6 100644 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) break; diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c -index 22571abcaa4e..78a529d363f3 100644 +index 034d86869772..d089b2cb5dd7 100644 --- a/drivers/staging/android/vsoc.c +++ b/drivers/staging/android/vsoc.c -@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) +@@ -438,12 +438,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) return -EINVAL; wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); @@ -286,5 +286,5 @@ index 092fa3d75b32..9d472d626aaa 100644 set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch b/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch index 78c052474..9acc326b9 100644 --- a/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch +++ b/debian/patches-rt/0100-hrtimers-Prepare-full-preemption.patch @@ -1,8 +1,8 @@ -From e033fc3f9345af04f20c1d45fa8ee453caaf5b1c Mon Sep 17 00:00:00 2001 +From a04b0dfa026adab75ac8797547ef368264245993 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 100/290] hrtimers: Prepare full preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 100/319] hrtimers: Prepare full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Make cancellation of a running callback in softirq context safe against preemption. @@ -95,10 +95,10 @@ index ee7e987ea1b4..0571b498db73 100644 void run_posix_cpu_timers(struct task_struct *task); diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index f4255a65c44b..1d1f077cffb3 100644 +index 9eece67f29f3..a465564367ec 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c -@@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm) +@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) int ret = alarm_try_to_cancel(alarm); if (ret >= 0) return ret; @@ -286,5 +286,5 @@ index 5a01c4fdbfef..a5ec421e3437 100644 } list_del(&timer->list); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch b/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch index ac4ef8535..8fb97395d 100644 --- a/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch +++ b/debian/patches-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch @@ -1,9 +1,9 @@ -From b26ce46313b85d2d4f52b053c1602b3c6d3e3f9f Mon Sep 17 00:00:00 2001 +From f4a30ca23f3c352dd834d9cd398022ece0fadd1e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 08:44:31 -0500 -Subject: [PATCH 101/290] hrtimer: by timers by default into the softirq +Subject: [PATCH 101/319] hrtimer: by timers by default into the softirq context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz We can't have hrtimers callbacks running in hardirq context on RT. Therefore the timers are deferred to the softirq context by default. 
@@ -29,10 +29,10 @@ Signed-off-by: Sebastian Andrzej Siewior 11 files changed, 37 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 262e49301cae..c2f51b6e8974 100644 +index 05905961ecca..9f08b74cda59 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c -@@ -2257,7 +2257,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) +@@ -2262,7 +2262,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, @@ -66,7 +66,7 @@ index 8714f1a37d84..082147c07831 100644 /* diff --git a/kernel/events/core.c b/kernel/events/core.c -index 460d5fd3ec4e..60a883088961 100644 +index 8c70ee23fbe9..9804b1a8b0fa 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1102,7 +1102,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) @@ -78,7 +78,7 @@ index 460d5fd3ec4e..60a883088961 100644 timer->function = perf_mux_hrtimer_handler; } -@@ -9239,7 +9239,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) +@@ -9269,7 +9269,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) if (!is_sampling_event(event)) return; @@ -114,10 +114,10 @@ index 4b13df38c069..974a8f9b615a 100644 } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 6e6d9e999814..21cf60c360e8 100644 +index 27f9f9a785c1..d801e6c763ee 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4914,9 +4914,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4930,9 +4930,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -207,10 +207,10 @@ index a836efd34589..c50e8f3262de 100644 clockevents_register_device(&ce_broadcast_hrtimer); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 54fd344ef973..c217af74dddf 100644 +index e774a49176cc..012bc81879bf 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -1310,7 +1310,7 @@ void tick_setup_sched_timer(void) +@@ -1314,7 +1314,7 @@ void tick_setup_sched_timer(void) /* * Emulate tick processing via per-CPU hrtimers: */ @@ -233,5 +233,5 @@ index bbc4940f21af..defd493ba967 100644 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch b/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch index 42d86ba35..343edeeae 100644 --- a/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch +++ b/debian/patches-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch @@ -1,8 +1,8 @@ -From 1f9048fd4cb26be3c10ac92b6c9b2e90d5005888 Mon Sep 17 00:00:00 2001 +From fe966abd3c6bb417ca4bac41124f80c76414f7d4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 8 Jan 2019 12:31:06 +0100 -Subject: [PATCH 102/290] sched/fair: Make the hrtimers non-hard again -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 102/319] sched/fair: Make the hrtimers non-hard again +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both hrtimer can run in softirq context because now interrupts are disabled @@ -14,10 +14,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c 
b/kernel/sched/fair.c -index 21cf60c360e8..6e6d9e999814 100644 +index d801e6c763ee..27f9f9a785c1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4914,9 +4914,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -4930,9 +4930,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -30,5 +30,5 @@ index 21cf60c360e8..6e6d9e999814 100644 cfs_b->distribute_running = 0; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch index 9bfa5a35b..23149f852 100644 --- a/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ b/debian/patches-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch @@ -1,8 +1,8 @@ -From 1669cbc2eb152b7ae7f94cb966fbfd888a191cc8 Mon Sep 17 00:00:00 2001 +From 1829af263afc43931c3868ff77a4dab2dad625be Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Mon, 16 Sep 2013 14:09:19 -0700 -Subject: [PATCH 103/290] hrtimer: Move schedule_work call to helper thread -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 103/319] hrtimer: Move schedule_work call to helper thread +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When run ltp leapsec_timer test, the following call trace is caught: @@ -94,5 +94,5 @@ index 1fd5ec39e7f4..9f3412acdb16 100644 #else -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch index fa523682f..ab142a46b 100644 --- a/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch +++ b/debian/patches-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch @@ -1,9 +1,9 @@ -From b62f8583e5a6a8a86192ed4bc3cef3e923f295d0 Mon Sep 17 00:00:00 2001 +From 43ee0863a6c13ad3494587d570d0bf286210f33f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 10:15:13 +0100 -Subject: [PATCH 104/290] hrtimer: move state change before hrtimer_cancel in +Subject: [PATCH 104/319] hrtimer: move state change before hrtimer_cancel in do_nanosleep() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There is a small window between setting t->task to NULL and waking the task up (which would set TASK_RUNNING). 
So the timer would fire, run and @@ -47,5 +47,5 @@ index 9f3412acdb16..b800efb64238 100644 if (!t->task) return 0; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch b/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch index 22bc9182e..23677641e 100644 --- a/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch +++ b/debian/patches-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch @@ -1,8 +1,8 @@ -From e63ab953f4da79eb0d2767e029e61f4d8491f131 Mon Sep 17 00:00:00 2001 +From ed0602a145e7fef5837c80d42f2f236c9114683f Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: [PATCH 105/290] posix-timers: Thread posix-cpu-timers on -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 105/319] posix-timers: Thread posix-cpu-timers on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz posix-cpu-timer code takes non -rt safe locks in hard irq context. Move it to a thread. @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner 4 files changed, 164 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index c87c11bfd9d9..ba37d39d5c6b 100644 +index e4af260f81c5..a90b6be626cd 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -832,6 +832,9 @@ struct task_struct { @@ -58,10 +58,10 @@ index 0b49b9cf5571..9e3362748214 100644 .thread_group = LIST_HEAD_INIT(init_task.thread_group), .thread_node = LIST_HEAD_INIT(init_signals.thread_head), diff --git a/kernel/fork.c b/kernel/fork.c -index 8c285876eb52..b3744a043f46 100644 +index bc182d6fa2a9..ccfcd44a370f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1580,6 +1580,9 @@ static void rt_mutex_init_task(struct task_struct *p) +@@ -1590,6 +1590,9 @@ static void rt_mutex_init_task(struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { @@ -265,5 +265,5 @@ index d62d7ae5201c..8d95e8de98b2 100644 * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch b/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch index d35f8ab1a..406bce97e 100644 --- a/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch +++ b/debian/patches-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch @@ -1,8 +1,8 @@ -From 64754a230b30e0beeda510f8a0dfcf82075166a7 Mon Sep 17 00:00:00 2001 +From f831a4cdf836e6ecd9d579c32b5cf7af54df9b89 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 May 2011 16:59:16 +0200 -Subject: [PATCH 106/290] sched: Move task_struct cleanup to RCU -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 106/319] sched: Move task_struct cleanup to RCU +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __put_task_struct() does quite some expensive work. We don't want to burden random tasks with that. 
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index ba37d39d5c6b..a1ef00db6baa 100644 +index a90b6be626cd..0b8850b6093b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1194,6 +1194,9 @@ struct task_struct { @@ -58,10 +58,10 @@ index 44c6f15800ff..d2b33e57c636 100644 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT diff --git a/kernel/fork.c b/kernel/fork.c -index b3744a043f46..4b85282d9a07 100644 +index ccfcd44a370f..309f4a20d4ac 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -666,7 +666,9 @@ static inline void put_signal_struct(struct signal_struct *sig) +@@ -676,7 +676,9 @@ static inline void put_signal_struct(struct signal_struct *sig) if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } @@ -72,7 +72,7 @@ index b3744a043f46..4b85282d9a07 100644 void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); -@@ -683,7 +685,18 @@ void __put_task_struct(struct task_struct *tsk) +@@ -693,7 +695,18 @@ void __put_task_struct(struct task_struct *tsk) if (!profile_handoff_task(tsk)) free_task(tsk); } @@ -92,5 +92,5 @@ index b3744a043f46..4b85282d9a07 100644 void __init __weak arch_task_cache_init(void) { } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch b/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch index cbf4adfcd..888d7d39b 100644 --- a/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch +++ b/debian/patches-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch @@ -1,8 +1,8 @@ -From 5d814c2f67298a28fc845d4444a083e1438c621a Mon Sep 17 00:00:00 2001 +From 704aa74a16b788832e386e06e697ce337661026e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:12:51 +0200 -Subject: [PATCH 107/290] sched: Limit the number of task migrations per batch -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 107/319] sched: Limit the number of task migrations per batch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Put an upper limit on the number of tasks which are migrated per batch to avoid large latencies. @@ -29,5 +29,5 @@ index 2f6b4365d070..5c23d1272429 100644 /* * period over which we measure -rt task CPU usage in us. -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch b/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch index 9c7f75f46..814ea4b6d 100644 --- a/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch +++ b/debian/patches-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch @@ -1,8 +1,8 @@ -From 265373d21c09c71609a28e9412050091f086ffd0 Mon Sep 17 00:00:00 2001 +From 55e6ee67814ee43c99e6a18d88a36cc734a01258 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:20:33 +0200 -Subject: [PATCH 108/290] sched: Move mmdrop to RCU on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 108/319] sched: Move mmdrop to RCU on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Takes sleeping locks and calls into the memory allocator, so nothing we want to do in task switch and oder atomic contexts. 
@@ -60,10 +60,10 @@ index e9d4e389aed9..fb59f96fdd2e 100644 * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_sem for writing before modifying the diff --git a/kernel/fork.c b/kernel/fork.c -index 4b85282d9a07..e05834c2420c 100644 +index 309f4a20d4ac..d4ec53c72577 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -632,6 +632,19 @@ void __mmdrop(struct mm_struct *mm) +@@ -642,6 +642,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -136,5 +136,5 @@ index 5c23d1272429..cb89c90513dd 100644 } #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index 8b7b23ced..c76be9916 100644 --- a/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/debian/patches-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -1,9 +1,9 @@ -From ebf1a9a382707d58a0bba64660e90f2d9088d8f6 Mon Sep 17 00:00:00 2001 +From 9460855fbfc3ee2b111fb877befd65304f028290 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 21 Nov 2016 19:31:08 +0100 -Subject: [PATCH 109/290] kernel/sched: move stack + kprobe clean up to +Subject: [PATCH 109/319] kernel/sched: move stack + kprobe clean up to __put_task_struct() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There is no need to free the stack before the task struct (except for reasons mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c -index e05834c2420c..a6f39cbb71c3 100644 +index d4ec53c72577..29b54a64daf5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -40,6 +40,7 @@ @@ -29,7 +29,7 @@ index e05834c2420c..a6f39cbb71c3 100644 #include #include #include -@@ -688,6 +689,15 @@ void __put_task_struct(struct task_struct *tsk) +@@ -698,6 +699,15 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); @@ -66,5 +66,5 @@ index cb89c90513dd..79e0d052e848 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch index f2b4d6d65..e8d148a84 100644 --- a/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch +++ b/debian/patches-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch @@ -1,9 +1,9 @@ -From 321628fe4e281be121ed55fdf60c24c6d58cc051 Mon Sep 17 00:00:00 2001 +From bfe1d3e5e10a87b70762389b764f620cedf66c2e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 25 Jun 2011 09:21:04 +0200 -Subject: [PATCH 110/290] sched: Add saved_state for tasks blocked on sleeping +Subject: [PATCH 110/319] sched: Add saved_state for tasks blocked on sleeping locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Spinlocks are state preserving in !RT. RT changes the state when a task gets blocked on a lock. 
So we need to remember the state before @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index a1ef00db6baa..c073a3273beb 100644 +index 0b8850b6093b..f180bfadff33 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -600,6 +600,8 @@ struct task_struct { @@ -104,5 +104,5 @@ index 94bec97bd5e2..c79e32488940 100644 /* * To aid in avoiding the subversion of "niceness" due to uneven distribution -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch b/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch index 5f37aaf8a..448400cf5 100644 --- a/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch +++ b/debian/patches-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch @@ -1,9 +1,9 @@ -From 26013bccba0816bd1eb6c1f7233f1f3600d89093 Mon Sep 17 00:00:00 2001 +From ea6144ee0e293f29e410ee033d98b2801ab73600 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jun 2011 09:19:06 +0200 -Subject: [PATCH 111/290] sched: Do not account rcu_preempt_depth on RT in +Subject: [PATCH 111/319] sched: Do not account rcu_preempt_depth on RT in might_sleep() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz RT changes the rcu_preempt_depth semantics, so we cannot check for it in might_sleep(). @@ -53,5 +53,5 @@ index d1c564acff76..59d43c084023 100644 return (nested == preempt_offset); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch b/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch index 0f9241f18..f58bb1101 100644 --- a/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch +++ b/debian/patches-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch @@ -1,8 +1,8 @@ -From e1018349e034a1bc9768c8870ae436d76d07d3b4 Mon Sep 17 00:00:00 2001 +From ca9ca6366a49a742e9144dc778e211e8777b2665 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 22:51:33 +0200 -Subject: [PATCH 112/290] sched: Use the proper LOCK_OFFSET for cond_resched() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 112/319] sched: Use the proper LOCK_OFFSET for cond_resched() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz RT does not increment preempt count when a 'sleeping' spinlock is locked. Update PREEMPT_LOCK_OFFSET for that case. 
@@ -29,5 +29,5 @@ index f7a17fcc3fec..b7fe717eb1f4 100644 /* * The preempt_count offset needed for things like: -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch b/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch index 3cbbe5fbc..78cfe4ec7 100644 --- a/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch +++ b/debian/patches-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch @@ -1,8 +1,8 @@ -From 8f8e3eb8db6bc5ae01fe12e677a6e95df0366beb Mon Sep 17 00:00:00 2001 +From e99a74a38d0c9a494df787b282c53fe333f2d92c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 13 Sep 2011 16:42:35 +0200 -Subject: [PATCH 113/290] sched: Disable TTWU_QUEUE on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 113/319] sched: Disable TTWU_QUEUE on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The queued remote wakeup mechanism can introduce rather large latencies if the number of migrated tasks is high. Disable it for RT. @@ -34,5 +34,5 @@ index 85ae8488039c..68de18405857 100644 /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index 3e157ceec..c29a1b5d7 100644 --- a/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/debian/patches-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -1,9 +1,9 @@ -From 43f8cfb3d7b686b307dbde08375843c434206f95 Mon Sep 17 00:00:00 2001 +From 9178362c1367d35e1c7f618e8bd6f85ea42ff923 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 18 Mar 2013 15:12:49 -0400 -Subject: [PATCH 114/290] sched/workqueue: Only wake up idle workers if not +Subject: [PATCH 114/319] sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In -rt, most spin_locks() turn into mutexes. One of these spin_lock conversions is performed on the workqueue gcwq->lock. 
When the idle @@ -41,5 +41,5 @@ index 59d43c084023..e792543de8eb 100644 to_wakeup = wq_worker_sleeping(prev); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index 88dc98d69..28d9bb459 100644 --- a/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/debian/patches-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -1,9 +1,9 @@ -From c53c9ce9f8bc7a16fadae5e065224163e69b07ff Mon Sep 17 00:00:00 2001 +From 87a2398e611289d3a257ae0c0b11c9cdcba8a9c5 Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 26 Jun 2017 17:07:15 +0200 -Subject: [PATCH 115/290] rt: Increase/decrease the nr of migratory tasks when +Subject: [PATCH 115/319] rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There is a problem in the migrate_disable()/enable() implementation regarding the number of migratory tasks in the rt/dl RQs. The problem @@ -158,5 +158,5 @@ index e792543de8eb..57617777c4ba 100644 if (p->migrate_disable_update) { struct rq *rq; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch b/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch index 6b557bc9f..017a40bda 100644 --- a/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch +++ b/debian/patches-rt/0116-hotplug-Lightweight-get-online-cpus.patch @@ -1,8 +1,8 @@ -From 65664847f66b61a354b87a68ee4538850100b07f Mon Sep 17 00:00:00 2001 +From 0481fa6108576bced438b2d4ac9e5afaaf6b85c8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 15 Jun 2011 12:36:06 +0200 -Subject: [PATCH 116/290] hotplug: Lightweight get online cpus -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 116/319] hotplug: Lightweight get online cpus +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz get_online_cpus() is a heavy weight function which involves a global mutex. 
migrate_disable() wants a simpler construct which prevents only @@ -97,5 +97,5 @@ index 57617777c4ba..42b42ebf52bc 100644 } EXPORT_SYMBOL(migrate_enable); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch b/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch index 5425eda02..8bec95cc1 100644 --- a/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch +++ b/debian/patches-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch @@ -1,8 +1,8 @@ -From b1443b83989e9c493e9d82d8a8a88a3862ea2286 Mon Sep 17 00:00:00 2001 +From 81a929dd2254c032daedf2ad8d9d8d3eb7b90397 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:56:42 +0200 -Subject: [PATCH 117/290] trace: Add migrate-disabled counter to tracing output -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 117/319] trace: Add migrate-disabled counter to tracing output +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner --- @@ -82,5 +82,5 @@ index 6e6cc64faa38..46c96744f09d 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch b/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch index de5bfdb0b..8b8b8e396 100644 --- a/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch +++ b/debian/patches-rt/0118-lockdep-Make-it-RT-aware.patch @@ -1,8 +1,8 @@ -From 25df2718875a1f8e4d5fc79fd0720e5000074ccb Mon Sep 17 00:00:00 2001 +From 10a72db76daf156c9b80018007e459c9280b61e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 18:51:23 +0200 -Subject: [PATCH 118/290] lockdep: Make it RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 118/319] lockdep: Make it RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz teach lockdep that we don't really do softirqs on -RT. @@ -74,5 +74,5 @@ index 1e272f6a01e7..1938b4bfb098 100644 if (!debug_locks) print_irqtrace_events(current); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch b/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch index 5a4e64da8..3e121d553 100644 --- a/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch +++ b/debian/patches-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch @@ -1,9 +1,9 @@ -From 30b45605a910b13cdacec56a13b7715aa4aef8f9 Mon Sep 17 00:00:00 2001 +From 7c86de325540ff5042f16a794a489107b8b1e1fd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 29 Nov 2011 20:18:22 -0500 -Subject: [PATCH 119/290] tasklet: Prevent tasklets from going into infinite +Subject: [PATCH 119/319] tasklet: Prevent tasklets from going into infinite spin in RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, and spinlocks turn are mutexes. 
But this can cause issues with @@ -299,5 +299,5 @@ index 6f584861d329..1d3a482246cc 100644 { return local_softirq_pending(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch b/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch index 0f7afb43a..a6d32440e 100644 --- a/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch +++ b/debian/patches-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch @@ -1,8 +1,8 @@ -From 8e833dc09a89e60167a67518738fc02a174737bd Mon Sep 17 00:00:00 2001 +From 5fa29130b2defd1c01e16ed8ac99aa31c6564dc9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 13 Nov 2011 17:17:09 +0100 -Subject: [PATCH 120/290] softirq: Check preemption after reenabling interrupts -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 120/319] softirq: Check preemption after reenabling interrupts +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz raise_softirq_irqoff() disables interrupts and wakes the softirq daemon, but after reenabling interrupts there is no preemption check, @@ -117,7 +117,7 @@ index 86a709954f5a..9c069ef83d6d 100644 return 0; } diff --git a/net/core/dev.c b/net/core/dev.c -index a26d87073f71..f64e9d036915 100644 +index 1c0224e8fc78..77a757b363d1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2727,6 +2727,7 @@ static void __netif_reschedule(struct Qdisc *q) @@ -144,7 +144,7 @@ index a26d87073f71..f64e9d036915 100644 atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -5830,12 +5833,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) +@@ -5801,12 +5804,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) sd->rps_ipi_list = NULL; local_irq_enable(); @@ -159,7 +159,7 @@ index a26d87073f71..f64e9d036915 100644 } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -5913,6 +5918,7 @@ void __napi_schedule(struct napi_struct *n) +@@ -5884,6 +5889,7 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); @@ -167,7 +167,7 @@ index a26d87073f71..f64e9d036915 100644 } EXPORT_SYMBOL(__napi_schedule); -@@ -9504,6 +9510,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -9486,6 +9492,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); @@ -176,5 +176,5 @@ index a26d87073f71..f64e9d036915 100644 #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch b/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch index a43e828dc..45e20e8b5 100644 --- a/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch +++ b/debian/patches-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch @@ -1,8 +1,8 @@ -From bfa59221785f7f02d4c4439aef840c3c84aeb0b1 Mon Sep 17 00:00:00 2001 +From dda6c899e80d750a9cf7bf388cd0283d5a479bc7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 13:59:17 +0200 -Subject: [PATCH 121/290] softirq: Disable softirq stacks for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 121/319] softirq: Disable softirq stacks for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz 
Disable extra stacks for softirqs. We want to preempt softirqs and having them on special IRQ-stack does not make this easier. @@ -173,5 +173,5 @@ index e74936c7be48..cb2d1384cb0d 100644 #else static inline void do_softirq_own_stack(void) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0122-softirq-Split-softirq-locks.patch b/debian/patches-rt/0122-softirq-Split-softirq-locks.patch index affdd5500..51bd7cf35 100644 --- a/debian/patches-rt/0122-softirq-Split-softirq-locks.patch +++ b/debian/patches-rt/0122-softirq-Split-softirq-locks.patch @@ -1,8 +1,8 @@ -From e530715caf9839d15d550ed5ed57d4d928ca3806 Mon Sep 17 00:00:00 2001 +From d4f1824f0a5dd2cc42903658466ff80436616fc7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 14:20:47 +0100 -Subject: [PATCH 122/290] softirq: Split softirq locks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 122/319] softirq: Split softirq locks +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which @@ -178,7 +178,7 @@ index 9984f2b75b73..27c3176d88d2 100644 #define in_task() (!(preempt_count() & \ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) diff --git a/include/linux/sched.h b/include/linux/sched.h -index c073a3273beb..baa5fceea0ff 100644 +index f180bfadff33..f4ff928e6be3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1198,6 +1198,8 @@ struct task_struct { @@ -808,10 +808,10 @@ index 1d3a482246cc..fd89f8ab85ac 100644 .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index c217af74dddf..6482945f8ae8 100644 +index 012bc81879bf..2b0ddd50e879 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -891,14 +891,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) +@@ -895,14 +895,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { @@ -828,5 +828,5 @@ index c217af74dddf..6482945f8ae8 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch b/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch index 45b10cd16..2d0ece11f 100644 --- a/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ b/debian/patches-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch @@ -1,8 +1,8 @@ -From 506e743f138f50503ba86a10f91b8d6ee2855e69 Mon Sep 17 00:00:00 2001 +From 4e1827d4c8a215eabba854198f63f395e9a86177 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 16 Jun 2017 19:03:16 +0200 -Subject: [PATCH 123/290] net/core: use local_bh_disable() in netif_rx_ni() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 123/319] net/core: use local_bh_disable() in netif_rx_ni() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In 2004 netif_rx_ni() gained a preempt_disable() section around netif_rx() and its do_softirq() + testing for it. 
The do_softirq() part @@ -19,10 +19,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index f64e9d036915..4926a260bf0e 100644 +index 77a757b363d1..c9e199232f21 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4541,11 +4541,9 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4529,11 +4529,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); @@ -37,5 +37,5 @@ index f64e9d036915..4926a260bf0e 100644 return err; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch b/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch index 248e4e5f4..364bc4dac 100644 --- a/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch +++ b/debian/patches-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch @@ -1,9 +1,9 @@ -From 393c57f1d000ec029efc1fcedfbacc5a6195d15c Mon Sep 17 00:00:00 2001 +From 71036023dc35e2af50596bee6375aaae9fb33b45 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2012 13:01:27 +0100 -Subject: [PATCH 124/290] genirq: Allow disabling of softirq processing in irq +Subject: [PATCH 124/319] genirq: Allow disabling of softirq processing in irq thread context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The processing of softirqs in irq thread context is a performance gain for the non-rt workloads of a system, but it's counterproductive for @@ -41,7 +41,7 @@ index 6c25b962ba89..99f8b7ace7c9 100644 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) diff --git a/include/linux/irq.h b/include/linux/irq.h -index c9bffda04a45..73d3146db74d 100644 +index 6ecaf056ab63..d2487df7ae89 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -69,6 +69,7 @@ enum irqchip_irq_state; @@ -157,5 +157,5 @@ index fd89f8ab85ac..3e9333d148ad 100644 { return current->flags & PF_IN_SOFTIRQ; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch index fc4a162dd..425688a27 100644 --- a/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ b/debian/patches-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch @@ -1,8 +1,8 @@ -From 74b99df23856c57791eccaa46030824066095f30 Mon Sep 17 00:00:00 2001 +From 62f68fbe67a9142457a30320d7f0515287db58a9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 16:34:17 +0100 -Subject: [PATCH 125/290] softirq: split timer softirqs out of ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 125/319] softirq: split timer softirqs out of ksoftirqd +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with timer wakeup which can not happen in hardirq context. 
The prio has been @@ -211,5 +211,5 @@ index 3e9333d148ad..fe4e59c80a08 100644 } early_initcall(spawn_ksoftirqd); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch b/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch index 64c3c8c74..fa8181662 100644 --- a/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch +++ b/debian/patches-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch @@ -1,9 +1,9 @@ -From ece86c041648cc912cb0e5f6c921ce25c8ffc506 Mon Sep 17 00:00:00 2001 +From 29faaa554b2303ff65a5a3e720f88a7eaf0460d8 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 13:19:59 +0100 -Subject: [PATCH 126/290] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 126/319] softirq: Avoid "local_softirq_pending" messages if ksoftirqd is blocked -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz If the ksoftirqd thread has a softirq pending and is blocked on the `local_softirq_locks' lock then softirq_check_pending_idle() won't @@ -108,5 +108,5 @@ index fe4e59c80a08..1920985eeb09 100644 if (warnpending) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch b/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch index cfcbe2339..a4a2ca595 100644 --- a/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch +++ b/debian/patches-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch @@ -1,9 +1,9 @@ -From 7bc071d971f2caab6d8cf678fa51ac0f0f84be57 Mon Sep 17 00:00:00 2001 +From 013ad065472c892496cf826d70802dc88c44d86b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:49:29 +0100 -Subject: [PATCH 127/290] softirq: Avoid "local_softirq_pending" messages if +Subject: [PATCH 127/319] softirq: Avoid "local_softirq_pending" messages if task is in cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz If the softirq thread enters cpu_chill() then ->state is UNINTERRUPTIBLE and has no ->pi_blocked_on set and so its mask is not taken into account. @@ -37,5 +37,5 @@ index 1920985eeb09..27a4bb2303d0 100644 *pending &= ~(tsk->softirqs_raised); ret = true; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch b/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch index f01e720f7..31fc33e35 100644 --- a/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch +++ b/debian/patches-rt/0128-rtmutex-trylock-is-okay-on-RT.patch @@ -1,8 +1,8 @@ -From f883f2d061fa3f01c9c2657223aa1cfb02b9aef3 Mon Sep 17 00:00:00 2001 +From c686d579a73a3b4a061b8b4729ff581efe3da75f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 2 Dec 2015 11:34:07 +0100 -Subject: [PATCH 128/290] rtmutex: trylock is okay on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 128/319] rtmutex: trylock is okay on -RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. 
On -RT we don't run softirqs in IRQ context but in thread context so it is @@ -30,5 +30,5 @@ index 9562aaa2afdc..72abe7c121fa 100644 ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch index e5b7e7ca1..5a6767457 100644 --- a/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ b/debian/patches-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch @@ -1,8 +1,8 @@ -From e3f69c0b583258222d76fafb057b054796b9ad31 Mon Sep 17 00:00:00 2001 +From b9742e6bc784e27b49af5a584442a38d89587e2f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 15 Sep 2016 10:51:27 +0200 -Subject: [PATCH 129/290] fs/nfs: turn rmdir_sem into a semaphore -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 129/319] fs/nfs: turn rmdir_sem into a semaphore +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The RW semaphore had a reader side which used the _non_owner version because it most likely took the reader lock in one thread and released it @@ -22,10 +22,10 @@ Signed-off-by: Sebastian Andrzej Siewior 4 files changed, 43 insertions(+), 4 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index b8d686087952..17f8a9259971 100644 +index 4ae726e70d87..c60b20884c45 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c -@@ -1815,7 +1815,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) +@@ -1836,7 +1836,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { @@ -37,7 +37,7 @@ index b8d686087952..17f8a9259971 100644 error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { -@@ -1825,7 +1829,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) +@@ -1846,7 +1850,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) case -ENOENT: nfs_dentry_handle_enoent(dentry); } @@ -147,5 +147,5 @@ index a0831e9d19c9..94b6fefd90b0 100644 #if IS_ENABLED(CONFIG_NFS_V4) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch b/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch index d7256bc40..672c58beb 100644 --- a/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch +++ b/debian/patches-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch @@ -1,8 +1,8 @@ -From ad0da5a36ccd4cb2f3a320244327b586e3c435c3 Mon Sep 17 00:00:00 2001 +From ed2e9afc28b9ab6676bee532a91b6f8cd6ac4fd6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:04:15 +0200 -Subject: [PATCH 130/290] rtmutex: Handle the various new futex race conditions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 130/319] rtmutex: Handle the various new futex race conditions +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz RT opens a few new interesting race conditions in the rtmutex/futex combo due to futex hash bucket lock being a 'sleeping' spinlock and @@ -251,5 +251,5 @@ index d1d62f942be2..f4b6596d224a 100644 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); -- -2.24.1 +2.25.0 diff --git 
a/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch index 58ff0f63f..2ae95da0a 100644 --- a/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch +++ b/debian/patches-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch @@ -1,8 +1,8 @@ -From 7edf91c5189d3b406d53b139fd703354cc43dd16 Mon Sep 17 00:00:00 2001 +From 0cd3c25ec5ed8f44777abcd60b992fd55f5a5dd6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 131/290] futex: Fix bug on when a requeued RT task times out -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 131/319] futex: Fix bug on when a requeued RT task times out +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Requeue with timeout causes a bug with PREEMPT_RT_FULL. @@ -117,5 +117,5 @@ index f4b6596d224a..461527f3f7af 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch index 7731e3e1d..1c4e61269 100644 --- a/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ b/debian/patches-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -1,9 +1,9 @@ -From 6a2f8ae29dd9f3697e918a18a871efdaa4d919d9 Mon Sep 17 00:00:00 2001 +From 1e391bda4007233d76d225b9e6e4fdd465fab8e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2013 11:17:42 +0100 -Subject: [PATCH 132/290] futex: Ensure lock/unlock symetry versus pi_lock and +Subject: [PATCH 132/319] futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In exit_pi_state_list() we have the following locking construct: @@ -46,5 +46,5 @@ index 5ec49f862c53..60be4530c767 100644 continue; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0133-pid.h-include-atomic.h.patch b/debian/patches-rt/0133-pid.h-include-atomic.h.patch index eaef5a792..97770536a 100644 --- a/debian/patches-rt/0133-pid.h-include-atomic.h.patch +++ b/debian/patches-rt/0133-pid.h-include-atomic.h.patch @@ -1,8 +1,8 @@ -From 5e16582aa3ade481ac6b23ffe90016f2d285f18a Mon Sep 17 00:00:00 2001 +From 3a12e37a58075cd73b24e5d10ecfedda0bb55d84 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jul 2015 19:43:56 +0300 -Subject: [PATCH 133/290] pid.h: include atomic.h -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 133/319] pid.h: include atomic.h +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This patch fixes build error: CC kernel/pid_namespace.o @@ -39,5 +39,5 @@ index 14a9a39da9c7..a9026a5da196 100644 enum pid_type { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch b/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch index e9e416999..34d823bc2 100644 --- a/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch +++ 
b/debian/patches-rt/0134-arm-include-definition-for-cpumask_t.patch @@ -1,8 +1,8 @@ -From e104dba071027b30e97bc63e57da11ad26516141 Mon Sep 17 00:00:00 2001 +From f35c6e11a94c7484c2aa30fc64b63b5a11fc8dfb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 22 Dec 2016 17:28:33 +0100 -Subject: [PATCH 134/290] arm: include definition for cpumask_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 134/319] arm: include definition for cpumask_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This definition gets pulled in by other files. With the (later) split of RCU and spinlock.h it won't compile anymore. @@ -27,5 +27,5 @@ index 46d41140df27..c421b5b81946 100644 struct pt_regs; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch index 5defd741e..b7c8e1f9c 100644 --- a/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ b/debian/patches-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch @@ -1,9 +1,9 @@ -From 002db8d9c11d02194c1e281091b8b577b9f36014 Mon Sep 17 00:00:00 2001 +From a5680695b591e9138b6ddad150a8a375f7dd3dd8 Mon Sep 17 00:00:00 2001 From: "Wolfgang M. Reimer" Date: Tue, 21 Jul 2015 16:20:07 +0200 -Subject: [PATCH 135/290] locking: locktorture: Do NOT include rwlock.h +Subject: [PATCH 135/319] locking: locktorture: Do NOT include rwlock.h directly -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Including rwlock.h directly will cause kernel builds to fail if CONFIG_PREEMPT_RT_FULL is defined. The correct header file @@ -30,5 +30,5 @@ index 7d0b0ed74404..a81e6ef33a04 100644 #include #include -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch b/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch index 6ffdb416f..e966be4bd 100644 --- a/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch +++ b/debian/patches-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch @@ -1,8 +1,8 @@ -From bdcf220d6064da9072193b604f5fdaf9f7cf952f Mon Sep 17 00:00:00 2001 +From 3084b89e004344a1e78fdc772c45479dab746489 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jun 2011 11:43:52 +0200 -Subject: [PATCH 136/290] rtmutex: Add rtmutex_lock_killable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 136/319] rtmutex: Add rtmutex_lock_killable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add "killable" type to rtmutex. We need this since rtmutex are used as "normal" mutexes which do use this type. 
@@ -56,5 +56,5 @@ index 1c3f56d3d9b6..a4b2af7718f8 100644 * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch b/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch index 2f42342f3..419662032 100644 --- a/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch +++ b/debian/patches-rt/0137-rtmutex-Make-lock_killable-work.patch @@ -1,8 +1,8 @@ -From 6320c2a328b759909197f3ff76944b43141bc563 Mon Sep 17 00:00:00 2001 +From 956d097b23e3e2a88975189bf2fc9cb264f048c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 1 Apr 2017 12:50:59 +0200 -Subject: [PATCH 137/290] rtmutex: Make lock_killable work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 137/319] rtmutex: Make lock_killable work +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Locking an rt mutex killable does not work because signal handling is restricted to TASK_INTERRUPTIBLE. @@ -47,5 +47,5 @@ index a4b2af7718f8..f058bb976212 100644 raw_spin_unlock_irq(&lock->wait_lock); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch b/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch index 1f3d7394a..5da0bf7cb 100644 --- a/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch +++ b/debian/patches-rt/0138-spinlock-Split-the-lock-types-header.patch @@ -1,8 +1,8 @@ -From 575aa1d2041b59a87bb62ae844b43779bb1114e6 Mon Sep 17 00:00:00 2001 +From d813c8be52aa936f41b4cdd2ac8c3f32fbac1166 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:34:01 +0200 -Subject: [PATCH 138/290] spinlock: Split the lock types header -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 138/319] spinlock: Split the lock types header +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Split raw_spinlock into its own file and the remaining spinlock_t into its own non-RT header. The non-RT header will be replaced later by sleeping @@ -217,5 +217,5 @@ index 000000000000..822bf64a61d3 + +#endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch b/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch index 155690753..cac598643 100644 --- a/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch +++ b/debian/patches-rt/0139-rtmutex-Avoid-include-hell.patch @@ -1,8 +1,8 @@ -From 66e71d123479ba1870a7b7ab6935f7f982379ca8 Mon Sep 17 00:00:00 2001 +From 31d43fa8c5ffd110ebe5501c6dfa154d6f280979 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:06:39 +0200 -Subject: [PATCH 139/290] rtmutex: Avoid include hell -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 139/319] rtmutex: Avoid include hell +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Include only the required raw types. This avoids pulling in the complete spinlock header which in turn requires rtmutex.h at some point. 
@@ -26,5 +26,5 @@ index 81ece6a8291a..a355289b1fa1 100644 extern int max_lock_depth; /* for sysctl */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch b/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch index e2c3288ae..d0cb51434 100644 --- a/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch +++ b/debian/patches-rt/0140-rbtree-don-t-include-the-rcu-header.patch @@ -1,11 +1,11 @@ -From a848373d9df624fd8692e4419ae43107453a6ee5 Mon Sep 17 00:00:00 2001 +From ebe0774f3283782a6e2b45ef217624fea787efdd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 16:56:02 +0100 -Subject: [PATCH 140/290] rbtree: don't include the rcu header +Subject: [PATCH 140/319] rbtree: don't include the rcu header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The RCU header pulls in spinlock.h and fails due not yet defined types: @@ -171,5 +171,5 @@ index 027c58cdbb6e..e6733d7911e9 100644 * rcu_swap_protected() - swap an RCU and a regular pointer * @rcu_ptr: RCU pointer -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch index b57b0356f..6cd4afc1c 100644 --- a/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch +++ b/debian/patches-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch @@ -1,8 +1,8 @@ -From 0a9e717c46c47cc03fb028892cea5e36feda7230 Mon Sep 17 00:00:00 2001 +From bd8fa60c2d5fbdb72d940ea494db255342022b7b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:14:22 +0200 -Subject: [PATCH 141/290] rtmutex: Provide rt_mutex_slowlock_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 141/319] rtmutex: Provide rt_mutex_slowlock_locked() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt. @@ -141,5 +141,5 @@ index 461527f3f7af..cb9815f0c766 100644 #ifdef CONFIG_DEBUG_RT_MUTEXES # include "rtmutex-debug.h" -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch index 3a1cfbd8a..f3a85c78e 100644 --- a/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch +++ b/debian/patches-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch @@ -1,9 +1,9 @@ -From bee16dd12cbcf6b35c16d789870c4b5bd5e26436 Mon Sep 17 00:00:00 2001 +From 775ff55569e9c312593ff9dab042f609f9eb66f0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 16:36:39 +0200 -Subject: [PATCH 142/290] rtmutex: export lockdep-less version of rt_mutex's +Subject: [PATCH 142/319] rtmutex: export lockdep-less version of rt_mutex's lock, trylock and unlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Required for lock implementation ontop of rtmutex. 
@@ -149,5 +149,5 @@ index cb9815f0c766..5955ad2aa2a8 100644 struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch b/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch index 85d548166..c1ecce496 100644 --- a/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch +++ b/debian/patches-rt/0143-rtmutex-add-sleeping-lock-implementation.patch @@ -1,8 +1,8 @@ -From c156cbabd8b7d07e6ddf840f29799447a47d4758 Mon Sep 17 00:00:00 2001 +From 49d6c96f9a4e33031497734adba2275935eada9a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:11:19 +0200 -Subject: [PATCH 143/290] rtmutex: add sleeping lock implementation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 143/319] rtmutex: add sleeping lock implementation +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -103,7 +103,7 @@ index a355289b1fa1..138bd1e183e0 100644 * rt_mutex_is_locked - is the mutex locked * @lock: the mutex to be queried diff --git a/include/linux/sched.h b/include/linux/sched.h -index baa5fceea0ff..f25c9566ee92 100644 +index f4ff928e6be3..527d04f9163e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -134,6 +134,9 @@ struct task_group; @@ -396,10 +396,10 @@ index 000000000000..3e3d8c5f7a9a + +#endif diff --git a/kernel/fork.c b/kernel/fork.c -index a6f39cbb71c3..7399bff2e08d 100644 +index 29b54a64daf5..ecec0f8bef7e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -890,6 +890,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) +@@ -900,6 +900,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; @@ -1204,5 +1204,5 @@ index 42b42ebf52bc..6a0ccaea2b42 100644 } } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch index beb16fe26..fa4e70962 100644 --- a/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch @@ -1,8 +1,8 @@ -From 99c5293588e06d70bf0da65917ba3f7ab80dcf30 Mon Sep 17 00:00:00 2001 +From e2b6310d709604f0b4521118eebf64a2109bee5f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:17:03 +0200 -Subject: [PATCH 144/290] rtmutex: add mutex implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 144/319] rtmutex: add mutex implementation based on rtmutex +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -379,5 +379,5 @@ index 000000000000..4f81595c0f52 +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch index 785ea3aaa..55fbe6f5c 100644 --- a/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch +++ 
b/debian/patches-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch @@ -1,8 +1,8 @@ -From 45c0ef033d0844c47d08e2caf85dc92ab5143eb2 Mon Sep 17 00:00:00 2001 +From 4129defdfb91febbac91b10c48acde5ee137c9a7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:28:34 +0200 -Subject: [PATCH 145/290] rtmutex: add rwsem implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 145/319] rtmutex: add rwsem implementation based on rtmutex +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The RT specific R/W semaphore implementation restricts the number of readers to one because a writer cannot block on multiple readers and inherit its @@ -423,5 +423,5 @@ index 000000000000..7d3c5cf3d23d + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch index 6c9874039..c90502704 100644 --- a/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch +++ b/debian/patches-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch @@ -1,8 +1,8 @@ -From 18ad344168ec1fcb667f20962bfe16c9a7c3e019 Mon Sep 17 00:00:00 2001 +From 08c9fd49c85c7d0a7e16fab4636e010ab107ad0d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:18:06 +0200 -Subject: [PATCH 146/290] rtmutex: add rwlock implementation based on rtmutex -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 146/319] rtmutex: add rwlock implementation based on rtmutex +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The implementation is bias-based, similar to the rwsem implementation. @@ -578,5 +578,5 @@ index 000000000000..aebb7ce25bc6 +} +EXPORT_SYMBOL(__rt_rwlock_init); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch b/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch index 561f56ac8..af553cda6 100644 --- a/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch +++ b/debian/patches-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch @@ -1,8 +1,8 @@ -From c4a03078339c1b08038e82f98ad3db155663a23c Mon Sep 17 00:00:00 2001 +From 5afbc726a0f6712cdcb25f4f8f7d51b5d70acf40 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Jan 2019 21:16:31 +0100 -Subject: [PATCH 147/290] rtmutex/rwlock: preserve state like a sleeping lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 147/319] rtmutex/rwlock: preserve state like a sleeping lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The rwlock is spinning while acquiring a lock. 
Therefore it must become a sleeping lock on RT and preserve its task state while sleeping and @@ -29,5 +29,5 @@ index aebb7ce25bc6..8f90afe111ce 100644 /* * The slowlock() above is guaranteed to return with the rtmutex is -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch index a5cbf01b2..8c7a8925b 100644 --- a/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch +++ b/debian/patches-rt/0148-rtmutex-wire-up-RT-s-locking.patch @@ -1,8 +1,8 @@ -From 9e87c4922c3ae6f3e073a0ecb1dc4b0665c4b4be Mon Sep 17 00:00:00 2001 +From 39c84b2a950c19cdc5f92a2e3cf5ad888c385e35 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Oct 2017 17:31:14 +0200 -Subject: [PATCH 148/290] rtmutex: wire up RT's locking -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 148/319] rtmutex: wire up RT's locking +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior @@ -233,7 +233,7 @@ index 936f3d14dd6b..e89b70f474af 100644 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c -index 9aa0fccd5d43..76d0b40d9193 100644 +index 03595c29c566..d63df281b495 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, @@ -267,5 +267,5 @@ index 9aa0fccd5d43..76d0b40d9193 100644 + +#endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch index 6daa0e9d1..9d9f250e9 100644 --- a/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ b/debian/patches-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch @@ -1,8 +1,8 @@ -From 64764668902a060fa582a410b499d57d0777b24c Mon Sep 17 00:00:00 2001 +From ce20b77d7a96117cc50cf64c753ad122b284615c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 17:34:38 +0200 -Subject: [PATCH 149/290] rtmutex: add ww_mutex addon for mutex-rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 149/319] rtmutex: add ww_mutex addon for mutex-rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Sebastian Andrzej Siewior --- @@ -441,5 +441,5 @@ index 7d3c5cf3d23d..660e22caf709 100644 * The slowlock() above is guaranteed to return with the rtmutex (for * ret = 0) is now held, so there can't be a writer active. 
Increment -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch b/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch index a783095ce..5dab60b9e 100644 --- a/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch +++ b/debian/patches-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch @@ -1,8 +1,8 @@ -From 3df2347f3344f0d953425b31f1bf75d408a4b872 Mon Sep 17 00:00:00 2001 +From 64941e7799938ddb759928a74bd4e40f5eee2443 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 14:58:57 +0200 -Subject: [PATCH 150/290] kconfig: Add PREEMPT_RT_FULL -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 150/319] kconfig: Add PREEMPT_RT_FULL +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Introduce the final symbol for PREEMPT_RT_FULL. @@ -65,5 +65,5 @@ index 87f1fc9801d7..f67b15236936 100755 # Truncate to maximum length -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch index a6a574a12..87db2e889 100644 --- a/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch +++ b/debian/patches-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch @@ -1,9 +1,9 @@ -From d195cae60a247824d8be07574d6fcc425b35d074 Mon Sep 17 00:00:00 2001 +From e56a3ddda95173ad3acab9e9d3880dfcf5288934 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 13 Nov 2017 12:56:53 -0500 -Subject: [PATCH 151/290] locking/rt-mutex: fix deadlock in device mapper / +Subject: [PATCH 151/319] locking/rt-mutex: fix deadlock in device mapper / block-IO -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When some block device driver creates a bio and submits it to another block device driver, the bio is added to current->bio_list (in order to @@ -77,5 +77,5 @@ index 1f2dc2dfe2e7..b38c3a92dce8 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch index c0c0d2afd..3e443e999 100644 --- a/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch +++ b/debian/patches-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch @@ -1,8 +1,8 @@ -From d501d64d4060f7957c5e2d38cb3b200cf3558c49 Mon Sep 17 00:00:00 2001 +From 86cac26a702227800c53ab6583cd396151de9b8e Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 4 Jan 2019 15:33:21 -0500 -Subject: [PATCH 152/290] locking/rt-mutex: Flush block plug on __down_read() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 152/319] locking/rt-mutex: Flush block plug on __down_read() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __down_read() bypasses the rtmutex frontend to call rt_mutex_slowlock_locked() directly, and thus it needs to call @@ -42,5 +42,5 @@ index 660e22caf709..f518495bd6cc 100644 might_sleep(); raw_spin_lock_irq(&m->wait_lock); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch 
b/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch index f790e4fcc..a753fd1c2 100644 --- a/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ b/debian/patches-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch @@ -1,9 +1,9 @@ -From d0199e56eaa78d4c3026051cb0281d3a810d1ae3 Mon Sep 17 00:00:00 2001 +From 03e2d66f2ae87a596892829b69b92763f74f4c04 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 16 Nov 2017 16:48:48 +0100 -Subject: [PATCH 153/290] locking/rtmutex: re-init the wait_lock in +Subject: [PATCH 153/319] locking/rtmutex: re-init the wait_lock in rt_mutex_init_proxy_locked() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz We could provide a key-class for the lockdep (and fixup all callers) or move the init to all callers (like it was) in order to avoid lockdep @@ -35,5 +35,5 @@ index b38c3a92dce8..94788662b2f2 100644 rt_mutex_set_owner(lock, proxy_owner); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch index 012c74b01..73fb53e19 100644 --- a/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/debian/patches-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -1,8 +1,8 @@ -From e7e712a633a103d1d141dba845f847dcf20a9f47 Mon Sep 17 00:00:00 2001 +From 8d74d6f05755783b08058cf62506b794116f4d92 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 18:21:04 +0200 -Subject: [PATCH 154/290] ptrace: fix ptrace vs tasklist_lock race -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 154/319] ptrace: fix ptrace vs tasklist_lock race +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz As explained by Alexander Fyodorov : @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index f25c9566ee92..32a9ca5dff59 100644 +index 527d04f9163e..dd47fd913997 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,12 +101,8 @@ struct task_group; @@ -100,7 +100,7 @@ index f25c9566ee92..32a9ca5dff59 100644 * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. 
The return diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index fed682a01a75..ace2839323de 100644 +index b93eb4eaf7ac..a38b304fb9fd 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -174,7 +174,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) @@ -162,5 +162,5 @@ index 6a0ccaea2b42..6bab30347081 100644 task_rq_unlock(rq, p, &rf); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch b/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch index 12b3545fd..d9990f3f4 100644 --- a/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch +++ b/debian/patches-rt/0155-rtmutex-annotate-sleeping-lock-context.patch @@ -1,8 +1,8 @@ -From 73e797275adecb403374a97cd6d6737f6a6f8671 Mon Sep 17 00:00:00 2001 +From ac8b43984252b61d87433b2664f1225135a6ae51 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Sep 2017 14:25:13 +0200 -Subject: [PATCH 155/290] rtmutex: annotate sleeping lock context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 155/319] rtmutex: annotate sleeping lock context +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The RCU code complains on schedule() within a rcu_readlock() section. The valid scenario on -RT is if a sleeping is held. In order to suppress @@ -54,7 +54,7 @@ index 27c3176d88d2..9eafc34898b4 100644 #define migrate_disable() barrier() #define migrate_enable() barrier() diff --git a/include/linux/sched.h b/include/linux/sched.h -index 32a9ca5dff59..c5f050afff18 100644 +index dd47fd913997..682cf84a7e1b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -673,6 +673,15 @@ struct task_struct { @@ -231,7 +231,7 @@ index 8f90afe111ce..c3b91205161c 100644 EXPORT_SYMBOL(rt_write_unlock); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index a97c20ea9bce..564e3927e7b0 100644 +index 5f6de49dc78e..35f3552b7463 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -337,9 +337,13 @@ static void rcu_preempt_note_context_switch(bool preempt) @@ -304,5 +304,5 @@ index 6bab30347081..d49580cb0eb2 100644 +EXPORT_SYMBOL(migrate_enable); #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch index a3748cf9b..0d39fa1aa 100644 --- a/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ b/debian/patches-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch @@ -1,9 +1,9 @@ -From 65a47d500d54261e050d9f73a554a1ad527ea6d7 Mon Sep 17 00:00:00 2001 +From e2610a2a0fb6fb2138336d411c65ec23f1c963e9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 5 Jul 2018 14:44:51 +0200 -Subject: [PATCH 156/290] sched/migrate_disable: fallback to preempt_disable() +Subject: [PATCH 156/319] sched/migrate_disable: fallback to preempt_disable() instead barrier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On SMP + !RT migrate_disable() is still around. It is not part of spin_lock() anymore so it has almost no users. 
However the futex code has a workaround for @@ -71,7 +71,7 @@ index 9eafc34898b4..ed8413e7140f 100644 { return 0; diff --git a/include/linux/sched.h b/include/linux/sched.h -index c5f050afff18..53d92153700b 100644 +index 682cf84a7e1b..a38a2c2a8fe4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -667,7 +667,7 @@ struct task_struct { @@ -200,5 +200,5 @@ index 5027158d3908..dd6c364d6f01 100644 #endif P(nr_cpus_allowed); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch index 77748c8ef..1df15490f 100644 --- a/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ b/debian/patches-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch @@ -1,9 +1,9 @@ -From 5b55069e90dd13bbd2376868c9ae8dab6a2e72d1 Mon Sep 17 00:00:00 2001 +From 5b9c8f815e46c5ac20c8663a7e01e2a546e11037 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 17:40:42 +0200 -Subject: [PATCH 157/290] locking: don't check for __LINUX_SPINLOCK_TYPES_H on +Subject: [PATCH 157/319] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT archs -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Upstream uses arch_spinlock_t within spinlock_t and requests that spinlock_types.h header file is included first. @@ -178,5 +178,5 @@ index c09b6407ae1b..b0243ba07fb7 100644 * include/linux/spinlock_types_up.h - spinlock type definitions for UP * -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0158-rcu-Frob-softirq-test.patch b/debian/patches-rt/0158-rcu-Frob-softirq-test.patch index c586f96ec..b8058dcf4 100644 --- a/debian/patches-rt/0158-rcu-Frob-softirq-test.patch +++ b/debian/patches-rt/0158-rcu-Frob-softirq-test.patch @@ -1,8 +1,8 @@ -From 9baac22c59df91638ab1dacfebd3290c5bfd73ca Mon Sep 17 00:00:00 2001 +From f85218846b07974817abd1dd361a01207911133a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 13 Aug 2011 00:23:17 +0200 -Subject: [PATCH 158/290] rcu: Frob softirq test -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 158/319] rcu: Frob softirq test +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz With RT_FULL we get the below wreckage: @@ -156,7 +156,7 @@ Signed-off-by: Peter Zijlstra 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index 564e3927e7b0..429a2f144e19 100644 +index 35f3552b7463..e9ce51e19e87 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -524,7 +524,7 @@ static void rcu_read_unlock_special(struct task_struct *t) @@ -169,5 +169,5 @@ index 564e3927e7b0..429a2f144e19 100644 "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch b/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch index 11e05a9c4..056ed58b4 100644 --- a/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch +++ b/debian/patches-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch @@ -1,8 +1,8 @@ -From 901bc5821a73d1fc6bbfe65cf07194fd04845366 Mon Sep 17 00:00:00 2001 +From 
efa94839087590d84d45a6c77b1ba0714f8a8a8f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Oct 2011 11:59:38 -0700 -Subject: [PATCH 159/290] rcu: Merge RCU-bh into RCU-preempt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 159/319] rcu: Merge RCU-bh into RCU-preempt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, @@ -347,5 +347,5 @@ index 81688a133552..6ffafb1b1584 100644 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch b/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch index 737da3cc1..03ef518de 100644 --- a/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch +++ b/debian/patches-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch @@ -1,8 +1,8 @@ -From 8a484b91ae77a0557e92b9e07f3fe82b7ff735cb Mon Sep 17 00:00:00 2001 +From e2468ac4518753aa765f669dc553126ab5ed8372 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 5 Oct 2011 11:45:18 -0700 -Subject: [PATCH 160/290] rcu: Make ksoftirqd do RCU quiescent states -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 160/319] rcu: Make ksoftirqd do RCU quiescent states +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore @@ -65,7 +65,7 @@ index 1456a3d97971..1a40e3d44cb8 100644 /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index 429a2f144e19..bee9bffeb0ce 100644 +index e9ce51e19e87..938e64c69d18 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -29,6 +29,7 @@ @@ -76,7 +76,7 @@ index 429a2f144e19..bee9bffeb0ce 100644 #include #include #include "../time/tick-internal.h" -@@ -1407,7 +1408,7 @@ static void rcu_prepare_kthreads(int cpu) +@@ -1408,7 +1409,7 @@ static void rcu_prepare_kthreads(int cpu) #endif /* #else #ifdef CONFIG_RCU_BOOST */ @@ -85,7 +85,7 @@ index 429a2f144e19..bee9bffeb0ce 100644 /* * Check to see if any future RCU-related work will need to be done -@@ -1423,7 +1424,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) +@@ -1424,7 +1425,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = KTIME_MAX; return rcu_cpu_has_callbacks(NULL); } @@ -95,7 +95,7 @@ index 429a2f144e19..bee9bffeb0ce 100644 /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. -@@ -1520,6 +1523,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) +@@ -1521,6 +1524,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) return cbs_ready; } @@ -104,7 +104,7 @@ index 429a2f144e19..bee9bffeb0ce 100644 /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the -@@ -1562,6 +1567,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) +@@ -1563,6 +1568,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = basemono + dj * TICK_NSEC; return 0; } @@ -113,5 +113,5 @@ index 429a2f144e19..bee9bffeb0ce 100644 /* * Prepare a CPU for idle from an RCU perspective. 
The first major task -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch index 9d396e939..470a0cf74 100644 --- a/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch +++ b/debian/patches-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -1,8 +1,8 @@ -From 5c4e32e88d430654b1f529185331235d5b745a0d Mon Sep 17 00:00:00 2001 +From 47dd0d466654b4e15710c38340022485c2f5cc15 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 4 Nov 2013 13:21:10 -0800 -Subject: [PATCH 161/290] rcu: Eliminate softirq processing from rcutree -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 161/319] rcu: Eliminate softirq processing from rcutree +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, @@ -211,7 +211,7 @@ index fbbff7c21148..98257d20feb2 100644 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index bee9bffeb0ce..2e8737f1010f 100644 +index 938e64c69d18..56639c8ed550 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -24,42 +24,16 @@ @@ -257,7 +257,7 @@ index bee9bffeb0ce..2e8737f1010f 100644 #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ -@@ -1027,18 +1001,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) +@@ -1028,18 +1002,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ @@ -287,7 +287,7 @@ index bee9bffeb0ce..2e8737f1010f 100644 /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1176,23 +1153,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) +@@ -1177,23 +1154,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } @@ -311,7 +311,7 @@ index bee9bffeb0ce..2e8737f1010f 100644 /* * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. -@@ -1247,67 +1207,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, +@@ -1248,67 +1208,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } @@ -379,7 +379,7 @@ index bee9bffeb0ce..2e8737f1010f 100644 /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. 
The CPU hotplug lock is still -@@ -1338,26 +1237,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +@@ -1339,26 +1238,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) free_cpumask_var(cm); } @@ -406,7 +406,7 @@ index bee9bffeb0ce..2e8737f1010f 100644 rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } -@@ -1380,11 +1265,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) +@@ -1381,11 +1266,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -419,5 +419,5 @@ index bee9bffeb0ce..2e8737f1010f 100644 { return false; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch b/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch index ef0b38af2..d092f5e6d 100644 --- a/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch +++ b/debian/patches-rt/0162-srcu-use-cpu_online-instead-custom-check.patch @@ -1,8 +1,8 @@ -From 68c6703bb92019392658036dcf0a667b1388a3b0 Mon Sep 17 00:00:00 2001 +From 799124bea9ca6de248f6295282fb5b6f925db812 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 14:43:41 +0200 -Subject: [PATCH 162/290] srcu: use cpu_online() instead custom check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 162/319] srcu: use cpu_online() instead custom check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The current check via srcu_online is slightly racy because after looking at srcu_online there could be an interrupt that interrupted us long @@ -92,5 +92,5 @@ index ae716ca783bc..f162a4f54b05 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch b/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch index 1e77fc316..663efc91f 100644 --- a/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch +++ b/debian/patches-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch @@ -1,8 +1,8 @@ -From 5bc56052951df498807fec250c48debf7cc3a55b Mon Sep 17 00:00:00 2001 +From 27c41d1644ba6495be3b4ed8c7e466cf309116f4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 12 Oct 2017 18:37:12 +0200 -Subject: [PATCH 163/290] srcu: replace local_irqsave() with a locallock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 163/319] srcu: replace local_irqsave() with a locallock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There are two instances which disable interrupts in order to become a stable this_cpu_ptr() pointer. 
The restore part is coupled with @@ -73,5 +73,5 @@ index df0375453ba1..0f09a1a9e17c 100644 srcu_funnel_gp_start(sp, sdp, s, do_norm); else if (needexp) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch index 8557735a2..790f0ba6c 100644 --- a/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch +++ b/debian/patches-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch @@ -1,8 +1,8 @@ -From 72d24a41a24c422c75947391b6fb8419bce49da3 Mon Sep 17 00:00:00 2001 +From 22f6bb02afdf2907e79656e8aeff7941d69688a3 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Wed, 12 Oct 2016 11:21:14 -0500 -Subject: [PATCH 164/290] rcu: enable rcu_normal_after_boot by default for RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 164/319] rcu: enable rcu_normal_after_boot by default for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The forcing of an expedited grace period is an expensive and very RT-application unfriendly operation, as it forcibly preempts all running @@ -33,5 +33,5 @@ index 6ffafb1b1584..16d8dba23329 100644 #endif /* #ifndef CONFIG_TINY_RCU */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch b/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch index c7cc4a53f..f372af8fb 100644 --- a/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch +++ b/debian/patches-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch @@ -1,8 +1,8 @@ -From d23352f4c6e06f5629a0d48714ac071a78b7669d Mon Sep 17 00:00:00 2001 +From 5b341a3d891d21f25f350ff8eb30ba2e80de4ed4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 28 Jul 2011 13:32:57 +0200 -Subject: [PATCH 165/290] tty/serial/omap: Make the locking RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 165/319] tty/serial/omap: Make the locking RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. Redo it to make it work on -RT and @@ -45,5 +45,5 @@ index 6420ae581a80..0f4f41ed9ffa 100644 static int __init -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch b/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch index b305e2394..5e9e669e3 100644 --- a/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch +++ b/debian/patches-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch @@ -1,8 +1,8 @@ -From e98f40187d6c4ce468ae727682ccccff520ad931 Mon Sep 17 00:00:00 2001 +From f4135db75c4faa91667301a294948874aafce06b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 8 Jan 2013 21:36:51 +0100 -Subject: [PATCH 166/290] tty/serial/pl011: Make the locking work on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 166/319] tty/serial/pl011: Make the locking work on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The lock is a sleeping lock and local_irq_save() is not the optimsation we are looking for. 
Redo it to make it work on -RT and non-RT. @@ -50,5 +50,5 @@ index af21122dfade..183e8b731d6a 100644 clk_disable(uap->clk); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch b/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch index f1d617046..502d0aaa7 100644 --- a/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch +++ b/debian/patches-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch @@ -1,12 +1,12 @@ -From af3d56abb15953cb66d7ab6ec70e7289699c06b3 Mon Sep 17 00:00:00 2001 +From 66efa8efe10d5249b886ba981b6a0225f5c92598 Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 24 Sep 2018 10:29:01 +0200 -Subject: [PATCH 167/290] tty: serial: pl011: explicitly initialize the flags +Subject: [PATCH 167/319] tty: serial: pl011: explicitly initialize the flags variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Silence the following gcc warning: @@ -41,5 +41,5 @@ index 183e8b731d6a..2cc6b24bc88d 100644 clk_enable(uap->clk); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch b/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch index 625b36a12..66dd67805 100644 --- a/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch +++ b/debian/patches-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch @@ -1,11 +1,11 @@ -From 8a62565dce3fc35c4f8409e5b790505c56d7488a Mon Sep 17 00:00:00 2001 +From 41cfb37745af85f9b8e385a0d1b79148e599089c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Dec 2011 13:05:54 +0100 -Subject: [PATCH 168/290] rt: Improve the serial console PASS_LIMIT +Subject: [PATCH 168/319] rt: Improve the serial console PASS_LIMIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Beyond the warning: @@ -43,5 +43,5 @@ index 69aaee5d7fe1..cf88317a95fc 100644 #include /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch index 0632ac7f7..3f24b7906 100644 --- a/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch +++ b/debian/patches-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch @@ -1,8 +1,8 @@ -From cfdff0a4d70b2b13d7088037e29e4795d031a127 Mon Sep 17 00:00:00 2001 +From 8ed21eef043e64b2df5e6fc12408c07e22278278 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Apr 2016 16:55:02 +0200 -Subject: [PATCH 169/290] tty: serial: 8250: don't take the trylock during oops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 169/319] tty: serial: 8250: don't take the trylock during oops +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz An oops with irqs off (panic() from irqsafe hrtimer like the watchdog timer) will lead to a lockdep warning on each invocation and as such @@ 
-31,5 +31,5 @@ index aa4de6907f77..6b1d46c1df3b 100644 spin_lock_irqsave(&port->lock, flags); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch b/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch index f671ed1e3..081db79db 100644 --- a/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch +++ b/debian/patches-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch @@ -1,8 +1,8 @@ -From 5e95b56124341d5c51f1b53b804367a02c4b706b Mon Sep 17 00:00:00 2001 +From eca9904f0437bf12bcf97c50157a8609c76e0c2d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Nov 2016 16:29:32 +0100 -Subject: [PATCH 170/290] locking/percpu-rwsem: Remove preempt_disable variants -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 170/319] locking/percpu-rwsem: Remove preempt_disable variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Effective revert commit: @@ -222,5 +222,5 @@ index 79b99d653e03..fb44e237316d 100644 extern void percpu_up_write(struct percpu_rw_semaphore *); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch b/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch index ce8ee35e3..d066126ac 100644 --- a/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch +++ b/debian/patches-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch @@ -1,9 +1,9 @@ -From f321beb42f46e0be7912e2d8cc67f922477c4787 Mon Sep 17 00:00:00 2001 +From 0c558dcf23a7c38a5b708271c1a5f27a46dc431b Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Tue, 15 May 2012 13:53:56 +0800 -Subject: [PATCH 171/290] mm: Protect activate_mm() by +Subject: [PATCH 171/319] mm: Protect activate_mm() by preempt_[disable&enable]_rt() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz User preempt_*_rt instead of local_irq_*_rt or otherwise there will be warning on ARM like below: @@ -77,5 +77,5 @@ index 3e612ae748e9..d0ccc070979f 100644 #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch b/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch index 4b2f06386..c994c9f70 100644 --- a/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch +++ b/debian/patches-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch @@ -1,9 +1,9 @@ -From 6657c7f6ea61fd3fea378532fcae38776afc9fc1 Mon Sep 17 00:00:00 2001 +From 3e9ea8f396df9728ed7114c51326a58498a21490 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Sep 2017 12:32:34 +0200 -Subject: [PATCH 172/290] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD +Subject: [PATCH 172/319] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. 
However @@ -57,5 +57,5 @@ index 6e0022326afe..10225a9135fb 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index 458f1116b..26ecd0a0d 100644 --- a/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/debian/patches-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -1,9 +1,9 @@ -From f7fc6eb795b824aba9a02869fb2f399bb51a880c Mon Sep 17 00:00:00 2001 +From f127aa0f4e6708ea9a7a25d0b6a7948476ec8628 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 20 Oct 2017 11:29:53 +0200 -Subject: [PATCH 173/290] fs/dcache: disable preemption on i_dir_seq's write +Subject: [PATCH 173/319] fs/dcache: disable preemption on i_dir_seq's write side -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz i_dir_seq is an opencoded seqcounter. Based on the code it looks like we could have two writers in parallel despite the fact that the d_lock is @@ -69,7 +69,7 @@ index 10225a9135fb..dcde8ffe384c 100644 rcu_read_unlock(); goto retry; diff --git a/fs/inode.c b/fs/inode.c -index 5c63693326bb..c3e17dcbb558 100644 +index 9c50521c9fe4..40114e8b6c7b 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -155,7 +155,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) @@ -95,5 +95,5 @@ index 92420009b9bc..9b2b707e9112 100644 __u32 i_generation; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch index 1a7ab171d..4901e856b 100644 --- a/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch +++ b/debian/patches-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch @@ -1,9 +1,9 @@ -From cd5771603fe1942769706821fac18040226fec7c Mon Sep 17 00:00:00 2001 +From 4f4518ed98107afff0e736ec18ef34cbcaf3fc63 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Mon, 7 May 2018 08:58:57 -0500 -Subject: [PATCH 174/290] squashfs: make use of local lock in multi_cpu +Subject: [PATCH 174/319] squashfs: make use of local lock in multi_cpu decompressor -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Currently, the squashfs multi_cpu decompressor makes use of get_cpu_ptr()/put_cpu_ptr(), which unconditionally disable preemption @@ -68,5 +68,5 @@ index 23a9c28ad8ea..6a73c4fa88e7 100644 if (res < 0) ERROR("%s decompression failed, data probably corrupt\n", -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch index e1ba3e5a4..4973395c7 100644 --- a/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch +++ b/debian/patches-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch @@ -1,8 +1,8 @@ -From 902a47642519ca8ebec20cd12baa43b7985d5d5e Mon Sep 17 00:00:00 2001 +From abe00f50ce5d7caf0101ac496ee633e60fbb9cdf Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 175/290] thermal: Defer thermal wakups to threads -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 175/319] thermal: Defer thermal wakups to threads +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will call schedule while we run in irq context. @@ -133,5 +133,5 @@ index 1ef937d799e4..a5991cbb408f 100644 module_exit(pkg_temp_thermal_exit) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch b/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch index cf39007a5..b9cf78edc 100644 --- a/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch +++ b/debian/patches-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch @@ -1,8 +1,8 @@ -From b2c28ae019437b1f5b43bb57a2898cfb0c03397c Mon Sep 17 00:00:00 2001 +From fc0eb24ab46ff0b84d3ce15fe081bdc42b5c2854 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 15:10:33 +0100 -Subject: [PATCH 176/290] x86/fpu: Disable preemption around local_bh_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 176/319] x86/fpu: Disable preemption around local_bh_disable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __fpu__restore_sig() restores the content of the FPU state in the CPUs and in order to avoid concurency it disbles BH. On !RT it also disables @@ -34,5 +34,5 @@ index d99a8ee9e185..5e0274a94133 100644 return err; } else { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch b/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch index 51c65d9fe..e188b9c91 100644 --- a/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch +++ b/debian/patches-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch @@ -1,8 +1,8 @@ -From 5916e5218923335c20252ab8309a16d071a54a80 Mon Sep 17 00:00:00 2001 +From 4c460987a7977a9f54c174eac82230d282d9ba78 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 16:35:35 +0200 -Subject: [PATCH 177/290] fs/epoll: Do not disable preemption on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 177/319] fs/epoll: Do not disable preemption on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz ep_call_nested() takes a sleeping lock so we can't disable preemption. 
The light version is enough since ep_call_nested() doesn't mind beeing @@ -33,5 +33,5 @@ index 58f48ea0db23..a41120a34e6d 100644 #else -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch index 15c9bf813..1bb131b0e 100644 --- a/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch +++ b/debian/patches-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch @@ -1,9 +1,9 @@ -From 4b23a47feeb6d6b93de283f0502e0ba095173d0d Mon Sep 17 00:00:00 2001 +From 92026444f278a09f16c6c29598ae56bc62653358 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 11:39:36 +0200 -Subject: [PATCH 178/290] mm/vmalloc: Another preempt disable region which +Subject: [PATCH 178/319] mm/vmalloc: Another preempt disable region which sucks -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Avoid the preempt disable version of get_cpu_var(). The inner-lock should provide enough serialisation. @@ -69,5 +69,5 @@ index d8e877365f9f..9b7cf993cada 100644 /* Allocate new block if nothing was found */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0179-block-mq-use-cpu_light.patch b/debian/patches-rt/0179-block-mq-use-cpu_light.patch index be41475d8..f3e652438 100644 --- a/debian/patches-rt/0179-block-mq-use-cpu_light.patch +++ b/debian/patches-rt/0179-block-mq-use-cpu_light.patch @@ -1,8 +1,8 @@ -From 687c233b2f57dfd618d45830dc9af37e3147e81b Mon Sep 17 00:00:00 2001 +From 75e61163121da99d7139f346674c9e5b0f340d35 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 10:37:23 +0200 -Subject: [PATCH 179/290] block: mq: use cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 179/319] block: mq: use cpu_light() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz there is a might sleep splat because get_cpu() disables preemption and later we grab a lock. As a workaround for this we use get_cpu_light(). @@ -32,5 +32,5 @@ index 5ad9251627f8..5a96c97991b6 100644 struct blk_mq_alloc_data { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch b/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch index 8ae711184..b254bd776 100644 --- a/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch +++ b/debian/patches-rt/0180-block-mq-do-not-invoke-preempt_disable.patch @@ -1,8 +1,8 @@ -From 4038b2ec5798b7386eb67a895fda6b5c4febfc36 Mon Sep 17 00:00:00 2001 +From d56abf7a0fea61d827e0de458b0e0af80655873d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 180/290] block/mq: do not invoke preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 180/319] block/mq: do not invoke preempt_disable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz preempt_disable() and get_cpu() don't play well together with the sleeping locks it tries to allocate later. 
@@ -54,5 +54,5 @@ index 4aa3284874f6..376fb90de054 100644 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch index 005a3c011..904aab651 100644 --- a/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch +++ b/debian/patches-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch @@ -1,8 +1,8 @@ -From 1e4579941a4ae775b14ca02ef717a5bea1608853 Mon Sep 17 00:00:00 2001 +From a5c48a726c23c81ad7510537e97d40f32f00cd75 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Jan 2015 15:10:08 +0100 -Subject: [PATCH 181/290] block/mq: don't complete requests via IPI -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 181/319] block/mq: don't complete requests via IPI +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The IPI runs in hardirq context and there are sleeping locks. This patch moves the completion into a workqueue. @@ -101,7 +101,7 @@ index 2885dce1ad49..8dbb9ecf9993 100644 void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index d51e10f50e75..f1960add94df 100644 +index 6e67aeb56928..111ab4209797 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -149,6 +149,9 @@ enum mq_rq_state { @@ -115,5 +115,5 @@ index d51e10f50e75..f1960add94df 100644 int cpu; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch index 03b4d9e28..01d5e97eb 100644 --- a/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch +++ b/debian/patches-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch @@ -1,8 +1,8 @@ -From ea655a0576aff0709c19ff2f8634ac9cb783f0ad Mon Sep 17 00:00:00 2001 +From a2f3a7844fe2a2602eadb79ac50141113e94f19e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 6 Apr 2010 16:51:31 +0200 -Subject: [PATCH 182/290] md: raid5: Make raid5_percpu handling RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 182/319] md: raid5: Make raid5_percpu handling RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __raid_run_ops() disables preemption with get_cpu() around the access to the raid5_percpu variables. 
That causes scheduling while atomic @@ -73,5 +73,5 @@ index 8474c224127b..a3bf907ab2af 100644 struct flex_array *scribble; /* space for constructing buffer * lists and performing address -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch b/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch index a2a646f47..70f29d575 100644 --- a/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch +++ b/debian/patches-rt/0183-rt-Introduce-cpu_chill.patch @@ -1,8 +1,8 @@ -From c021d3e1d9574028eba2cdd5b6c776e9020a0d9d Mon Sep 17 00:00:00 2001 +From ba70887d86b2aadc1a78d2c649b6dcc2db9ba9e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 20:51:03 +0100 -Subject: [PATCH 183/290] rt: Introduce cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 183/319] rt: Introduce cpu_chill() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() @@ -109,5 +109,5 @@ index b800efb64238..98a771065818 100644 * Functions related to boot-time initialization: */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch b/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch index 278c3ad22..2c6bdbeb8 100644 --- a/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch +++ b/debian/patches-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch @@ -1,8 +1,8 @@ -From d81a1f846aac209ee8c5c7ccbc613324abeba9c4 Mon Sep 17 00:00:00 2001 +From 2adab83a434fa5f5bba59cfb60e4da7da2910062 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Feb 2019 16:59:15 +0100 -Subject: [PATCH 184/290] hrtimer: Don't lose state in cpu_chill() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 184/319] hrtimer: Don't lose state in cpu_chill() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In cpu_chill() the state is set to TASK_UNINTERRUPTIBLE and a timer is programmed. On return the state is always TASK_RUNNING which means we @@ -44,5 +44,5 @@ index 98a771065818..21a454557c8a 100644 EXPORT_SYMBOL(cpu_chill); #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch b/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch index db609b70d..2d0451607 100644 --- a/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch +++ b/debian/patches-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch @@ -1,9 +1,9 @@ -From fe55dde1efd56a8afb667d104442861cde103feb Mon Sep 17 00:00:00 2001 +From 0a0d701dd9255c4b04994a2248b8d3751e31e950 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 12:31:10 +0100 -Subject: [PATCH 185/290] hrtimer: cpu_chill(): save task state in +Subject: [PATCH 185/319] hrtimer: cpu_chill(): save task state in ->saved_state() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In the previous change I saved the current task state on stack. This was bad because while the task is scheduled-out it might receive a wake-up. 
@@ -59,5 +59,5 @@ index 21a454557c8a..f16cbc98c47a 100644 EXPORT_SYMBOL(cpu_chill); #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch index 9404344c7..fc238cbac 100644 --- a/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/debian/patches-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -1,9 +1,9 @@ -From a315826e2a7f0937429019a4fc122b56a4a827b9 Mon Sep 17 00:00:00 2001 +From 816ac51c679faf08419006a8995b97c87e0c5c46 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 186/290] block: blk-mq: move blk_queue_usage_counter_release() +Subject: [PATCH 186/319] block: blk-mq: move blk_queue_usage_counter_release() into process context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz | BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 | in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 @@ -96,7 +96,7 @@ index 4860cd26cd5a..13bf37156bb0 100644 sizeof(struct request), 0, SLAB_PANIC, NULL); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index f1960add94df..7b7c0bc6a514 100644 +index 111ab4209797..2f3b2e5196eb 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,7 @@ @@ -116,5 +116,5 @@ index f1960add94df..7b7c0bc6a514 100644 struct list_head all_q_node; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch b/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch index 0fca1ed17..2b2187d0e 100644 --- a/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch +++ b/debian/patches-rt/0187-block-Use-cpu_chill-for-retry-loops.patch @@ -1,8 +1,8 @@ -From effa2a0476d7f71cd3c5007874c228db414f3318 Mon Sep 17 00:00:00 2001 +From 292e48d01d48932d99df9e83852d69bfb61a6886 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 18:28:26 +0100 -Subject: [PATCH 187/290] block: Use cpu_chill() for retry loops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 187/319] block: Use cpu_chill() for retry loops +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. 
Steven also observed a live lock when there was a @@ -47,5 +47,5 @@ index 01580f88fcb3..98d87e52ccdc 100644 } } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch b/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch index f072574a6..8a36093bb 100644 --- a/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch +++ b/debian/patches-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch @@ -1,8 +1,8 @@ -From 3757c05ac9fc60a97b20abf3681d7f869c368be6 Mon Sep 17 00:00:00 2001 +From 5dc500bf23f6371abc21bd10dc41f3202f0597ed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:00:34 +0100 -Subject: [PATCH 188/290] fs: dcache: Use cpu_chill() in trylock loops -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 188/319] fs: dcache: Use cpu_chill() in trylock loops +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system @@ -62,5 +62,5 @@ index 1fce41ba3535..5dc970027e30 100644 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch b/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch index 3770bd3d7..f59ae30e0 100644 --- a/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch +++ b/debian/patches-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch @@ -1,8 +1,8 @@ -From 8f296bb8916125380ca66e63999aac396f5ab698 Mon Sep 17 00:00:00 2001 +From b8141f1c760ea975d899cf29b5d218f2dddacbda Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:10:04 +0100 -Subject: [PATCH 189/290] net: Use cpu_chill() instead of cpu_relax() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 189/319] net: Use cpu_chill() instead of cpu_relax() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Retry loops on RT might loop forever when the modifying side was preempted. 
Use cpu_chill() instead of cpu_relax() to let the system @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index ac65e66d1d72..b4267ce1f869 100644 +index ddf90e6fac51..65befa5ee0c3 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -63,6 +63,7 @@ @@ -66,5 +66,5 @@ index 0b347f46b2f4..f395f06031bc 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch index f2d01e43e..f3aabbd69 100644 --- a/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/debian/patches-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -1,8 +1,8 @@ -From 558647aa05135aa727be9198f29a041686e683af Mon Sep 17 00:00:00 2001 +From 0c8cd26ab04b1cb1503429536154336d3a92f1b5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 14:35:49 +0200 -Subject: [PATCH 190/290] fs/dcache: use swait_queue instead of waitqueue -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 190/319] fs/dcache: use swait_queue instead of waitqueue +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz __d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock() which disables preemption. As a workaround convert it to swait. @@ -98,10 +98,10 @@ index 6244345a5745..7ee10b7cc808 100644 if (!o->nodeid) { /* diff --git a/fs/namei.c b/fs/namei.c -index 914178cdbe94..2a8c41bc227f 100644 +index c00a7e1da4c0..742e7935f777 100644 --- a/fs/namei.c +++ b/fs/namei.c -@@ -1645,7 +1645,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, +@@ -1646,7 +1646,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; @@ -110,7 +110,7 @@ index 914178cdbe94..2a8c41bc227f 100644 /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3135,7 +3135,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, +@@ -3136,7 +3136,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -120,10 +120,10 @@ index 914178cdbe94..2a8c41bc227f 100644 if (unlikely(IS_DEADDIR(dir_inode))) return -ENOENT; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index 17f8a9259971..6642f0c321ef 100644 +index c60b20884c45..7e653c14c7ed 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c -@@ -445,7 +445,7 @@ static +@@ -457,7 +457,7 @@ static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) { struct qstr filename = QSTR_INIT(entry->name, entry->len); @@ -132,7 +132,7 @@ index 17f8a9259971..6642f0c321ef 100644 struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); -@@ -1495,7 +1495,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, +@@ -1516,7 +1516,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned open_flags, umode_t mode) { @@ -237,5 +237,5 @@ index c7cb30cdd1b7..119a56d7f739 100644 list_splice_init(&q->task_list, &tmp); while (!list_empty(&tmp)) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch b/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch index 8a14b265e..362da8e28 100644 --- 
a/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch +++ b/debian/patches-rt/0191-workqueue-Use-normal-rcu.patch @@ -1,8 +1,8 @@ -From a65796914416941351ac0cd0271c5da76bed792a Mon Sep 17 00:00:00 2001 +From 9e10a8e692e112f54a451f9c0e2ec35bdc9fd4fd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Jul 2013 15:26:54 +0200 -Subject: [PATCH 191/290] workqueue: Use normal rcu -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 191/319] workqueue: Use normal rcu +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz There is no need for sched_rcu. The undocumented reason why sched_rcu is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by @@ -356,5 +356,5 @@ index 493908464b9e..544007905706 100644 return written; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch index d5f42d921..e069f8e65 100644 --- a/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch +++ b/debian/patches-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch @@ -1,9 +1,9 @@ -From 670717ab259111dcf589c98670ff74a206bf87d3 Mon Sep 17 00:00:00 2001 +From 5dd89977f6471e9dd1a807f1c6063bb6384ec3cf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:42:26 +0200 -Subject: [PATCH 192/290] workqueue: Use local irq lock instead of irq disable +Subject: [PATCH 192/319] workqueue: Use local irq lock instead of irq disable regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Use a local_irq_lock as a replacement for irq off regions. We keep the semantic of irq-off in regard to the pool->lock and remain preemptible. @@ -182,5 +182,5 @@ index 544007905706..0efb8d25d940 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch b/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch index b97673193..89f57c094 100644 --- a/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch +++ b/debian/patches-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch @@ -1,8 +1,8 @@ -From c6d8c462a41a7c4c64ade776387c5b9394464de6 Mon Sep 17 00:00:00 2001 +From 4d853bb1de88920f53a2ce4b1588cf25a112d6c1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 1 Jul 2013 11:02:42 +0200 -Subject: [PATCH 193/290] workqueue: Prevent workqueue versus ata-piix livelock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 193/319] workqueue: Prevent workqueue versus ata-piix livelock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz An Intel i7 system regularly detected rcu_preempt stalls after the kernel was upgraded from 3.6-rt to 3.8-rt. 
When the stall happened, disk I/O was no @@ -135,5 +135,5 @@ index 0efb8d25d940..34734cdb5cb6 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch b/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch index bd7042d6d..67f564cae 100644 --- a/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch +++ b/debian/patches-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch @@ -1,8 +1,8 @@ -From 811c45a6eb4df7a785f9fe8b20461bd72fee9d2c Mon Sep 17 00:00:00 2001 +From 5368851a09ec1c6318e839214f7edcdf21b3a961 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jun 2011 19:47:03 +0200 -Subject: [PATCH 194/290] sched: Distangle worker accounting from rqlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 194/319] sched: Distangle worker accounting from rqlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The worker accounting for cpu bound workers is plugged into the core scheduler code and the wakeup code. This is not a hard requirement and @@ -287,5 +287,5 @@ index 66fbb5a9e633..30cfed226b39 100644 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch b/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch index d910a399c..b79e780aa 100644 --- a/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch +++ b/debian/patches-rt/0195-debugobjects-Make-RT-aware.patch @@ -1,8 +1,8 @@ -From 0afdb35f56928f5bde82e37d5a90197756bbbc21 Mon Sep 17 00:00:00 2001 +From f21322916ff581c43b89885c1271ba24f86cc2e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:41:35 +0200 -Subject: [PATCH 195/290] debugobjects: Make RT aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 195/319] debugobjects: Make RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Avoid filling the pool / allocating memory with irqs off(). @@ -28,5 +28,5 @@ index 14afeeb7d6ef..e28481c402ae 100644 db = get_bucket((unsigned long) addr); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch b/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch index adaec45e2..71a9b8688 100644 --- a/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch +++ b/debian/patches-rt/0196-seqlock-Prevent-rt-starvation.patch @@ -1,8 +1,8 @@ -From 77d71203cae68a6d582b3c05f375bd68caea10e8 Mon Sep 17 00:00:00 2001 +From 20bfbd138f08356f255e87a13b4f3f1d1588d845 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Feb 2012 12:03:30 +0100 -Subject: [PATCH 196/290] seqlock: Prevent rt starvation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 196/319] seqlock: Prevent rt starvation +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. 
@@ -191,5 +191,5 @@ index 5ce035984a4d..1166fc17b757 100644 { unsigned int seq; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch index 10a6d7a0b..aa526bd84 100644 --- a/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ b/debian/patches-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch @@ -1,9 +1,9 @@ -From 9d2425d84597125b000c785903c4afd36f00673c Mon Sep 17 00:00:00 2001 +From 166f76bae8984db2f7b0a75215a214cffc29fb89 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 18 Feb 2015 16:05:28 +0100 -Subject: [PATCH 197/290] sunrpc: Make svc_xprt_do_enqueue() use +Subject: [PATCH 197/319] sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 |in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd @@ -57,5 +57,5 @@ index 6cf0fd37cbf0..48c0a0b90946 100644 } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch b/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch index 80d2401c6..794ee290e 100644 --- a/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch +++ b/debian/patches-rt/0198-net-Use-skbufhead-with-raw-lock.patch @@ -1,8 +1,8 @@ -From fc3e6e30f2ab8454e5d71c17c1893c38785c1271 Mon Sep 17 00:00:00 2001 +From c2a100b10eef0a1804cbc466d4d900c5fa02b31c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 15:38:34 +0200 -Subject: [PATCH 198/290] net: Use skbufhead with raw lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 198/319] net: Use skbufhead with raw lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Use the rps lock as rawlock so we can keep irq-off regions. It looks low latency. 
However we can't kfree() from this context therefore we defer this @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner 3 files changed, 33 insertions(+), 8 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index d5527e3828d1..9aba444d7df0 100644 +index 84bbdcbb199a..b816eb0bc1c4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2982,6 +2982,7 @@ struct softnet_data { @@ -53,7 +53,7 @@ index 25407c206e73..d4d7aea13cc6 100644 struct lock_class_key *class) { diff --git a/net/core/dev.c b/net/core/dev.c -index 4926a260bf0e..430ca0e79afc 100644 +index c9e199232f21..0e70f9a8dcc9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -218,14 +218,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) @@ -73,7 +73,7 @@ index 4926a260bf0e..430ca0e79afc 100644 #endif } -@@ -5275,7 +5275,7 @@ static void flush_backlog(struct work_struct *work) +@@ -5246,7 +5246,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); @@ -82,7 +82,7 @@ index 4926a260bf0e..430ca0e79afc 100644 input_queue_head_incr(sd); } } -@@ -5285,11 +5285,14 @@ static void flush_backlog(struct work_struct *work) +@@ -5256,11 +5256,14 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); @@ -98,7 +98,7 @@ index 4926a260bf0e..430ca0e79afc 100644 } static void flush_all_backlogs(void) -@@ -5868,7 +5871,9 @@ static int process_backlog(struct napi_struct *napi, int quota) +@@ -5839,7 +5842,9 @@ static int process_backlog(struct napi_struct *napi, int quota) while (again) { struct sk_buff *skb; @@ -108,7 +108,7 @@ index 4926a260bf0e..430ca0e79afc 100644 rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); -@@ -5876,9 +5881,9 @@ static int process_backlog(struct napi_struct *napi, int quota) +@@ -5847,9 +5852,9 @@ static int process_backlog(struct napi_struct *napi, int quota) if (++work >= quota) return work; @@ -119,7 +119,7 @@ index 4926a260bf0e..430ca0e79afc 100644 rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* -@@ -6343,13 +6348,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) +@@ -6314,13 +6319,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) unsigned long time_limit = jiffies + usecs_to_jiffies(netdev_budget_usecs); int budget = netdev_budget; @@ -141,7 +141,7 @@ index 4926a260bf0e..430ca0e79afc 100644 for (;;) { struct napi_struct *n; -@@ -9522,10 +9535,13 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -9504,10 +9517,13 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -156,7 +156,7 @@ index 4926a260bf0e..430ca0e79afc 100644 return 0; } -@@ -9836,8 +9852,9 @@ static int __init net_dev_init(void) +@@ -9818,8 +9834,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); @@ -169,5 +169,5 @@ index 4926a260bf0e..430ca0e79afc 100644 skb_queue_head_init(&sd->xfrm_backlog); #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch index 1bbada45d..41c9b468e 100644 --- a/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ 
b/debian/patches-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -1,8 +1,8 @@ -From 4614b75f982f6d986f99de8d4d35afd3da7140b4 Mon Sep 17 00:00:00 2001 +From 3c6601516d097cea1fe870fe8fa399bc99f02f76 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jan 2016 15:55:02 +0100 -Subject: [PATCH 199/291] net: move xmit_recursion to per-task variable on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.90-rt35.tar.xz +Subject: [PATCH 199/319] net: move xmit_recursion to per-task variable on -RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz A softirq on -RT can be preempted. That means one task is in __dev_queue_xmit(), gets preempted and another task may enter @@ -24,9 +24,11 @@ Signed-off-by: Sebastian Andrzej Siewior net/core/filter.c | 6 +-- 4 files changed, 104 insertions(+), 15 deletions(-) +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index b816eb0bc1c4..5de4b66e11fe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -587,7 +587,11 @@ +@@ -587,7 +587,11 @@ struct netdev_queue { * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; @@ -38,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Time (in jiffies) of last Tx */ -@@ -2620,14 +2624,53 @@ +@@ -2620,14 +2624,53 @@ void netdev_freemem(struct net_device *dev); void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); @@ -93,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -@@ -3803,10 +3846,48 @@ +@@ -3805,10 +3848,48 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) return (1U << debug_value) - 1; } @@ -143,7 +145,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static inline bool __netif_tx_acquire(struct netdev_queue *txq) -@@ -3823,32 +3904,32 @@ +@@ -3825,32 +3906,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -181,21 +183,25 @@ Signed-off-by: Sebastian Andrzej Siewior txq->trans_start = jiffies; } +diff --git a/include/linux/sched.h b/include/linux/sched.h +index a38a2c2a8fe4..756fed8f5994 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1217,6 +1217,9 @@ +@@ -1216,6 +1216,9 @@ struct task_struct { + #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; - #endif ++#endif +#ifdef CONFIG_PREEMPT_RT_FULL + int xmit_recursion; -+#endif + #endif int pagefault_disabled; #ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; +diff --git a/net/core/dev.c b/net/core/dev.c +index 0e70f9a8dcc9..176a3c9531c7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3538,8 +3538,10 @@ +@@ -3538,8 +3538,10 @@ static void skb_update_prio(struct sk_buff *skb) #define skb_update_prio(skb) #endif @@ -206,7 +212,7 @@ Signed-off-by: Sebastian Andrzej Siewior /** * dev_loopback_xmit - loop back @skb -@@ -3830,9 +3832,12 @@ +@@ -3830,9 +3832,12 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ @@ -221,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto recursion_alert; skb = validate_xmit_skb(skb, dev, &again); -@@ -3842,9 
+3847,9 @@ +@@ -3842,9 +3847,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { @@ -233,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; -@@ -8583,7 +8588,7 @@ +@@ -8563,7 +8568,7 @@ static void netdev_init_one_queue(struct net_device *dev, /* Initialize queue lock */ spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); @@ -242,9 +248,11 @@ Signed-off-by: Sebastian Andrzej Siewior netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; #ifdef CONFIG_BQL +diff --git a/net/core/filter.c b/net/core/filter.c +index 9daf1a4118b5..48140f8c010a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c -@@ -2000,7 +2000,7 @@ +@@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) { int ret; @@ -253,7 +261,7 @@ Signed-off-by: Sebastian Andrzej Siewior net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); kfree_skb(skb); return -ENETDOWN; -@@ -2009,9 +2009,9 @@ +@@ -2009,9 +2009,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) skb->dev = dev; skb->tstamp = 0; @@ -265,3 +273,6 @@ Signed-off-by: Sebastian Andrzej Siewior return ret; } +-- +2.25.0 + diff --git a/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch index 6c14c9d40..1bc78bdef 100644 --- a/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ b/debian/patches-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch @@ -1,9 +1,9 @@ -From 041b49006ea5c90319dd0e5ba91d255eb59ec635 Mon Sep 17 00:00:00 2001 +From f29055094f4f6c99c2223a8ab25e89061f9a23f3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jan 2016 15:39:05 +0100 -Subject: [PATCH 200/290] net: provide a way to delegate processing a softirq +Subject: [PATCH 200/319] net: provide a way to delegate processing a softirq to ksoftirqd -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz If the NET_RX uses up all of his budget it moves the following NAPI invocations into the `ksoftirqd`. On -RT it does not do so. Instead it @@ -72,10 +72,10 @@ index 27a4bb2303d0..25bcf2f2714b 100644 * This function must run with irqs disabled! 
*/ diff --git a/net/core/dev.c b/net/core/dev.c -index ed1537cc9ab7..7605c7220020 100644 +index 176a3c9531c7..2dad518c625a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -6397,7 +6397,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) +@@ -6368,7 +6368,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) @@ -85,5 +85,5 @@ index ed1537cc9ab7..7605c7220020 100644 net_rps_action_and_irq_enable(sd); out: -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch index ebb993b8b..7268ee064 100644 --- a/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ b/debian/patches-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch @@ -1,9 +1,9 @@ -From 50cd61533d1b2d579c54d2acbb14c5e7034b1982 Mon Sep 17 00:00:00 2001 +From a3dbf5473d5aa93714aa123eea6140993e5d47bd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 30 Mar 2016 13:36:29 +0200 -Subject: [PATCH 201/290] net: dev: always take qdisc's busylock in +Subject: [PATCH 201/319] net: dev: always take qdisc's busylock in __dev_xmit_skb() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The root-lock is dropped before dev_hard_start_xmit() is invoked and after setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c -index 7605c7220020..1a30cf641e7c 100644 +index 2dad518c625a..cb0cdd4daf02 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3466,7 +3466,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, @@ -38,5 +38,5 @@ index 7605c7220020..1a30cf641e7c 100644 spin_lock(&q->busylock); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch index 961f83580..26e944cf1 100644 --- a/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ b/debian/patches-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch @@ -1,8 +1,8 @@ -From 882fef39240fcd645d3affe57b5f9ed4cfb1aac5 Mon Sep 17 00:00:00 2001 +From 3a53ba169527a1d52a7d427ad5907f7d4b176ad2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 14 Sep 2016 17:36:35 +0200 -Subject: [PATCH 202/290] net/Qdisc: use a seqlock instead seqcount -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 202/319] net/Qdisc: use a seqlock instead seqcount +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The seqcount disables preemption on -RT while it is held which can't remove. 
Also we don't want the reader to spin for ages if the writer is @@ -106,7 +106,7 @@ index 000000000000..a7034298a82a + +#endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h -index c9cd5086bd54..b6328680dc71 100644 +index d737a6a2600b..2d35b952bf60 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ @@ -166,7 +166,7 @@ index c9cd5086bd54..b6328680dc71 100644 if (qdisc->flags & TCQ_F_NOLOCK) spin_unlock(&qdisc->seqlock); } -@@ -458,7 +473,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) +@@ -459,7 +474,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) return qdisc_lock(root); } @@ -242,7 +242,7 @@ index e2fd8baec65f..8bab88738691 100644 struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index 84fdc4857771..3c5c51657e1a 100644 +index 39e319d04bb8..fe99928aff78 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1166,7 +1166,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, @@ -289,5 +289,5 @@ index 4ab20f1138fd..a9ed58ca3924 100644 sch->ops = ops; sch->flags = ops->static_flags; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch index 5caa39fc0..82756991a 100644 --- a/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch +++ b/debian/patches-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch @@ -1,12 +1,12 @@ -From a3e432c3b1fd867225b7515a94ca7b0fbd48b913 Mon Sep 17 00:00:00 2001 +From 3e37649a09f99ac729e8b3f827f89ac46a1a3f6b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:21:56 +0200 -Subject: [PATCH 203/290] net: add back the missing serialization in +Subject: [PATCH 203/319] net: add back the missing serialization in ip_send_unicast_reply() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Some time ago Sami Pietikäinen reported a crash on -RT in ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire @@ -95,5 +95,5 @@ index 6da393016c11..105e94ff1095 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch b/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch index 88e3c4809..eb5e8c15f 100644 --- a/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch +++ b/debian/patches-rt/0204-net-add-a-lock-around-icmp_sk.patch @@ -1,8 +1,8 @@ -From 4c0fdaffb6e6326c5c6318fcd0feb45d4bf77190 Mon Sep 17 00:00:00 2001 +From 91ab1a173c2ca89ea2d1badb638766bdca1354a2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 31 Aug 2016 17:54:09 +0200 -Subject: [PATCH 204/290] net: add a lock around icmp_sk() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 204/319] net: add a lock around icmp_sk() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz It looks like the this_cpu_ptr() access in icmp_sk() is protected with local_bh_disable(). 
To avoid missing serialization in -RT I am adding @@ -61,5 +61,5 @@ index 4efa5e33513e..de67d595e298 100644 int sysctl_icmp_msgs_per_sec __read_mostly = 1000; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch index 3b7c800f5..87a102c34 100644 --- a/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ b/debian/patches-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch @@ -1,9 +1,9 @@ -From 8da361e596b7594eb5b0f8abea644982a0c9a1d5 Mon Sep 17 00:00:00 2001 +From 8d7d1348452665b2d25f8960b189bce86ebacdab Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 6 Dec 2016 17:50:30 -0500 -Subject: [PATCH 205/290] net: Have __napi_schedule_irqoff() disable interrupts +Subject: [PATCH 205/319] net: Have __napi_schedule_irqoff() disable interrupts on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz A customer hit a crash where the napi sd->poll_list became corrupted. The customer had the bnx2x driver, which does a @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior 2 files changed, 14 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index c76c32aea759..49dc2818713a 100644 +index 5de4b66e11fe..1d6bb0ab437f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -422,7 +422,19 @@ typedef enum rx_handler_result rx_handler_result_t; @@ -53,10 +53,10 @@ index c76c32aea759..49dc2818713a 100644 static inline bool napi_disable_pending(struct napi_struct *n) { diff --git a/net/core/dev.c b/net/core/dev.c -index 1a30cf641e7c..950356b8b5b1 100644 +index cb0cdd4daf02..31d51ebff238 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -5967,6 +5967,7 @@ bool napi_schedule_prep(struct napi_struct *n) +@@ -5938,6 +5938,7 @@ bool napi_schedule_prep(struct napi_struct *n) } EXPORT_SYMBOL(napi_schedule_prep); @@ -64,7 +64,7 @@ index 1a30cf641e7c..950356b8b5b1 100644 /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule -@@ -5978,6 +5979,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) +@@ -5949,6 +5950,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); @@ -73,5 +73,5 @@ index 1a30cf641e7c..950356b8b5b1 100644 bool napi_complete_done(struct napi_struct *n, int work_done) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch b/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch index e7b6f73ae..a5fd779eb 100644 --- a/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch +++ b/debian/patches-rt/0206-irqwork-push-most-work-into-softirq-context.patch @@ -1,8 +1,8 @@ -From 6b96ba989ad06199b33100cf7469ebeae299a21e Mon Sep 17 00:00:00 2001 +From adff1f0eed6261cb8fac1f3d4db49d51021df637 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 23 Jun 2015 15:32:51 +0200 -Subject: [PATCH 206/290] irqwork: push most work into softirq context -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 206/319] irqwork: push most work into softirq context +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Initially we defered all irqwork into softirq because we didn't want the latency spikes if perf or another user was busy and delayed the RT task. @@ -235,10 +235,10 @@ index 74b694392f2f..fb4d11bab6b7 100644 init_dl_bw(&rd->dl_bw); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 6482945f8ae8..da4a3f8feb56 100644 +index 2b0ddd50e879..4d31ec98e968 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -232,6 +232,7 @@ static void nohz_full_kick_func(struct irq_work *work) +@@ -236,6 +236,7 @@ static void nohz_full_kick_func(struct irq_work *work) static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { .func = nohz_full_kick_func, @@ -260,5 +260,5 @@ index 3fab1c50bf1b..2fcd56aa6092 100644 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0207-printk-Make-rt-aware.patch b/debian/patches-rt/0207-printk-Make-rt-aware.patch index 192a38a37..bce4db790 100644 --- a/debian/patches-rt/0207-printk-Make-rt-aware.patch +++ b/debian/patches-rt/0207-printk-Make-rt-aware.patch @@ -1,8 +1,8 @@ -From ddfc55c94d550664260bd755c4f0ec09f13cfed2 Mon Sep 17 00:00:00 2001 +From efdf9180869e6fe025fd2b98552fbef32fca3971 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2012 14:50:37 +0200 -Subject: [PATCH 207/290] printk: Make rt aware -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 207/319] printk: Make rt aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Drop the lock before calling the console driver and do not disable interrupts while printing to a serial console. 
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 9a7f259dbb20..83f386175dcc 100644 +index f934baed564d..c927f89961a8 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1624,6 +1624,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) @@ -110,5 +110,5 @@ index 9a7f259dbb20..83f386175dcc 100644 if (do_cond_resched) cond_resched(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch index fef2ee805..1d6310150 100644 --- a/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ b/debian/patches-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch @@ -1,8 +1,8 @@ -From ddc0ed9a091fdcda9dba71990fb9300de72e70bf Mon Sep 17 00:00:00 2001 +From 000dbd85cc1f2101a2339b74fd57071e48338307 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 19 May 2016 17:45:27 +0200 -Subject: [PATCH 208/290] kernel/printk: Don't try to print from IRQ/NMI region -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 208/319] kernel/printk: Don't try to print from IRQ/NMI region +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On -RT we try to acquire sleeping locks which might lead to warnings from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on @@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 10 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 83f386175dcc..217abc6a53e1 100644 +index c927f89961a8..49ea374ba8ea 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1789,6 +1789,11 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, @@ -44,5 +44,5 @@ index 83f386175dcc..217abc6a53e1 100644 * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1.. -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch b/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch index dcad17e0e..2d74aa7e2 100644 --- a/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch +++ b/debian/patches-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch @@ -1,8 +1,8 @@ -From 369d276b8c46a573a55da8fdf568f3cb6522d075 Mon Sep 17 00:00:00 2001 +From f4a7ce2d260ded876500c4dd8df56e6a81683cc2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 19:01:05 +0100 -Subject: [PATCH 209/290] printk: Drop the logbuf_lock more often -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 209/319] printk: Drop the logbuf_lock more often +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The lock is hold with irgs off. The latency drops 500us+ on my arm bugs with a "full" buffer after executing "dmesg" on the shell. 
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 28 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 217abc6a53e1..58517c68e896 100644 +index 49ea374ba8ea..acef3657a316 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1427,12 +1427,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear) @@ -79,5 +79,5 @@ index 217abc6a53e1..58517c68e896 100644 kfree(text); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch index da66899d7..0d90037da 100644 --- a/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch +++ b/debian/patches-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch @@ -1,12 +1,12 @@ -From 02d7b4ff5ed37112bbc7cb2cd8ee97f43f8c8800 Mon Sep 17 00:00:00 2001 +From c444418c13055a4ad5e3490c84e769cb5fb9de8e Mon Sep 17 00:00:00 2001 From: "Yadi.hu" Date: Wed, 10 Dec 2014 10:32:09 +0800 -Subject: [PATCH 210/290] ARM: enable irq in translation/section permission +Subject: [PATCH 210/319] ARM: enable irq in translation/section permission fault handlers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Probably happens on all ARM, with CONFIG_PREEMPT_RT_FULL @@ -92,5 +92,5 @@ index a9ee0d9dc740..20b0e146de98 100644 return 0; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch index 6e588df30..1cef2c8b6 100644 --- a/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch +++ b/debian/patches-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch @@ -1,8 +1,8 @@ -From dd9707b2db096f5af75ee3207ac1ced6a8acec2d Mon Sep 17 00:00:00 2001 +From fb8dce6fdf3eb522996555b08f6b53cee8c51514 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:00 -0600 -Subject: [PATCH 211/290] genirq: update irq_set_irqchip_state documentation -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 211/319] genirq: update irq_set_irqchip_state documentation +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On -rt kernels, the use of migrate_disable()/migrate_enable() is sufficient to guarantee a task isn't moved to another CPU. 
Update the @@ -28,5 +28,5 @@ index 3858ac895777..5701774a6d71 100644 */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch index 60da83c5b..77b642b80 100644 --- a/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch +++ b/debian/patches-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch @@ -1,9 +1,9 @@ -From b697d50d198d98ced440f6f1474cb505ff3f70a8 Mon Sep 17 00:00:00 2001 +From 32a33089c1cf0f0221c409175841c6fce57a8906 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Thu, 11 Feb 2016 11:54:01 -0600 -Subject: [PATCH 212/290] KVM: arm/arm64: downgrade preempt_disable()d region +Subject: [PATCH 212/319] KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating the vgic and timer states to prevent the calling task from migrating to @@ -55,5 +55,5 @@ index d982650deb33..efe2d6c0201c 100644 ret = handle_exit(vcpu, run, ret); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch index d01c314e3..f655f1557 100644 --- a/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ b/debian/patches-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch @@ -1,9 +1,9 @@ -From 3fb1b55dbb6832214ae31cb9f5ed08b8360162e7 Mon Sep 17 00:00:00 2001 +From 18e844b559c80f565f74604742cb39a7c4c05b85 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Jul 2018 14:02:38 +0200 -Subject: [PATCH 213/290] arm64: fpsimd: use preemp_disable in addition to +Subject: [PATCH 213/319] arm64: fpsimd: use preemp_disable in addition to local_bh_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The code disables BH and expects that it is not preemptible. 
On -RT the @@ -167,5 +167,5 @@ index 58c53bc96928..71252cd8b594 100644 EXPORT_SYMBOL(kernel_neon_begin); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch b/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch index 0d9118a10..5cb79de17 100644 --- a/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch +++ b/debian/patches-rt/0214-kgdb-serial-Short-term-workaround.patch @@ -1,8 +1,8 @@ -From d1c62c912763a6d7d729f43e63605bb17d7814fe Mon Sep 17 00:00:00 2001 +From ab7ce891fbdc5a21eaae726fb2ce7dd240c33c70 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 28 Jul 2011 12:42:23 -0500 -Subject: [PATCH 214/290] kgdb/serial: Short term workaround -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 214/319] kgdb/serial: Short term workaround +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On 07/27/2011 04:37 PM, Thomas Gleixner wrote: > - KGDB (not yet disabled) is reportedly unusable on -rt right now due @@ -82,5 +82,5 @@ index 6a4b41484afe..197cb422f6e1 100644 return r; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch b/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch index ebe096076..b3013f878 100644 --- a/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch +++ b/debian/patches-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch @@ -1,8 +1,8 @@ -From 1c3cb84e399e50f312537ee0399deefc5557d1ea Mon Sep 17 00:00:00 2001 +From b3e1d341094a10d4fecef1589e403390b7873691 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sat, 30 Jul 2011 21:55:53 -0500 -Subject: [PATCH 215/290] sysfs: Add /sys/kernel/realtime entry -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 215/319] sysfs: Add /sys/kernel/realtime entry +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Add a /sys/kernel entry to indicate that the kernel is a realtime kernel. @@ -50,5 +50,5 @@ index 46ba853656f6..9a23632b6294 100644 NULL }; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch b/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch index 2a0996feb..bc4857b9b 100644 --- a/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch +++ b/debian/patches-rt/0216-mm-rt-kmap_atomic-scheduling.patch @@ -1,8 +1,8 @@ -From aff5114bccbb42db5c0f8ada358804caefb8dd8d Mon Sep 17 00:00:00 2001 +From 17638e5e920fd7ea1e08812f53fbc2070344bf75 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jul 2011 10:43:51 +0200 -Subject: [PATCH 216/290] mm, rt: kmap_atomic scheduling -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 216/319] mm, rt: kmap_atomic scheduling +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In fact, with migrate_disable() existing one could play games with kmap_atomic. 
You could save/restore the kmap_atomic slots on context @@ -248,7 +248,7 @@ index 0690679832d4..1ac89e4718bf 100644 } diff --git a/include/linux/sched.h b/include/linux/sched.h -index 5e1cc92c2f5c..834f46cb258b 100644 +index 756fed8f5994..dc668524ccff 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -28,6 +28,7 @@ @@ -321,5 +321,5 @@ index 59db3223a5d6..22aa3ddbd87b 100644 unsigned int nr_free_highpages (void) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch b/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch index bbcfca1bd..117c0308a 100644 --- a/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch +++ b/debian/patches-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch @@ -1,8 +1,8 @@ -From 57772c842e1a3a0de1d2490c20afe90c72d8abfb Mon Sep 17 00:00:00 2001 +From 1c72f5b8223730db5075d0aee9e9097f03f078cd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 17:09:55 +0100 -Subject: [PATCH 217/290] x86/highmem: Add a "already used pte" check -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 217/319] x86/highmem: Add a "already used pte" check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This is a copy from kmap_atomic_prot(). @@ -25,5 +25,5 @@ index d5a48210d0f6..c0ec8d430c02 100644 current->kmap_pte[type] = pte; #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch b/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch index 2a5401c49..be3c5dd84 100644 --- a/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch +++ b/debian/patches-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch @@ -1,8 +1,8 @@ -From fded314d1af3bcdfc535e14068010d160ffc940f Mon Sep 17 00:00:00 2001 +From 78a8cefa72899cf2f2dca96bc087f490d77e8d54 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 21:37:27 +0100 -Subject: [PATCH 218/290] arm/highmem: Flush tlb on unmap -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 218/319] arm/highmem: Flush tlb on unmap +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The tlb should be flushed on unmap and thus make the mapping entry invalid. This is only done in the non-debug case which does not look @@ -30,5 +30,5 @@ index d02f8187b1cc..eb4b225d28c9 100644 } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch b/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch index 0a9c4cdfe..0ebbf499e 100644 --- a/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch +++ b/debian/patches-rt/0219-arm-Enable-highmem-for-rt.patch @@ -1,8 +1,8 @@ -From 5fc0ce79ee0183b4033b86d6824ee0463b6cfa3f Mon Sep 17 00:00:00 2001 +From 21c74b05acdd54c23dc85d4cfaa7a63d8052e071 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Feb 2013 11:03:11 +0100 -Subject: [PATCH 219/290] arm: Enable highmem for rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 219/319] arm: Enable highmem for rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz fixup highmem for ARM. 
@@ -180,5 +180,5 @@ index 1ac89e4718bf..eaa2ef9bc10e 100644 #include -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch b/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch index c7d3be1ab..57aba470c 100644 --- a/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch +++ b/debian/patches-rt/0220-scsi-fcoe-Make-RT-aware.patch @@ -1,8 +1,8 @@ -From 8e3f3ca20fb094619a58f24a7503aa7d1e6e8f84 Mon Sep 17 00:00:00 2001 +From d6a4bcaae8a666545dd241364b9051c2ae48da2f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 12 Nov 2011 14:00:48 +0100 -Subject: [PATCH 220/290] scsi/fcoe: Make RT aware. -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 220/319] scsi/fcoe: Make RT aware. +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Do not disable preemption while taking sleeping locks. All user look safe for migrate_diable() only. @@ -95,7 +95,7 @@ index 24cbd0a2cc69..ccf60801fe9d 100644 list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c -index 42bcf7f3a0f9..2ce045d6860c 100644 +index 6ba257cbc6d9..d2398a19f84c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, @@ -112,5 +112,5 @@ index 42bcf7f3a0f9..2ce045d6860c 100644 /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch b/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch index fbcb739cc..938568136 100644 --- a/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch +++ b/debian/patches-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch @@ -1,8 +1,8 @@ -From 90de79b5b60daade1df8897b30d56b19dea5d83a Mon Sep 17 00:00:00 2001 +From 630856f6dcbb9cb6e3e5a20f11a7de9dfd6013bb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Nov 2011 18:19:27 +0100 -Subject: [PATCH 221/290] x86: crypto: Reduce preempt disabled regions -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 221/319] x86: crypto: Reduce preempt disabled regions +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Restrict the preempt disabled regions to the actual floating point operations and enable preemption for the administrative actions. 
@@ -114,5 +114,5 @@ index 917f25e4d0a8..58d8c03fc32d 100644 return err; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch index a5ae6ea59..3473eaa81 100644 --- a/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ b/debian/patches-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch @@ -1,8 +1,8 @@ -From a51e00d991c576ba020f9db8d2c4453cd7f15ecd Mon Sep 17 00:00:00 2001 +From 84e90e53a2a793092583a0aa9d2909cc6469f827 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Feb 2014 17:24:04 +0100 -Subject: [PATCH 222/290] crypto: Reduce preempt disabled regions, more algos -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 222/319] crypto: Reduce preempt disabled regions, more algos +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Don Estabrook reported | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() @@ -259,5 +259,5 @@ index a78ef99a9981..dac489a1c4da 100644 } EXPORT_SYMBOL_GPL(glue_xts_req_128bit); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch b/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch index 03b693c97..261e2c157 100644 --- a/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch +++ b/debian/patches-rt/0223-crypto-limit-more-FPU-enabled-sections.patch @@ -1,11 +1,11 @@ -From dc93a0747106cd976c1745561130f7249b2f2447 Mon Sep 17 00:00:00 2001 +From b37b2852c94fee4a529a3b90a77a9010d404cdef Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 30 Nov 2017 13:40:10 +0100 -Subject: [PATCH 223/290] crypto: limit more FPU-enabled sections +Subject: [PATCH 223/319] crypto: limit more FPU-enabled sections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Those crypto drivers use SSE/AVX/… for their crypto work and in order to do so in kernel they need to enable the "FPU" in kernel mode which @@ -104,5 +104,5 @@ index 2e5003fef51a..768c53767bb2 100644 * Save the FPU state (mark it for reload if necessary): * -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch index 6383b25cf..e578b780c 100644 --- a/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch +++ b/debian/patches-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch @@ -1,9 +1,9 @@ -From 6388ffad9f71b6cc1e7074b010a3c0cd24de1ea7 Mon Sep 17 00:00:00 2001 +From 5035f90916e5526fc3ce79c419ce0ef3cf6ddadd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 11 Jul 2018 17:14:47 +0200 -Subject: [PATCH 224/290] crypto: scompress - serialize RT percpu scratch +Subject: [PATCH 224/319] crypto: scompress - serialize RT percpu scratch buffer access with a local lock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz | BUG: 
sleeping function called from invalid context at kernel/locking/rtmutex.c:974 | in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test @@ -79,5 +79,5 @@ index 968bbcf65c94..c2f0077e0801 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch index f120dd434..b7b690835 100644 --- a/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch +++ b/debian/patches-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch @@ -1,9 +1,9 @@ -From e154dffb6f88ac74a8ab087c4b884413754e38aa Mon Sep 17 00:00:00 2001 +From f595ab5e665480b98f04964b2dffc01dd153deb3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Jul 2018 18:52:00 +0200 -Subject: [PATCH 225/290] crypto: cryptd - add a lock instead +Subject: [PATCH 225/319] crypto: cryptd - add a lock instead preempt_disable/local_bh_disable -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz cryptd has a per-CPU lock which protected with local_bh_disable() and preempt_disable(). @@ -81,5 +81,5 @@ index e0c8e907b086..e079f9a70201 100644 if (!req) return; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch b/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch index c28f16c65..af41f8fa2 100644 --- a/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch +++ b/debian/patches-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch @@ -1,9 +1,9 @@ -From 9aa84af05c5298af918741fdd22779fe320edc90 Mon Sep 17 00:00:00 2001 +From f5c9952be8c9b6cd5c5a310f3f73e7c666a1bfc8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 226/290] panic: skip get_random_bytes for RT_FULL in +Subject: [PATCH 226/319] panic: skip get_random_bytes for RT_FULL in init_oops_id -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Disable on -RT. If this is invoked from irq-context we will have problems to acquire the sleeping lock. @@ -30,5 +30,5 @@ index 8138a676fb7d..fa114be5c30f 100644 return 0; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch b/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch index aa38b2f63..7d0388ff9 100644 --- a/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch +++ b/debian/patches-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch @@ -1,8 +1,8 @@ -From 54f488a780d046deaf409f393451114571fddc2b Mon Sep 17 00:00:00 2001 +From 27f7938ca3c0f226f85745ad8e52b00b26d60612 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Dec 2010 14:25:18 +0100 -Subject: [PATCH 227/290] x86: stackprotector: Avoid random pool on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 227/319] x86: stackprotector: Avoid random pool on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz CPU bringup calls into the random pool to initialize the stack canary. 
During boot that works nicely even on RT as the might sleep @@ -47,5 +47,5 @@ index 8ec97a62c245..7bc85841fc56 100644 canary += tsc + (tsc << 32UL); canary &= CANARY_MASK; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0228-random-Make-it-work-on-rt.patch b/debian/patches-rt/0228-random-Make-it-work-on-rt.patch index c2565f863..d34ea7404 100644 --- a/debian/patches-rt/0228-random-Make-it-work-on-rt.patch +++ b/debian/patches-rt/0228-random-Make-it-work-on-rt.patch @@ -1,8 +1,8 @@ -From 56f7ebec5cf501075c40f3d7ff1283a3279f88ca Mon Sep 17 00:00:00 2001 +From 10f49438cce794e7cf27b9393c94adf9ee9fa5dc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Aug 2012 20:38:50 +0200 -Subject: [PATCH 228/290] random: Make it work on rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 228/319] random: Make it work on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Delegate the random insertion to the forced threaded interrupt handler. Store the return IP of the hard interrupt handler in the irq @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner 7 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c -index 86fe1df90239..468a128c8ad8 100644 +index 53e822793d46..5f359b9d248b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1232,28 +1232,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) @@ -163,5 +163,5 @@ index 5701774a6d71..ce86341a9e19 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch b/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch index 23a96e09c..ba5ad4ff5 100644 --- a/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch +++ b/debian/patches-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch @@ -1,8 +1,8 @@ -From 42dc365f8f07fc9531d70fcc94605ab25acc2685 Mon Sep 17 00:00:00 2001 +From 42a2638abd83cbeb0f35b4a4213f76339442c514 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Jul 2017 17:31:20 +0200 -Subject: [PATCH 229/290] cpu/hotplug: Implement CPU pinning -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 229/319] cpu/hotplug: Implement CPU pinning +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner --- @@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner 2 files changed, 39 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 834f46cb258b..4559d8039c45 100644 +index dc668524ccff..fc9300d0787d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -671,6 +671,7 @@ struct task_struct { @@ -113,5 +113,5 @@ index cf1f2eb6bb90..e6c2d814f7f9 100644 irq_unlock_sparse(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch index 37cfff4d3..3b7fd0e3a 100644 --- a/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ b/debian/patches-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch @@ -1,9 +1,9 @@ -From b4a9b46277740b295834182cfe114d91b0c44e31 Mon Sep 17 00:00:00 2001 +From 937dbf87879ba74afa9aa5850f2491b5669783a5 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 19 Aug 2018 08:28:35 +0200 -Subject: [PATCH 230/290] sched: 
Allow pinned user tasks to be awakened to the +Subject: [PATCH 230/319] sched: Allow pinned user tasks to be awakened to the CPU they pinned -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Since commit 7af443ee16976 ("sched/core: Require cpu_active() in select_task_rq(), for user tasks") select_fallback_rq() will BUG() if @@ -35,5 +35,5 @@ index dcf2deedd3f8..6ef0dcea94d7 100644 return cpu_active(cpu); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch index 128ef01b7..cbf0b7c01 100644 --- a/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ b/debian/patches-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch @@ -1,8 +1,8 @@ -From bd5177f5caaa75365fab66e40f912772cd9da73b Mon Sep 17 00:00:00 2001 +From 1c0db07bc0ea85302c4b3868933f4042f470d19e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Aug 2017 18:31:00 +0200 -Subject: [PATCH 231/290] hotplug: duct-tape RT-rwlock usage for non-RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 231/319] hotplug: duct-tape RT-rwlock usage for non-RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz This type is only available on -RT. We need to craft something for non-RT. Since the only migrate_disable() user is -RT only, there is no @@ -98,5 +98,5 @@ index e6c2d814f7f9..c8631e699ce2 100644 irq_unlock_sparse(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch b/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch index 8196928b5..055ce7f36 100644 --- a/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch +++ b/debian/patches-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch @@ -1,8 +1,8 @@ -From 9dcfd3052899233a5c4b7593793005967d025b3f Mon Sep 17 00:00:00 2001 +From 944dad5de7431597c62ed6aa089f68af00e2a681 Mon Sep 17 00:00:00 2001 From: Priyanka Jain Date: Thu, 17 May 2012 09:35:11 +0530 -Subject: [PATCH 232/290] net: Remove preemption disabling in netif_rx() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 232/319] net: Remove preemption disabling in netif_rx() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz 1)enqueue_to_backlog() (called from netif_rx) should be bind to a particluar CPU. 
This can be achieved by @@ -36,10 +36,10 @@ Signed-off-by: Thomas Gleixner 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index 950356b8b5b1..ee8cb6d4f6aa 100644 +index 31d51ebff238..aba305d459be 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4499,7 +4499,7 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4487,7 +4487,7 @@ static int netif_rx_internal(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -48,7 +48,7 @@ index 950356b8b5b1..ee8cb6d4f6aa 100644 rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -4509,14 +4509,14 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4497,14 +4497,14 @@ static int netif_rx_internal(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); @@ -67,5 +67,5 @@ index 950356b8b5b1..ee8cb6d4f6aa 100644 return ret; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch b/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch index a3bb2533d..002f2a4d7 100644 --- a/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch +++ b/debian/patches-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch @@ -1,8 +1,8 @@ -From e6babcfe0fa1248a3e14b455738237a4e13b50eb Mon Sep 17 00:00:00 2001 +From 3cb21dc1f1615fa795ab14c7d9165d4d5be63fdc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Sep 2012 16:21:08 +0200 -Subject: [PATCH 233/290] net: Another local_irq_disable/kmalloc headache -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 233/319] net: Another local_irq_disable/kmalloc headache +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Replace it by a local lock. Though that's pretty inefficient :( @@ -61,5 +61,5 @@ index 0629ca89ab74..6ca7cb2b4364 100644 if (unlikely(!data)) return NULL; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch index 771bfdebb..b95784857 100644 --- a/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ b/debian/patches-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch @@ -1,9 +1,9 @@ -From cf637289b14f736be18788b3ba3160ba35949396 Mon Sep 17 00:00:00 2001 +From e5062bfc16de2dddb27bd5ff547d7d0c46d199f7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 15 Jan 2016 16:33:34 +0100 -Subject: [PATCH 234/290] net/core: protect users of napi_alloc_cache against +Subject: [PATCH 234/319] net/core: protect users of napi_alloc_cache against reentrance -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On -RT the code running in BH can not be moved to another CPU so CPU local variable remain local. 
However the code can be preempted @@ -115,5 +115,5 @@ index 6ca7cb2b4364..c5c0d2095873 100644 void __kfree_skb_defer(struct sk_buff *skb) { -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch b/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch index 4ba4321d3..a451c3962 100644 --- a/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch +++ b/debian/patches-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch @@ -1,9 +1,9 @@ -From 28b182b074b9bb5ea0707e1f674eae8d576357c7 Mon Sep 17 00:00:00 2001 +From 5ecec03d0ccfb0186e960a5c77674726374ea656 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 11:18:08 +0100 -Subject: [PATCH 235/290] net: netfilter: Serialize xt_write_recseq sections on +Subject: [PATCH 235/319] net: netfilter: Serialize xt_write_recseq sections on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The netfilter code relies only on the implicit semantics of local_bh_disable() for serializing wt_write_recseq sections. RT breaks @@ -80,5 +80,5 @@ index 93aaec3a54ec..b364cf8e5776 100644 EXPORT_SYMBOL_GPL(nf_ipv6_ops); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch b/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch index 6d1b09438..23e978d62 100644 --- a/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch +++ b/debian/patches-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch @@ -1,8 +1,8 @@ -From d93ef2fbc7cc016328b4e3cfc2b58960acc4b696 Mon Sep 17 00:00:00 2001 +From c1525de855b108b767798072986139f55e8aadb2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Mar 2013 18:06:20 +0100 -Subject: [PATCH 236/290] net: Add a mutex around devnet_rename_seq -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 236/319] net: Add a mutex around devnet_rename_seq +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On RT write_seqcount_begin() disables preemption and device_rename() allocates memory with GFP_KERNEL and grabs later the sysfs_mutex @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c -index ee8cb6d4f6aa..fd54d56e1e7d 100644 +index aba305d459be..28a3d0683f4b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -196,6 +196,7 @@ static unsigned int napi_gen_id = NR_CPUS; @@ -109,5 +109,5 @@ index ee8cb6d4f6aa..fd54d56e1e7d 100644 /** -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch b/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch index f35ea3265..0763872dd 100644 --- a/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch +++ b/debian/patches-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch @@ -1,9 +1,9 @@ -From d7695d53a9f2bed0ce8208662ba7f099af5b03ea Mon Sep 17 00:00:00 2001 +From 87a54cc8a1aefbbfc2a9f58aaae0a36580aba9ea Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Mon, 16 Apr 2012 15:01:56 +0800 -Subject: [PATCH 237/290] lockdep: selftest: Only do hardirq context test for +Subject: [PATCH 237/319] lockdep: selftest: 
Only do hardirq context test for raw spinlock -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz On -rt there is no softirq context any more and rwlock is sleepable, disable softirq context test and rwlock+irq test. @@ -58,5 +58,5 @@ index 1e1bbf171eca..5cdf3809905e 100644 ww_tests(); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch index 67abe524e..fc678985a 100644 --- a/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ b/debian/patches-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch @@ -1,9 +1,9 @@ -From 0bcc13d1f486d89eea4550bd9be5b6607a041a85 Mon Sep 17 00:00:00 2001 +From c5d6a6cc6dbbfa1b5366a916a67fb7f5e9ac403b Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Wed, 28 Jan 2015 13:08:45 -0600 -Subject: [PATCH 238/290] lockdep: selftest: fix warnings due to missing +Subject: [PATCH 238/319] lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz "lockdep: Selftest: Only do hardirq context test for raw spinlock" disabled the execution of certain tests with PREEMPT_RT_FULL, but did @@ -145,5 +145,5 @@ index 5cdf3809905e..32db9532ddd4 100644 * read-lock / write-lock recursion that is unsafe. */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch index eb7cf0e0c..05faa0275 100644 --- a/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0239-sched-Add-support-for-lazy-preemption.patch @@ -1,8 +1,8 @@ -From 0ea3e027332a92b240723b63d08f875461e0c208 Mon Sep 17 00:00:00 2001 +From 0ad958c36369da7e5c381970cd63af00b313454f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Oct 2012 18:50:54 +0100 -Subject: [PATCH 239/290] sched: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 239/319] sched: Add support for lazy preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz It has become an obsession to mitigate the determinism vs. throughput loss of RT. 
Looking at the mainline semantics of preemption points @@ -144,7 +144,7 @@ index ed8413e7140f..9c74a019bf57 100644 } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 4559d8039c45..d2386fa9ed0f 100644 +index fc9300d0787d..a8ebd49c4f96 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1733,6 +1733,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) @@ -420,10 +420,10 @@ index 6ef0dcea94d7..a17c765d3fcb 100644 } EXPORT_SYMBOL(migrate_enable); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 6e6d9e999814..5e00ddd76749 100644 +index 27f9f9a785c1..77d991e9ba23 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4075,7 +4075,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4091,7 +4091,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -432,7 +432,7 @@ index 6e6d9e999814..5e00ddd76749 100644 /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4099,7 +4099,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4115,7 +4115,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -441,7 +441,7 @@ index 6e6d9e999814..5e00ddd76749 100644 } static void -@@ -4241,7 +4241,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -4257,7 +4257,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -450,7 +450,7 @@ index 6e6d9e999814..5e00ddd76749 100644 return; } /* -@@ -4375,7 +4375,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -4391,7 +4391,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -459,7 +459,7 @@ index 6e6d9e999814..5e00ddd76749 100644 } static __always_inline -@@ -5069,7 +5069,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -5091,7 +5091,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -468,7 +468,7 @@ index 6e6d9e999814..5e00ddd76749 100644 return; } hrtick_start(rq, delta); -@@ -6645,7 +6645,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -6684,7 +6684,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -477,7 +477,7 @@ index 6e6d9e999814..5e00ddd76749 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -9746,7 +9746,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -9785,7 +9785,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -486,7 +486,7 @@ index 6e6d9e999814..5e00ddd76749 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -9770,7 +9770,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -9809,7 +9809,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -602,7 +602,7 @@ index 5eccf1c201db..d13b58073bce 100644 } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h -index d11d7bfc3fa5..6eba8c96f4bc 100644 +index ee0c6a313ed1..5986f494e2b0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head { @@ -667,5 +667,5 @@ index 46c96744f09d..3f78b0afb729 100644 trace_seq_printf(s, "%x", entry->migrate_disable); else -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch b/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch index 633e9f6f3..edaad7112 100644 --- a/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch +++ b/debian/patches-rt/0240-ftrace-Fix-trace-header-alignment.patch @@ -1,8 +1,8 @@ -From d41ba5694f1fd54574be4bf3a9d7bc859ca6cb40 Mon Sep 17 00:00:00 2001 +From a55fa48e31843f56553ccfaf3bbeb88de95df322 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:08:30 +0200 -Subject: [PATCH 240/290] ftrace: Fix trace header alignment -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 240/319] ftrace: Fix trace header alignment +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Line up helper arrows to the right column. @@ -48,5 +48,5 @@ index d13b58073bce..99dde9db6e3e 100644 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch b/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch index 10f0987cc..57f080401 100644 --- a/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch +++ b/debian/patches-rt/0241-x86-Support-for-lazy-preemption.patch @@ -1,8 +1,8 @@ -From e4cba91598b3be3d6bcadce513ffd80041895d70 Mon Sep 17 00:00:00 2001 +From bcd492d8695d089a482a25d88de7628040f2c5c3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 11:03:47 +0100 -Subject: [PATCH 241/290] x86: Support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 241/319] x86: Support for lazy preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Implement the x86 pieces for lazy preempt. 
@@ -236,5 +236,5 @@ index 01de31db300d..ce1c5b9fbd8c 100644 /* TLB state for the entry code */ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch b/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch index ec7665d37..e6f3df0d0 100644 --- a/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch +++ b/debian/patches-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch @@ -1,9 +1,9 @@ -From 9c314d19ce79b759d4978179077e8aa59cf1b1df Mon Sep 17 00:00:00 2001 +From 43dad4b3508d88984e9b459d8bacd14fa92a3e73 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Feb 2019 16:57:09 +0100 -Subject: [PATCH 242/290] x86: lazy-preempt: properly check against +Subject: [PATCH 242/319] x86: lazy-preempt: properly check against preempt-mask -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz should_resched() should check against preempt_offset after unmasking the need-resched-bit. Otherwise should_resched() won't work for @@ -29,5 +29,5 @@ index 22992c837795..f66708779274 100644 if (current_thread_info()->preempt_lazy_count) return false; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch b/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch index 0a676db3f..3c1ef73c7 100644 --- a/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch +++ b/debian/patches-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch @@ -1,9 +1,9 @@ -From 45c086ec4e89dcf9194c2b88b5ee38ffa79fe4ec Mon Sep 17 00:00:00 2001 +From 329c0ad94ef5c8da4e717fb5ba079f82bb5ccd63 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Feb 2019 14:53:49 +0100 -Subject: [PATCH 243/290] x86: lazy-preempt: use proper return label on +Subject: [PATCH 243/319] x86: lazy-preempt: use proper return label on 32bit-x86 -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The lazy-preempt uses the wrong return label in case preemption isn't possible. This results crash while returning to the kernel. @@ -41,5 +41,5 @@ index 985988227877..d880352e410c 100644 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? 
jz restore_all_kernel -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch index 6ff992bd0..ea4df2860 100644 --- a/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0244-arm-Add-support-for-lazy-preemption.patch @@ -1,8 +1,8 @@ -From 61f1fec1af303a55b098a0228a82f9632f220cd8 Mon Sep 17 00:00:00 2001 +From 70b648cf82896ef25a1e979f31cb667c0d19549f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Oct 2012 12:04:11 +0100 -Subject: [PATCH 244/290] arm: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 244/319] arm: Add support for lazy preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Implement the arm pieces for lazy preempt. @@ -164,5 +164,5 @@ index b908382b69ff..339fbc281cf1 100644 } else { if (unlikely(!user_mode(regs))) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch index 94c78f91a..071da471d 100644 --- a/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch +++ b/debian/patches-rt/0245-powerpc-Add-support-for-lazy-preemption.patch @@ -1,8 +1,8 @@ -From dbdce4ec0b82c3bbd36c36c374d4f964ecc9f2f1 Mon Sep 17 00:00:00 2001 +From 4e426ca152bccf37f6d3d892d9c15777c0cbb6b0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 10:14:11 +0100 -Subject: [PATCH 245/290] powerpc: Add support for lazy preemption -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 245/319] powerpc: Add support for lazy preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Implement the powerpc pieces for lazy preempt. @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index 1563820a37e8..d4835f8cfcf2 100644 +index 1b332f69dd36..c8c2230c8aff 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -216,6 +216,7 @@ config PPC @@ -192,5 +192,5 @@ index 58b50967b3e5..ef02e41d5d96 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch b/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch index b3b8928f9..db2fcbf08 100644 --- a/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch +++ b/debian/patches-rt/0246-arch-arm64-Add-lazy-preempt-support.patch @@ -1,8 +1,8 @@ -From f72b8c6debbe5f7e9bd418213de3f00d3c0e7891 Mon Sep 17 00:00:00 2001 +From d54fea06e07469d71862f3fa8ff84d0a0a1d3a4d Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Thu, 14 May 2015 17:52:17 +0200 -Subject: [PATCH 246/290] arch/arm64: Add lazy preempt support -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 246/319] arch/arm64: Add lazy preempt support +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz arm64 is missing support for PREEMPT_RT. The main feature which is lacking is support for lazy preemption. 
The arch-specific entry code, @@ -129,5 +129,5 @@ index 5dcc942906db..4fec251fe147 100644 local_daif_restore(DAIF_PROCCTX_NOIRQ); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch index 4baf86d7f..7e2bf3f09 100644 --- a/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch +++ b/debian/patches-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch @@ -1,9 +1,9 @@ -From f4dc4bd0a39f63d832830cf52a03dc9fdc628234 Mon Sep 17 00:00:00 2001 +From b27b8bc87bed66cafef6d28f28e3d599331c109b Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 16 Oct 2016 05:11:54 +0200 -Subject: [PATCH 247/290] connector/cn_proc: Protect send_msg() with a local +Subject: [PATCH 247/319] connector/cn_proc: Protect send_msg() with a local lock on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 |in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep @@ -70,5 +70,5 @@ index ad48fd52cb53..c5264b3ee0b0 100644 void proc_fork_connector(struct task_struct *task) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch index 4af43bf0a..42206560c 100644 --- a/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch +++ b/debian/patches-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch @@ -1,9 +1,9 @@ -From 299edbde24f04fa915d7017d674aeee802a4426b Mon Sep 17 00:00:00 2001 +From a49d401a1598ffda5dc7255df4cf12078d9d9563 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 31 Mar 2016 04:08:28 +0200 -Subject: [PATCH 248/290] drivers/block/zram: Replace bit spinlocks with +Subject: [PATCH 248/319] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz They're nondeterministic, and lead to ___might_sleep() splats in -rt. OTOH, they're a lot less wasteful than an rtmutex per page. 
@@ -100,5 +100,5 @@ index d1095dfdffa8..144e91061df8 100644 ktime_t ac_time; #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch index 571c57ee4..c1a70176d 100644 --- a/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch +++ b/debian/patches-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch @@ -1,9 +1,9 @@ -From 62ee8af11e220e642012e3364327cd4510578f07 Mon Sep 17 00:00:00 2001 +From f7439c3631a8a3418de84e5f8152c2365f97000e Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 20 Oct 2016 11:15:22 +0200 -Subject: [PATCH 249/290] drivers/zram: Don't disable preemption in +Subject: [PATCH 249/319] drivers/zram: Don't disable preemption in zcomp_stream_get/put() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz In v4.7, the driver switched to percpu compression streams, disabling preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We @@ -103,5 +103,5 @@ index d6cf9508b80d..71520199226a 100644 /* Should NEVER happen. Return bio error if it does. */ -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch b/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch index 502302f1f..c42775d69 100644 --- a/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch +++ b/debian/patches-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch @@ -1,9 +1,9 @@ -From e68090808bef14283e17ecb7a6549728ffd2240b Mon Sep 17 00:00:00 2001 +From 24583db925f3b6091bd76ebcc5ebdc612bee0ba1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 23 Aug 2017 11:57:29 +0200 -Subject: [PATCH 250/290] drivers/zram: fix zcomp_stream_get() +Subject: [PATCH 250/319] drivers/zram: fix zcomp_stream_get() smp_processor_id() use in preemptible code -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Use get_local_ptr() instead this_cpu_ptr() to avoid a warning regarding smp_processor_id() in preemptible code. @@ -40,5 +40,5 @@ index dd65a27ae2cc..eece02262000 100644 int zcomp_compress(struct zcomp_strm *zstrm, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch b/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch index 968dcff93..990965508 100644 --- a/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch +++ b/debian/patches-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch @@ -1,8 +1,8 @@ -From fb36406fa8141b155e8ff2b751a13f9607520c2d Mon Sep 17 00:00:00 2001 +From c1270c10c34f78939acfd298cefedaf1ac6497a0 Mon Sep 17 00:00:00 2001 From: Haris Okanovic Date: Tue, 15 Aug 2017 15:13:08 -0500 -Subject: [PATCH 251/290] tpm_tis: fix stall after iowrite*()s -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 251/319] tpm_tis: fix stall after iowrite*()s +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz ioread8() operations to TPM MMIO addresses can stall the cpu when immediately following a sequence of iowrite*()'s to the same region. 
@@ -80,5 +80,5 @@ index f08949a5f678..9fefcfcae593 100644 return 0; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch index bcfd991e4..b324707fa 100644 --- a/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch +++ b/debian/patches-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch @@ -1,8 +1,8 @@ -From 1a6947443058c0374db28d38e92671731ce32711 Mon Sep 17 00:00:00 2001 +From 9f23a68af6bf36d1a1f3bb1773e001b459b940c0 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 252/290] watchdog: prevent deferral of watchdogd wakeup on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 252/319] watchdog: prevent deferral of watchdogd wakeup on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are deferred for execution into the context of ktimersoftd unless otherwise @@ -37,10 +37,10 @@ Signed-off-by: Sebastian Andrzej Siewior 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c -index 4b89333e8eb4..ed12734e1e8c 100644 +index e64aa88e99da..11ba2c3b187f 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c -@@ -146,7 +146,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) +@@ -145,7 +145,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) ktime_t t = watchdog_next_keepalive(wdd); if (t > 0) @@ -49,7 +49,7 @@ index 4b89333e8eb4..ed12734e1e8c 100644 } else { hrtimer_cancel(&wd_data->timer); } -@@ -165,7 +165,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) +@@ -164,7 +164,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) if (ktime_after(earliest_keepalive, now)) { hrtimer_start(&wd_data->timer, ktime_sub(earliest_keepalive, now), @@ -58,7 +58,7 @@ index 4b89333e8eb4..ed12734e1e8c 100644 return 0; } -@@ -948,7 +948,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) +@@ -947,7 +947,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) return -ENODEV; kthread_init_work(&wd_data->work, watchdog_ping_work); @@ -67,7 +67,7 @@ index 4b89333e8eb4..ed12734e1e8c 100644 wd_data->timer.function = watchdog_timer_expired; if (wdd->id == 0) { -@@ -1005,7 +1005,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) +@@ -1004,7 +1004,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) __module_get(wdd->ops->owner); get_device(&wd_data->dev); if (handle_boot_enabled) @@ -77,5 +77,5 @@ index 4b89333e8eb4..ed12734e1e8c 100644 pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", wdd->id); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch b/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch index 5c0257ead..d70edbd55 100644 --- a/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch +++ b/debian/patches-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch @@ -1,9 +1,9 @@ -From d8adf6f9943f681b148d93d21014e8e41067067e Mon Sep 17 00:00:00 2001 +From c3c6f6b7964e76c362ef62b4e7d49d5f8968fa16 Mon Sep 17 00:00:00 2001 
From: Mike Galbraith Date: Sat, 27 Feb 2016 08:09:11 +0100 -Subject: [PATCH 253/290] drm,radeon,i915: Use preempt_disable/enable_rt() +Subject: [PATCH 253/319] drm,radeon,i915: Use preempt_disable/enable_rt() where recommended -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz DRM folks identified the spots, so use them. @@ -57,5 +57,5 @@ index d8e2d7b3b836..072b831aaf4f 100644 /* Decode into vertical and horizontal scanout position. */ *vpos = position & 0x1fff; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch b/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch index 3bae7408d..ccffa274d 100644 --- a/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch +++ b/debian/patches-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch @@ -1,9 +1,9 @@ -From f020bb0cd2272c8d908dfd4eba9afb593eff70ab Mon Sep 17 00:00:00 2001 +From 48dd47af9e1720307129aaa4b5890713aa5039c5 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 27 Feb 2016 09:01:42 +0100 -Subject: [PATCH 254/290] drm,i915: Use local_lock/unlock_irq() in +Subject: [PATCH 254/319] drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918 [ 8.014041] in_atomic(): 0, irqs_disabled(): 1, pid: 78, name: kworker/u4:4 @@ -124,5 +124,5 @@ index f7026e887fa9..07e4ddebdd80 100644 if (intel_vgpu_active(dev_priv)) return; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch b/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch index afbfd357c..e0e3f1a62 100644 --- a/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch +++ b/debian/patches-rt/0255-drm-i915-disable-tracing-on-RT.patch @@ -1,8 +1,8 @@ -From 9c63af996fae22f9a543ba63986342b6c6225302 Mon Sep 17 00:00:00 2001 +From 3a1aeaa3d5b520717819dfa703e2b84db9260481 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 6 Dec 2018 09:52:20 +0100 -Subject: [PATCH 255/290] drm/i915: disable tracing on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 255/319] drm/i915: disable tracing on -RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Luca Abeni reported this: | BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003 @@ -43,5 +43,5 @@ index b50c6b829715..cc54ec0ef75c 100644 #include #include -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch index bb1f4019d..c1131afa8 100644 --- a/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch +++ b/debian/patches-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch @@ -1,9 +1,9 @@ -From b34fadffa3608307de6514151f16cdd385c14ad8 Mon Sep 17 00:00:00 2001 +From 5ba7788159b2d2b475203bf064224daa907ea545 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 19 Dec 2018 10:47:02 +0100 
-Subject: [PATCH 256/290] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with +Subject: [PATCH 256/319] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with NOTRACE -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The order of the header files is important. If this header file is included after tracepoint.h was included then the NOTRACE here becomes a @@ -29,5 +29,5 @@ index cc54ec0ef75c..33028d8f470e 100644 TP_PROTO(struct i915_request *rq), TP_ARGS(rq) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch b/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch index b9f117c7a..e74f2942c 100644 --- a/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch +++ b/debian/patches-rt/0257-cgroups-use-simple-wait-in-css_release.patch @@ -1,8 +1,8 @@ -From b59ff060d2629b8e98f80af20df9e89d4aa31c5d Mon Sep 17 00:00:00 2001 +From f6bb44ce35eea6fa1f5c4734f2a308eb779fc5d5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 13 Feb 2015 15:52:24 +0100 -Subject: [PATCH 257/290] cgroups: use simple wait in css_release() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 257/319] cgroups: use simple wait in css_release() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz To avoid: |BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 @@ -55,10 +55,10 @@ index a01ebb630abc..34fb541e90be 100644 /* diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index 78ef274b036e..30dd5754e62e 100644 +index 08bd40d90066..aceac97f78f4 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c -@@ -4666,10 +4666,10 @@ static void css_free_rwork_fn(struct work_struct *work) +@@ -4667,10 +4667,10 @@ static void css_free_rwork_fn(struct work_struct *work) } } @@ -71,7 +71,7 @@ index 78ef274b036e..30dd5754e62e 100644 struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; -@@ -4731,8 +4731,8 @@ static void css_release(struct percpu_ref *ref) +@@ -4732,8 +4732,8 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); @@ -82,7 +82,7 @@ index 78ef274b036e..30dd5754e62e 100644 } static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5458,6 +5458,7 @@ static int __init cgroup_wq_init(void) +@@ -5459,6 +5459,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); @@ -91,5 +91,5 @@ index 78ef274b036e..30dd5754e62e 100644 } core_initcall(cgroup_wq_init); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch index aa72ca17e..b0564f3f3 100644 --- a/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch @@ -1,8 +1,8 @@ -From 81c16cc87fefa39b56e6c4bca4e83beddf6eac24 Mon Sep 17 00:00:00 2001 +From 48fb150a079be3445e14143191b40b20d3dc0789 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sun, 8 Jan 2017 09:32:25 +0100 -Subject: [PATCH 258/290] cpuset: Convert callback_lock to raw_spinlock_t -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 258/319] cpuset: Convert callback_lock to raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The two commits below add up to a cpuset might_sleep() splat for RT: @@ -289,5 +289,5 @@ index 7bb129c5b412..92575cb9b493 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch b/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch index 681069791..e13b721be 100644 --- a/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch +++ b/debian/patches-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch @@ -1,8 +1,8 @@ -From aa1de601999a7958fa83e1dae7b0819b6d5106a9 Mon Sep 17 00:00:00 2001 +From f3878cc087bc8c9cb09fe47ddbf28be5918b696b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 11 Oct 2017 17:43:49 +0200 -Subject: [PATCH 259/290] apparmor: use a locallock instead preempt_disable() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 259/319] apparmor: use a locallock instead preempt_disable() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz get_buffers() disables preemption which acts as a lock for the per-CPU variable. Since we can't disable preemption here on RT, a local_lock is @@ -68,7 +68,7 @@ index b6380c5f0097..12abfddb19c9 100644 #endif /* __AA_PATH_H */ diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c -index 8b8b70620bbe..8330ef57a784 100644 +index 730de4638b4e..edc911ff6a4d 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -45,7 +45,7 @@ @@ -81,5 +81,5 @@ index 8b8b70620bbe..8330ef57a784 100644 /* * LSM hook functions -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch b/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch index c6167b6ec..eb3876421 100644 --- a/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch +++ b/debian/patches-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch @@ -1,8 +1,8 @@ -From 6b39a01e228fa1dd4b6e4a63ee0948b2e925a4c4 Mon Sep 17 00:00:00 2001 +From 506dcbf0558561548a478d23395bfbfa1fa9aa62 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 27 Jun 2014 16:24:52 +0200 -Subject: [PATCH 260/290] workqueue: Prevent deadlock/stall on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 260/319] workqueue: Prevent deadlock/stall on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Austin reported a XFS deadlock/stall on RT where scheduled work gets never exececuted and tasks are waiting for each other for ever. 
@@ -203,5 +203,5 @@ index 045b82ca0eb5..714d0cfe4d56 100644 wake_up_process(worker->task); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch index 3615668ae..6dd439368 100644 --- a/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ b/debian/patches-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -1,8 +1,8 @@ -From 3bbe691ca13a581aeb69342fe32522906eefba3c Mon Sep 17 00:00:00 2001 +From 2a7f4ae2436b078eef6ad6a6bd8b54bec6b6bc30 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:56 -0500 -Subject: [PATCH 261/290] signals: Allow rt tasks to cache one sigqueue struct -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 261/319] signals: Allow rt tasks to cache one sigqueue struct +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz To avoid allocation allow rt tasks to cache one sigqueue struct in task struct. @@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner 5 files changed, 70 insertions(+), 5 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h -index d2386fa9ed0f..3c213ec3d3b5 100644 +index a8ebd49c4f96..854a6cb456af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -895,6 +895,8 @@ struct task_struct { @@ -30,7 +30,7 @@ index d2386fa9ed0f..3c213ec3d3b5 100644 sigset_t real_blocked; /* Restored if set_restore_sigmask() was used: */ diff --git a/include/linux/signal.h b/include/linux/signal.h -index e4d01469ed60..746dd5d28c54 100644 +index 0be5ce2375cb..6495fda18c2c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig) @@ -55,10 +55,10 @@ index 54c3269b8dda..c66f21193cf1 100644 spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c -index 7399bff2e08d..9bcf8f5e77ae 100644 +index ecec0f8bef7e..234e0ca9a74b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1822,6 +1822,7 @@ static __latent_entropy struct task_struct *copy_process( +@@ -1832,6 +1832,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -67,7 +67,7 @@ index 7399bff2e08d..9bcf8f5e77ae 100644 p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME diff --git a/kernel/signal.c b/kernel/signal.c -index 3565221b4fac..c84f13dd8626 100644 +index d5e764bb2444..b3b037f63c8a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -19,6 +19,7 @@ @@ -78,7 +78,7 @@ index 3565221b4fac..c84f13dd8626 100644 #include #include #include -@@ -392,13 +393,30 @@ void task_join_group_stop(struct task_struct *task) +@@ -397,13 +398,30 @@ void task_join_group_stop(struct task_struct *task) } } @@ -110,7 +110,7 @@ index 3565221b4fac..c84f13dd8626 100644 { struct sigqueue *q = NULL; struct user_struct *user; -@@ -415,7 +433,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi +@@ -420,7 +438,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { @@ -122,7 +122,7 @@ index 3565221b4fac..c84f13dd8626 100644 } else { print_dropped_signal(sig); } -@@ -432,6 +453,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int 
override_rlimi +@@ -437,6 +458,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi return q; } @@ -136,7 +136,7 @@ index 3565221b4fac..c84f13dd8626 100644 static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) -@@ -441,6 +469,21 @@ static void __sigqueue_free(struct sigqueue *q) +@@ -446,6 +474,21 @@ static void __sigqueue_free(struct sigqueue *q) kmem_cache_free(sigqueue_cachep, q); } @@ -158,7 +158,7 @@ index 3565221b4fac..c84f13dd8626 100644 void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; -@@ -453,6 +496,21 @@ void flush_sigqueue(struct sigpending *queue) +@@ -458,6 +501,21 @@ void flush_sigqueue(struct sigpending *queue) } } @@ -180,7 +180,7 @@ index 3565221b4fac..c84f13dd8626 100644 /* * Flush all pending signals for this kthread. */ -@@ -576,7 +634,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, +@@ -581,7 +639,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, (info->si_code == SI_TIMER) && (info->si_sys_private); @@ -189,7 +189,7 @@ index 3565221b4fac..c84f13dd8626 100644 } else { /* * Ok, it wasn't in the queue. This must be -@@ -613,6 +671,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +@@ -618,6 +676,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) bool resched_timer = false; int signr; @@ -198,7 +198,7 @@ index 3565221b4fac..c84f13dd8626 100644 /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ -@@ -1751,7 +1811,8 @@ EXPORT_SYMBOL(kill_pid); +@@ -1756,7 +1816,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { @@ -209,5 +209,5 @@ index 3565221b4fac..c84f13dd8626 100644 if (q) q->flags |= SIGQUEUE_PREALLOC; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0262-Add-localversion-for-RT-release.patch b/debian/patches-rt/0262-Add-localversion-for-RT-release.patch index a2fe662cb..d89a155ff 100644 --- a/debian/patches-rt/0262-Add-localversion-for-RT-release.patch +++ b/debian/patches-rt/0262-Add-localversion-for-RT-release.patch @@ -1,8 +1,8 @@ -From 1ce039a98b3970742c177d167d323f4a51fa51f0 Mon Sep 17 00:00:00 2001 +From 72f6593fc6e2e72b93484a7d2c03077aa6c32e4e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 20:25:16 +0200 -Subject: [PATCH 262/290] Add localversion for -RT release -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 262/319] Add localversion for -RT release +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Signed-off-by: Thomas Gleixner --- @@ -18,5 +18,5 @@ index 000000000000..1199ebade17b @@ -0,0 +1 @@ +-rt16 -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch index 0656823fd..02ecea9d8 100644 --- a/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch +++ b/debian/patches-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch @@ -1,9 +1,9 @@ -From 225db3efbd37d13d5f2f72e2371da7d6bd9a2405 Mon Sep 17 00:00:00 2001 +From 6a4420cb88ab99177938a2bc6e7f62e605812133 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 26 Mar 2019 18:31:54 +0100 -Subject: [PATCH 263/290] powerpc/pseries/iommu: Use a locallock instead +Subject: [PATCH 263/319] powerpc/pseries/iommu: Use 
a locallock instead local_irq_save() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The locallock protects the per-CPU variable tce_page. The function attempts to allocate memory while tce_page is protected (by disabling @@ -93,5 +93,5 @@ index 06f02960b439..d80d919c78d3 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch b/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch index 405db3a0d..765c7c7da 100644 --- a/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch +++ b/debian/patches-rt/0264-powerpc-reshuffle-TIF-bits.patch @@ -1,8 +1,8 @@ -From aef8aedf3ea12fb22a6f2b340a3f6bd4aacac4e0 Mon Sep 17 00:00:00 2001 +From 3bd9572a57b123ca2ba4349d8aed67d96e248e50 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 22 Mar 2019 17:15:58 +0100 -Subject: [PATCH 264/290] powerpc: reshuffle TIF bits -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 264/319] powerpc: reshuffle TIF bits +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Powerpc32/64 does not compile because TIF_SYSCALL_TRACE's bit is higher than 15 and the assembly instructions don't expect that. @@ -148,5 +148,5 @@ index ef02e41d5d96..229cdb04e9b0 100644 3: ldarx r10,0,r12 andc r10,r10,r11 -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch index 356db6cc9..15a724ed9 100644 --- a/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch +++ b/debian/patches-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch @@ -1,8 +1,8 @@ -From 80e49fd34b0d5b3dd5f122c074ce833b9d727ae8 Mon Sep 17 00:00:00 2001 +From 9958c5dc997969983c6d2cce116fc8628e3f802d Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Wed, 13 Mar 2019 11:40:34 +0000 -Subject: [PATCH 265/290] tty/sysrq: Convert show_lock to raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 265/319] tty/sysrq: Convert show_lock to raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Systems which don't provide arch_trigger_cpumask_backtrace() will invoke showacpu() from a smp_call_function() function which is invoked @@ -59,5 +59,5 @@ index 06ed20dd01ba..627517ad55bf 100644 static void sysrq_showregs_othercpus(struct work_struct *dummy) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch index d4f669a9b..cc44b40b5 100644 --- a/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch +++ b/debian/patches-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch @@ -1,9 +1,9 @@ -From 7bfefce1f534227fddc8cc519a813c04f7f6c1da Mon Sep 17 00:00:00 2001 +From ef267547d87ddb45e423131cd3ac2575f11bf9a9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 10 Apr 2019 11:01:37 +0200 -Subject: [PATCH 266/290] drm/i915: Don't disable interrupts independently of +Subject: [PATCH 266/319] drm/i915: Don't disable interrupts independently of the lock -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz The locks (timeline->lock and rq->lock) need to be taken with disabled interrupts. This is done in __retire_engine_request() by disabling the @@ -47,5 +47,5 @@ index 5c2c93cbab12..7124510b9131 100644 /* * The backing object for the context is done after switching to the -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch index d2a5e8895..9696f356b 100644 --- a/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch +++ b/debian/patches-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch @@ -1,9 +1,9 @@ -From d3bae006e096e47f22baad0a9cdf3f496f2bee6a Mon Sep 17 00:00:00 2001 +From 4f45596ee19784503b688216ac94613529dac495 Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Thu, 9 May 2019 14:33:20 -0500 -Subject: [PATCH 267/290] sched/completion: Fix a lockup in +Subject: [PATCH 267/319] sched/completion: Fix a lockup in wait_for_completion() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Consider following race: @@ -65,5 +65,5 @@ index 755a58084978..49c14137988e 100644 raw_spin_unlock_irq(&x->wait.lock); timeout = action(timeout); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch b/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch index 5bb594a36..61fc269ad 100644 --- a/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch +++ b/debian/patches-rt/0268-kthread-add-a-global-worker-thread.patch @@ -1,8 +1,8 @@ -From 9ec5aac95c498d3cd970b50a838ee738fe3861fc Mon Sep 17 00:00:00 2001 +From 466a14f58ab0b24e8700e7ac61660f180af4934c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 12 Feb 2019 15:09:38 +0100 -Subject: [PATCH 268/290] kthread: add a global worker thread. -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 268/319] kthread: add a global worker thread. 
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 0532e87d9d44795221aa921ba7024bde689cc894 ] @@ -176,5 +176,5 @@ index 5641b55783a6..9db017761a1f 100644 /** * kthread_associate_blkcg - associate blkcg to current kthread -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch index 54773c817..b92c904e7 100644 --- a/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/debian/patches-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -1,9 +1,9 @@ -From 0fcd7c5cee291d12eb90409cad4636cc17be0973 Mon Sep 17 00:00:00 2001 +From cf34fbf2667256851c65700b7fa4c21d15aaabe5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 269/290] genirq: Do not invoke the affinity callback via a +Subject: [PATCH 269/319] genirq: Do not invoke the affinity callback via a workqueue on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 2122adbe011cdc0eb62ad62494e181005b23c76a ] @@ -97,5 +97,5 @@ index ce86341a9e19..d5539e04e00a 100644 INIT_WORK(¬ify->work, irq_affinity_notify); #endif -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch b/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch index 043e5fbc5..1cd25b553 100644 --- a/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch +++ b/debian/patches-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch @@ -1,9 +1,9 @@ -From 5f103966da617ccbc58a5db958144f55725dd823 Mon Sep 17 00:00:00 2001 +From 8515c7b5a09be607644b2ebf10631a79b10d8a65 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 28 May 2019 10:42:15 +0200 -Subject: [PATCH 270/290] genirq: Handle missing work_struct in +Subject: [PATCH 270/319] genirq: Handle missing work_struct in irq_set_affinity_notifier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit bbc4d2a7d6ff54ba923640d9a42c7bef7185fe98 ] @@ -38,5 +38,5 @@ index d5539e04e00a..290cd520dba1 100644 #endif kref_put(&old_notify->kref, old_notify->release); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch b/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch index 2984a46a2..4ecc413b3 100644 --- a/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch +++ b/debian/patches-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch @@ -1,8 +1,8 @@ -From 499b81170e694652eb759044a65b1d2de8a02540 Mon Sep 17 00:00:00 2001 +From 77b3e06a7e9564031e217fb4cb6fa0b0896a1654 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 14 May 2019 17:07:44 +0200 -Subject: [PATCH 271/290] arm: imx6: cpuidle: Use raw_spinlock_t -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 271/319] arm: imx6: cpuidle: Use raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream 
commit 40d0332ec8312e9c090f0a5414d9c90e12b13611 ] @@ -48,5 +48,5 @@ index 326e870d7123..d9ac80aa1eb0 100644 return index; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch b/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch index d9dcb7f9d..a5c4a7449 100644 --- a/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch +++ b/debian/patches-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch @@ -1,9 +1,9 @@ -From 8ffd7db75f55aa4a603b0a6a83eb05fe77ca543c Mon Sep 17 00:00:00 2001 +From 417fabbbbde4e5460a9bd8ae574cd34f989c3c27 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 2019 18:29:13 +0200 -Subject: [PATCH 272/290] rcu: Don't allow to change rcu_normal_after_boot on +Subject: [PATCH 272/319] rcu: Don't allow to change rcu_normal_after_boot on RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit c6c058c10577815a2491ce661876cff00a4c3b15 ] @@ -32,5 +32,5 @@ index 16d8dba23329..ed75addd3ccd 100644 #ifdef CONFIG_DEBUG_LOCK_ALLOC -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch b/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch index 7a2a1e3bf..b8c3424e0 100644 --- a/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch +++ b/debian/patches-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch @@ -1,8 +1,8 @@ -From 6c25ddefabb7f6bd4adaa2de6a0fdce865eb4c0c Mon Sep 17 00:00:00 2001 +From ab7b3b8cf695f51443b693840a6bee8c1ea43f1d Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sat, 13 Apr 2019 11:22:51 +0800 -Subject: [PATCH 273/290] pci/switchtec: fix stream_open.cocci warnings -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 273/319] pci/switchtec: fix stream_open.cocci warnings +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 9462c69e29307adc95c289f50839d5d683973891 ] @@ -23,10 +23,10 @@ Signed-off-by: Steven Rostedt (VMware) 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c -index 77d4fb86d05b..ea70bc0b06e9 100644 +index f4c39feb5c04..821ac3ecca80 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c -@@ -360,7 +360,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp) +@@ -356,7 +356,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp) return PTR_ERR(stuser); filp->private_data = stuser; @@ -36,5 +36,5 @@ index 77d4fb86d05b..ea70bc0b06e9 100644 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch b/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch index 7dd3be7e2..a9b805bd7 100644 --- a/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch +++ b/debian/patches-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch @@ -1,8 +1,8 @@ -From e92a326589d8cf6722dac937b84c265aa10fdf1b Mon Sep 17 00:00:00 2001 +From bad6ac527d386a70c4b5a95d03a74e4c89ce53fc Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 
2019 19:33:16 +0200 -Subject: [PATCH 274/290] sched/core: Drop a preempt_disable_rt() statement -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 274/319] sched/core: Drop a preempt_disable_rt() statement +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 761126efdcbe3fa3e99c9079fa0ad6eca2f251f2 ] @@ -46,5 +46,5 @@ index cfde725e1017..678c2c4de4f5 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch b/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch index 12a650abd..fd7f00ca3 100644 --- a/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch +++ b/debian/patches-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch @@ -1,9 +1,9 @@ -From 07d9aaa5917187845b59efdf55fa02d70f35cad2 Mon Sep 17 00:00:00 2001 +From 48208e1517299d3fbf42d883134397f72b714027 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 24 Jun 2019 19:39:06 +0200 -Subject: [PATCH 275/290] timers: Redo the notification of canceling timers on +Subject: [PATCH 275/319] timers: Redo the notification of canceling timers on -RT -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit c71273154c2ad12e13333aada340ff30e826a11b ] @@ -136,10 +136,10 @@ index 0571b498db73..3e6c91bdf2ef 100644 /* diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index 1d1f077cffb3..61ab2c923579 100644 +index a465564367ec..dcf0204264f1 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c -@@ -436,7 +436,7 @@ int alarm_cancel(struct alarm *alarm) +@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) int ret = alarm_try_to_cancel(alarm); if (ret >= 0) return ret; @@ -646,5 +646,5 @@ index 2fcd56aa6092..1c67aab34ff5 100644 } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch b/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch index 73ee4e6d6..cbde4c6f6 100644 --- a/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch +++ b/debian/patches-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch @@ -1,9 +1,9 @@ -From 9aa540150db0e795f552585505278527b6011665 Mon Sep 17 00:00:00 2001 +From ae24f173efdb746dc26c4870eef5b32f280f864e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:13 +0200 -Subject: [PATCH 276/290] Revert "futex: Ensure lock/unlock symetry versus +Subject: [PATCH 276/319] Revert "futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 6a773b70cf105b46298ed3b44e77c102ce31d9ec ] @@ -31,5 +31,5 @@ index 4c448dddce3c..38f53b95e370 100644 continue; } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch b/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch index 4c57cc9c4..e783916a7 100644 --- a/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch +++ 
b/debian/patches-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch @@ -1,9 +1,9 @@ -From 6d3fa5ea1a0500d5a53e8698f6e426785017db89 Mon Sep 17 00:00:00 2001 +From 01842de36028d1d6d0ce577688095649be41f4dc Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:18 +0200 -Subject: [PATCH 277/290] Revert "futex: Fix bug on when a requeued RT task +Subject: [PATCH 277/319] Revert "futex: Fix bug on when a requeued RT task times out" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit f1a170cb3289a48df26cae3c60d77608f7a988bb ] @@ -79,5 +79,5 @@ index 546aaf058b9e..a501f3b47081 100644 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch b/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch index a738c407c..064c4012b 100644 --- a/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch +++ b/debian/patches-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch @@ -1,9 +1,9 @@ -From 6c64748ffc7a2dbc641b4364267f7218bd29c9c5 Mon Sep 17 00:00:00 2001 +From 6bd085d87346c9263a8ef219dc3ee0311f33a710 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:21 +0200 -Subject: [PATCH 278/290] Revert "rtmutex: Handle the various new futex race +Subject: [PATCH 278/319] Revert "rtmutex: Handle the various new futex race conditions" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 9e0265c21af4d6388d47dcd5ce20f76ec3a2e468 ] @@ -254,5 +254,5 @@ index a501f3b47081..758dc43872e5 100644 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch b/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch index 8fd0baea4..643b2659a 100644 --- a/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch +++ b/debian/patches-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch @@ -1,9 +1,9 @@ -From 5aac8105ead1e975578ef5085e3e1be3721ab3f5 Mon Sep 17 00:00:00 2001 +From 1d7f8dfb51963c7dcf985b8c08da5cc52029b8fc Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 17:44:27 +0200 -Subject: [PATCH 279/290] Revert "futex: workaround migrate_disable/enable in +Subject: [PATCH 279/319] Revert "futex: workaround migrate_disable/enable in different context" -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit a71221d81cc4873891ae44f3aa02df596079b786 ] @@ -66,5 +66,5 @@ index 6ee55df4f3de..f636dcc706ec 100644 /* -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch b/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch index 760fe96a9..e484d877c 100644 --- 
a/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch +++ b/debian/patches-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch @@ -1,8 +1,8 @@ -From 61b70039f1df8a7dca7776bf3f15ffb4f07e2fb4 Mon Sep 17 00:00:00 2001 +From ea4d2e52e3eb65ec1cd26e6fc01f5ddfeb4ea2ab Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 26 Jun 2019 11:59:44 +0200 -Subject: [PATCH 280/290] futex: Make the futex_hash_bucket lock raw -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 280/319] futex: Make the futex_hash_bucket lock raw +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit f646521aadedab78801c9befe193e2e8a0c99298 ] @@ -345,5 +345,5 @@ index f636dcc706ec..a9d9283605e5 100644 return 0; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch b/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch index 8e8afb4a5..b7c6bd939 100644 --- a/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch +++ b/debian/patches-rt/0281-futex-Delay-deallocation-of-pi_state.patch @@ -1,8 +1,8 @@ -From ebba085e584e2dafd1d06328a5d4f3b3f3fc0269 Mon Sep 17 00:00:00 2001 +From da369f30fa974bf526f11f2fe15d3599eee108a9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Jun 2019 13:35:36 +0200 -Subject: [PATCH 281/290] futex: Delay deallocation of pi_state -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 281/319] futex: Delay deallocation of pi_state +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit d7c7cf8cb68b7df17e6e50be1f25f35d83e686c7 ] @@ -179,5 +179,5 @@ index a9d9283605e5..0b8cff8d9162 100644 } else { struct rt_mutex *pi_mutex; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch b/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch index 8c83dad30..92b39889d 100644 --- a/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch +++ b/debian/patches-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch @@ -1,9 +1,9 @@ -From b68bca0d18af8b42c058485dfde37d4acfbabcc5 Mon Sep 17 00:00:00 2001 +From f87bee2082697ae481818a08252986e3612e30aa Mon Sep 17 00:00:00 2001 From: "Luis Claudio R. 
Goncalves" Date: Tue, 25 Jun 2019 11:28:04 -0300 -Subject: [PATCH 282/290] mm/zswap: Do not disable preemption in +Subject: [PATCH 282/319] mm/zswap: Do not disable preemption in zswap_frontswap_store() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 4e4cf4be79635e67144632d9135286381acbc95a ] @@ -123,5 +123,5 @@ index cd91fd9d96b8..420225d3ff0b 100644 freepage: zswap_entry_cache_free(entry); -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0283-revert-aio.patch b/debian/patches-rt/0283-revert-aio.patch index ce26536cd..4c5303835 100644 --- a/debian/patches-rt/0283-revert-aio.patch +++ b/debian/patches-rt/0283-revert-aio.patch @@ -1,8 +1,8 @@ -From a127a24d65e5ec18ec4af24df6695fbaccf925ca Mon Sep 17 00:00:00 2001 +From c0017598befb0c2a932866db38f3e73429ddc141 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 20 Sep 2019 17:50:53 -0400 -Subject: [PATCH 283/290] revert-aio -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 283/319] revert-aio +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz revert: fs/aio: simple simple work @@ -12,7 +12,7 @@ Signed-off-by: Steven Rostedt (VMware) 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/fs/aio.c b/fs/aio.c -index 16dcf8521c2c..911e23087dfb 100644 +index 93f8cf7fdeab..b5fbf2061868 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -42,7 +42,6 @@ @@ -67,5 +67,5 @@ index 16dcf8521c2c..911e23087dfb 100644 { unsigned i, new_nr; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0284-fs-aio-simple-simple-work.patch b/debian/patches-rt/0284-fs-aio-simple-simple-work.patch index 4057624ef..b93d80532 100644 --- a/debian/patches-rt/0284-fs-aio-simple-simple-work.patch +++ b/debian/patches-rt/0284-fs-aio-simple-simple-work.patch @@ -1,8 +1,8 @@ -From fb686d1f8844133f15f617e0effa288bcbef4a18 Mon Sep 17 00:00:00 2001 +From aa2189aadb7a124991d1667f986da7fcec17caaa Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 284/290] fs/aio: simple simple work -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 284/319] fs/aio: simple simple work +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 1a142116f6435ef070ecebb66d2d599507c10601 ] @@ -33,7 +33,7 @@ Signed-off-by: Steven Rostedt (VMware) 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/fs/aio.c b/fs/aio.c -index 911e23087dfb..0c613d805bf1 100644 +index b5fbf2061868..a92119e05869 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -121,6 +121,7 @@ struct kioctx { @@ -72,5 +72,5 @@ index 911e23087dfb..0c613d805bf1 100644 { unsigned i, new_nr; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0285-revert-thermal.patch b/debian/patches-rt/0285-revert-thermal.patch index 3aeac5a89..774bfaa39 100644 --- a/debian/patches-rt/0285-revert-thermal.patch +++ b/debian/patches-rt/0285-revert-thermal.patch @@ -1,8 +1,8 @@ -From 466a66edbba9ee1f7405d51c674dd3dc11bbdb13 Mon Sep 17 00:00:00 2001 +From bfc122d2b025c85b6ab82e81c0ae29a24074dd73 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 20 Sep 2019 17:50:53 -0400 -Subject: [PATCH 285/290] revert-thermal -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 285/319] revert-thermal +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Revert: thermal: Defer thermal wakups to threads @@ -116,5 +116,5 @@ index a5991cbb408f..1ef937d799e4 100644 module_exit(pkg_temp_thermal_exit) -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch index cfe562a53..b090e6dfb 100644 --- a/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch +++ b/debian/patches-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch @@ -1,8 +1,8 @@ -From d68ea3ebd9e76e3a78a5034c6af647c0c948b6a8 Mon Sep 17 00:00:00 2001 +From 19f0f635eecfedc7102da094cef1cd6361d01e2b Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 286/290] thermal: Defer thermal wakups to threads -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 286/319] thermal: Defer thermal wakups to threads +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit ad2408dc248fe58536eef5b2b5734d8f9d3a280b ] @@ -94,5 +94,5 @@ index 1ef937d799e4..82f21fd4afb0 100644 kfree(packages); } -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0287-revert-block.patch b/debian/patches-rt/0287-revert-block.patch index c824bc710..634918460 100644 --- a/debian/patches-rt/0287-revert-block.patch +++ b/debian/patches-rt/0287-revert-block.patch @@ -1,8 +1,8 @@ -From 5d4341f0df610e341ef6248b72a4d24ea53b7694 Mon Sep 17 00:00:00 2001 +From b3070acdd10cdfc4be8230473c0c8894910d20f6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 20 Sep 2019 17:50:54 -0400 -Subject: [PATCH 287/290] revert-block -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 287/319] revert-block +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz Revert swork version of: block: blk-mq: move blk_queue_usage_counter_release() into process context @@ -59,7 +59,7 @@ index 13bf37156bb0..4860cd26cd5a 100644 sizeof(struct request), 0, SLAB_PANIC, NULL); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 7b7c0bc6a514..f1960add94df 100644 +index 2f3b2e5196eb..111ab4209797 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,7 +27,6 @@ @@ -79,5 +79,5 @@ index 7b7c0bc6a514..f1960add94df 100644 struct list_head all_q_node; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch index 4de9279ef..6667c47fa 100644 --- a/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/debian/patches-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -1,9 +1,9 @@ -From 7de506cb172bfbca14767ae02f0309d70d3c7dcb Mon Sep 17 00:00:00 2001 +From 5e98a6d50f9efe451e93a38a87a852e1a8f11744 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 288/290] block: blk-mq: move blk_queue_usage_counter_release() +Subject: [PATCH 288/319] block: blk-mq: move blk_queue_usage_counter_release() into process context -Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit 61c928ecf4fe200bda9b49a0813b5ba0f43995b5 ] @@ -90,7 +90,7 @@ index 4860cd26cd5a..7d709465876e 100644 /* * Init percpu_ref in atomic mode so that it's faster to shutdown. diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index f1960add94df..15a489abfb62 100644 +index 111ab4209797..981103415cd8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -13,6 +13,7 @@ @@ -110,5 +110,5 @@ index f1960add94df..15a489abfb62 100644 struct list_head all_q_node; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0289-workqueue-rework.patch b/debian/patches-rt/0289-workqueue-rework.patch index aa415b029..15f815f97 100644 --- a/debian/patches-rt/0289-workqueue-rework.patch +++ b/debian/patches-rt/0289-workqueue-rework.patch @@ -1,8 +1,8 @@ -From b5e4a5ce5478ed6616eec2fed6c13da28cdbb61c Mon Sep 17 00:00:00 2001 +From c633a466375b6a537f79cffe84eac9271bc5b020 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 May 2019 18:52:27 +0200 -Subject: [PATCH 289/290] workqueue: rework -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz +Subject: [PATCH 289/319] workqueue: rework +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz [ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ] @@ -155,7 +155,7 @@ index 82f21fd4afb0..1ef937d799e4 100644 kfree(packages); } diff --git a/fs/aio.c b/fs/aio.c -index 0c613d805bf1..c74dd321f5b7 100644 +index a92119e05869..37e75bb0c406 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -121,7 +121,7 @@ struct kioctx { @@ -204,7 +204,7 @@ index 0473efda4c65..da587e60fe86 100644 #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 15a489abfb62..45653e23e4cd 100644 +index 981103415cd8..6a0bfa0a2c52 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -656,7 +656,7 @@ struct request_queue { @@ -1565,5 +1565,5 @@ index 714d0cfe4d56..75f59299dd06 100644 attrs->no_numa = true; ordered_wq_attrs[i] = attrs; -- -2.24.1 +2.25.0 diff --git a/debian/patches-rt/0290-Linux-4.19.94-rt38-REBASE.patch b/debian/patches-rt/0290-Linux-4.19.94-rt38-REBASE.patch deleted file mode 100644 index 17ae447fc..000000000 --- a/debian/patches-rt/0290-Linux-4.19.94-rt38-REBASE.patch +++ /dev/null @@ -1,20 +0,0 @@ -From dee368fe90e9506b8a4c9948fb611fb77dda153f Mon Sep 17 00:00:00 2001 -From: Steven Rostedt -Date: Mon, 14 Oct 2019 13:24:30 -0400 -Subject: [PATCH 290/290] Linux 4.19.94-rt38 REBASE -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.94-rt38.tar.xz - ---- - localversion-rt | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/localversion-rt b/localversion-rt -index 1199ebade17b..49bae8d6aa67 100644 ---- a/localversion-rt -+++ b/localversion-rt -@@ -1 +1 @@ ---rt16 -+-rt38 --- -2.24.1 - diff --git a/debian/patches-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch b/debian/patches-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch new file mode 100644 index 000000000..4b0406d75 --- /dev/null +++ b/debian/patches-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch @@ -0,0 +1,48 @@ +From b275478a0e595eda55ed29d8c1ad9aad9d2a010a Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 13 Aug 2019 12:30:12 +0200 +Subject: [PATCH 290/319] i2c: exynos5: Remove IRQF_ONESHOT 
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 4b217df0ab3f7910c96e42091cc7d9f221d05f01 ] + +The drivers sets IRQF_ONESHOT and passes only a primary handler. The IRQ +is masked while the primary is handler is invoked independently of +IRQF_ONESHOT. +With IRQF_ONESHOT the core code will not force-thread the interrupt and +this is probably not intended. I *assume* that the original author copied +the IRQ registration from another driver which passed a primary and +secondary handler and removed the secondary handler but keeping the +ONESHOT flag. + +Remove IRQF_ONESHOT. + +Reported-by: Benjamin Rouxel +Tested-by: Benjamin Rouxel +Cc: Kukjin Kim +Cc: Krzysztof Kozlowski +Cc: linux-samsung-soc@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + drivers/i2c/busses/i2c-exynos5.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c +index c1ce2299a76e..5c57ecf4b79e 100644 +--- a/drivers/i2c/busses/i2c-exynos5.c ++++ b/drivers/i2c/busses/i2c-exynos5.c +@@ -800,9 +800,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev) + } + + ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, +- IRQF_NO_SUSPEND | IRQF_ONESHOT, +- dev_name(&pdev->dev), i2c); +- ++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c); + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); + goto err_clk; +-- +2.25.0 + diff --git a/debian/patches-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch b/debian/patches-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch new file mode 100644 index 000000000..f93044db6 --- /dev/null +++ b/debian/patches-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch @@ -0,0 +1,42 @@ +From 6ac99b9a074232546f3ca080c4b44b1331ef4613 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 13 Aug 2019 12:30:37 +0200 +Subject: [PATCH 291/319] i2c: hix5hd2: Remove IRQF_ONESHOT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit e88b481f3f86f11e3243e0808a830e5ca5782a9d ] + +The drivers sets IRQF_ONESHOT and passes only a primary handler. The IRQ +is masked while the primary is handler is invoked independently of +IRQF_ONESHOT. +With IRQF_ONESHOT the core code will not force-thread the interrupt and +this is probably not intended. I *assume* that the original author copied +the IRQ registration from another driver which passed a primary and +secondary handler and removed the secondary handler but keeping the +ONESHOT flag. + +Remove IRQF_ONESHOT. 
+ +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + drivers/i2c/busses/i2c-hix5hd2.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c +index 061a4bfb03f4..575aff50b19a 100644 +--- a/drivers/i2c/busses/i2c-hix5hd2.c ++++ b/drivers/i2c/busses/i2c-hix5hd2.c +@@ -449,8 +449,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) + hix5hd2_i2c_init(priv); + + ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq, +- IRQF_NO_SUSPEND | IRQF_ONESHOT, +- dev_name(&pdev->dev), priv); ++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv); + if (ret != 0) { + dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq); + goto err_clk; +-- +2.25.0 + diff --git a/debian/patches-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch b/debian/patches-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch new file mode 100644 index 000000000..16df9ccd9 --- /dev/null +++ b/debian/patches-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch @@ -0,0 +1,50 @@ +From c3919e76704a2f2c0bf2e0bddbed37a8627e25b6 Mon Sep 17 00:00:00 2001 +From: Juri Lelli +Date: Wed, 31 Jul 2019 12:37:15 +0200 +Subject: [PATCH 292/319] sched/deadline: Ensure inactive_timer runs in hardirq + context +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit ba94e7aed7405c58251b1380e6e7d73aa8284b41 ] + +SCHED_DEADLINE inactive timer needs to run in hardirq context (as +dl_task_timer already does) on PREEMPT_RT + +Change the mode to HRTIMER_MODE_REL_HARD. + +[ tglx: Fixed up the start site, so mode debugging works ] + +Signed-off-by: Juri Lelli +Signed-off-by: Thomas Gleixner +Link: https://lkml.kernel.org/r/20190731103715.4047-1-juri.lelli@redhat.com +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/deadline.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 974a8f9b615a..929167a1d991 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p) + + dl_se->dl_non_contending = 1; + get_task_struct(p); +- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL); ++ hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); + } + + static void task_contending(struct sched_dl_entity *dl_se, int flags) +@@ -1325,7 +1325,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) + { + struct hrtimer *timer = &dl_se->inactive_timer; + +- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + timer->function = inactive_task_timer; + } + +-- +2.25.0 + diff --git a/debian/patches-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch b/debian/patches-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch new file mode 100644 index 000000000..1b08ecd73 --- /dev/null +++ b/debian/patches-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch @@ -0,0 +1,120 @@ +From 949a5710e9cef5b8071c8ec81b7e02efe3216e25 Mon Sep 17 00:00:00 2001 +From: Clark Williams +Date: Mon, 15 Jul 2019 15:25:00 -0500 +Subject: [PATCH 293/319] thermal/x86_pkg_temp: make pkg_temp_lock a raw + spinlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz 
+ +[ Upstream commit 8b03bb3ea7861b70b506199a69b1c8f81fe2d4d0 ] + +The spinlock pkg_temp_lock has the potential of being taken in atomic +context on v5.2-rt PREEMPT_RT. It's static and limited scope so +go ahead and make it a raw spinlock. + +Signed-off-by: Clark Williams +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + drivers/thermal/x86_pkg_temp_thermal.c | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c +index 1ef937d799e4..540becb78a0f 100644 +--- a/drivers/thermal/x86_pkg_temp_thermal.c ++++ b/drivers/thermal/x86_pkg_temp_thermal.c +@@ -75,7 +75,7 @@ static int max_packages __read_mostly; + /* Array of package pointers */ + static struct pkg_device **packages; + /* Serializes interrupt notification, work and hotplug */ +-static DEFINE_SPINLOCK(pkg_temp_lock); ++static DEFINE_RAW_SPINLOCK(pkg_temp_lock); + /* Protects zone operation in the work function against hotplug removal */ + static DEFINE_MUTEX(thermal_zone_mutex); + +@@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) + u64 msr_val, wr_val; + + mutex_lock(&thermal_zone_mutex); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + ++pkg_work_cnt; + + pkgdev = pkg_temp_thermal_get_dev(cpu); + if (!pkgdev) { +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + mutex_unlock(&thermal_zone_mutex); + return; + } +@@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) + } + + enable_pkg_thres_interrupt(); +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + + /* + * If tzone is not NULL, then thermal_zone_mutex will prevent the +@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val) + struct pkg_device *pkgdev; + unsigned long flags; + +- spin_lock_irqsave(&pkg_temp_lock, flags); ++ raw_spin_lock_irqsave(&pkg_temp_lock, flags); + ++pkg_interrupt_cnt; + + disable_pkg_thres_interrupt(); +@@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val) + pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); + } + +- spin_unlock_irqrestore(&pkg_temp_lock, flags); ++ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); + return 0; + } + +@@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) + pkgdev->msr_pkg_therm_high); + + cpumask_set_cpu(cpu, &pkgdev->cpumask); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + packages[pkgid] = pkgdev; +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + return 0; + } + +@@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + } + + /* Protect against work and interrupts */ +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + + /* + * Check whether this cpu was the current target and store the new +@@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + * To cancel the work we need to drop the lock, otherwise + * we might deadlock if the work needs to be flushed. 
+ */ +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + cancel_delayed_work_sync(&pkgdev->work); +- spin_lock_irq(&pkg_temp_lock); ++ raw_spin_lock_irq(&pkg_temp_lock); + /* + * If this is not the last cpu in the package and the work + * did not run after we dropped the lock above, then we +@@ -477,7 +477,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) + pkg_thermal_schedule_work(target, &pkgdev->work); + } + +- spin_unlock_irq(&pkg_temp_lock); ++ raw_spin_unlock_irq(&pkg_temp_lock); + + /* Final cleanup if this is the last cpu */ + if (lastcpu) +-- +2.25.0 + diff --git a/debian/patches-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch b/debian/patches-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch new file mode 100644 index 000000000..154c6a950 --- /dev/null +++ b/debian/patches-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch @@ -0,0 +1,296 @@ +From ba6019c487e8d5455335d67b65afd82ca50a4589 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 14 Aug 2019 16:38:43 +0200 +Subject: [PATCH 294/319] dma-buf: Use seqlock_t instread disabling preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 240610aa31094f51f299f06eb8dae8d4cd8d4500 ] + +"dma reservation" disables preemption while acquiring the write access +for "seqcount" and then may acquire a spinlock_t. + +Replace the seqcount with a seqlock_t which provides seqcount like +semantic and lock for writer. + +Link: https://lkml.kernel.org/r/f410b429-db86-f81c-7c67-f563fa808b62@free.fr +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + drivers/dma-buf/dma-buf.c | 8 ++-- + drivers/dma-buf/reservation.c | 43 +++++++------------ + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +-- + drivers/gpu/drm/i915/i915_gem.c | 10 ++--- + include/linux/reservation.h | 4 +- + 5 files changed, 29 insertions(+), 42 deletions(-) + +diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c +index 69842145c223..4c3ef46e7149 100644 +--- a/drivers/dma-buf/dma-buf.c ++++ b/drivers/dma-buf/dma-buf.c +@@ -179,7 +179,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) + return 0; + + retry: +- seq = read_seqcount_begin(&resv->seq); ++ seq = read_seqbegin(&resv->seq); + rcu_read_lock(); + + fobj = rcu_dereference(resv->fence); +@@ -188,7 +188,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) + else + shared_count = 0; + fence_excl = rcu_dereference(resv->fence_excl); +- if (read_seqcount_retry(&resv->seq, seq)) { ++ if (read_seqretry(&resv->seq, seq)) { + rcu_read_unlock(); + goto retry; + } +@@ -1046,12 +1046,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) + + robj = buf_obj->resv; + while (true) { +- seq = read_seqcount_begin(&robj->seq); ++ seq = read_seqbegin(&robj->seq); + rcu_read_lock(); + fobj = rcu_dereference(robj->fence); + shared_count = fobj ? 
fobj->shared_count : 0; + fence = rcu_dereference(robj->fence_excl); +- if (!read_seqcount_retry(&robj->seq, seq)) ++ if (!read_seqretry(&robj->seq, seq)) + break; + rcu_read_unlock(); + } +diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c +index 49ab09468ba1..f11d58492216 100644 +--- a/drivers/dma-buf/reservation.c ++++ b/drivers/dma-buf/reservation.c +@@ -109,8 +109,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + + dma_fence_get(fence); + +- preempt_disable(); +- write_seqcount_begin(&obj->seq); ++ write_seqlock(&obj->seq); + + for (i = 0; i < fobj->shared_count; ++i) { + struct dma_fence *old_fence; +@@ -121,8 +120,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + if (old_fence->context == fence->context) { + /* memory barrier is added by write_seqcount_begin */ + RCU_INIT_POINTER(fobj->shared[i], fence); +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + dma_fence_put(old_fence); + return; +@@ -146,8 +144,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, + fobj->shared_count++; + } + +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + dma_fence_put(signaled); + } +@@ -191,15 +188,13 @@ reservation_object_add_shared_replace(struct reservation_object *obj, + fobj->shared_count++; + + done: +- preempt_disable(); +- write_seqcount_begin(&obj->seq); ++ write_seqlock(&obj->seq); + /* + * RCU_INIT_POINTER can be used here, + * seqcount provides the necessary barriers + */ + RCU_INIT_POINTER(obj->fence, fobj); +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + if (!old) + return; +@@ -259,14 +254,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, + if (fence) + dma_fence_get(fence); + +- preempt_disable(); +- write_seqcount_begin(&obj->seq); +- /* write_seqcount_begin provides the necessary memory barrier */ ++ write_seqlock(&obj->seq); + RCU_INIT_POINTER(obj->fence_excl, fence); + if (old) + old->shared_count = 0; +- write_seqcount_end(&obj->seq); +- preempt_enable(); ++ write_sequnlock(&obj->seq); + + /* inplace update, no shared fences */ + while (i--) +@@ -349,13 +341,10 @@ int reservation_object_copy_fences(struct reservation_object *dst, + src_list = reservation_object_get_list(dst); + old = reservation_object_get_excl(dst); + +- preempt_disable(); +- write_seqcount_begin(&dst->seq); +- /* write_seqcount_begin provides the necessary memory barrier */ ++ write_seqlock(&dst->seq); + RCU_INIT_POINTER(dst->fence_excl, new); + RCU_INIT_POINTER(dst->fence, dst_list); +- write_seqcount_end(&dst->seq); +- preempt_enable(); ++ write_sequnlock(&dst->seq); + + if (src_list) + kfree_rcu(src_list, rcu); +@@ -396,7 +385,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, + shared_count = i = 0; + + rcu_read_lock(); +- seq = read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + + fence_excl = rcu_dereference(obj->fence_excl); + if (fence_excl && !dma_fence_get_rcu(fence_excl)) +@@ -445,7 +434,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, + } + } + +- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { ++ if (i != shared_count || read_seqretry(&obj->seq, seq)) { + while (i--) + dma_fence_put(shared[i]); + dma_fence_put(fence_excl); +@@ -494,7 +483,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + + retry: + shared_count = 0; +- seq = 
read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + rcu_read_lock(); + i = -1; + +@@ -541,7 +530,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + + rcu_read_unlock(); + if (fence) { +- if (read_seqcount_retry(&obj->seq, seq)) { ++ if (read_seqretry(&obj->seq, seq)) { + dma_fence_put(fence); + goto retry; + } +@@ -597,7 +586,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + retry: + ret = true; + shared_count = 0; +- seq = read_seqcount_begin(&obj->seq); ++ seq = read_seqbegin(&obj->seq); + + if (test_all) { + unsigned i; +@@ -618,7 +607,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + break; + } + +- if (read_seqcount_retry(&obj->seq, seq)) ++ if (read_seqretry(&obj->seq, seq)) + goto retry; + } + +@@ -631,7 +620,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + if (ret < 0) + goto retry; + +- if (read_seqcount_retry(&obj->seq, seq)) ++ if (read_seqretry(&obj->seq, seq)) + goto retry; + } + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index f92597c292fe..10c675850aac 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -261,11 +261,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, + } + + /* Install the new fence list, seqcount provides the barriers */ +- preempt_disable(); +- write_seqcount_begin(&resv->seq); ++ write_seqlock(&resv->seq); + RCU_INIT_POINTER(resv->fence, new); +- write_seqcount_end(&resv->seq); +- preempt_enable(); ++ write_sequnlock(&resv->seq); + + /* Drop the references to the removed fences or move them to ef_list */ + for (i = j, k = 0; i < old->shared_count; ++i) { +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index c7d05ac7af3c..d484e79316bf 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -516,7 +516,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, + long timeout, + struct intel_rps_client *rps_client) + { +- unsigned int seq = __read_seqcount_begin(&resv->seq); ++ unsigned int seq = read_seqbegin(&resv->seq); + struct dma_fence *excl; + bool prune_fences = false; + +@@ -569,9 +569,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, + * signaled and that the reservation object has not been changed (i.e. + * no new fences have been added). 
+ */ +- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { ++ if (prune_fences && !read_seqretry(&resv->seq, seq)) { + if (reservation_object_trylock(resv)) { +- if (!__read_seqcount_retry(&resv->seq, seq)) ++ if (!read_seqretry(&resv->seq, seq)) + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); + } +@@ -4615,7 +4615,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + * + */ + retry: +- seq = raw_read_seqcount(&obj->resv->seq); ++ seq = read_seqbegin(&obj->resv->seq); + + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); +@@ -4633,7 +4633,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + } + } + +- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) ++ if (args->busy && read_seqretry(&obj->resv->seq, seq)) + goto retry; + + err = 0; +diff --git a/include/linux/reservation.h b/include/linux/reservation.h +index 02166e815afb..0b31df1af698 100644 +--- a/include/linux/reservation.h ++++ b/include/linux/reservation.h +@@ -72,7 +72,7 @@ struct reservation_object_list { + */ + struct reservation_object { + struct ww_mutex lock; +- seqcount_t seq; ++ seqlock_t seq; + + struct dma_fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; +@@ -92,7 +92,7 @@ reservation_object_init(struct reservation_object *obj) + { + ww_mutex_init(&obj->lock, &reservation_ww_class); + +- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); ++ seqlock_init(&obj->seq); + RCU_INIT_POINTER(obj->fence, NULL); + RCU_INIT_POINTER(obj->fence_excl, NULL); + obj->staged = NULL; +-- +2.25.0 + diff --git a/debian/patches-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch b/debian/patches-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch new file mode 100644 index 000000000..be120d9af --- /dev/null +++ b/debian/patches-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch @@ -0,0 +1,54 @@ +From ddc93ca5505342645b197c99b2a040d3be8b348b Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Tue, 13 Aug 2019 14:29:41 +0200 +Subject: [PATCH 295/319] KVM: arm/arm64: Let the timer expire in hardirq + context on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 719cc080c914045a6e35787bf4dc3ba91cfd3efb ] + +The timers are canceled from an preempt-notifier which is invoked with +disabled preemption which is not allowed on PREEMPT_RT. +The timer callback is short so in could be invoked in hard-IRQ context +on -RT. + +Let the timer expire on hard-IRQ context even on -RT. 
+ +Signed-off-by: Thomas Gleixner +Acked-by: Marc Zyngier +Tested-by: Julien Grall +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + virt/kvm/arm/arch_timer.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c +index 17cecc96f735..217d39f40393 100644 +--- a/virt/kvm/arm/arch_timer.c ++++ b/virt/kvm/arm/arch_timer.c +@@ -67,7 +67,7 @@ static inline bool userspace_irqchip(struct kvm *kvm) + static void soft_timer_start(struct hrtimer *hrt, u64 ns) + { + hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), +- HRTIMER_MODE_ABS); ++ HRTIMER_MODE_ABS_HARD); + } + + static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) +@@ -638,10 +638,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) + vcpu_ptimer(vcpu)->cntvoff = 0; + + INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); +- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + timer->bg_timer.function = kvm_bg_timer_expire; + +- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + timer->phys_timer.function = kvm_phys_timer_expire; + + vtimer->irq.irq = default_vtimer_irq.irq; +-- +2.25.0 + diff --git a/debian/patches-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch b/debian/patches-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch new file mode 100644 index 000000000..78287dd38 --- /dev/null +++ b/debian/patches-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch @@ -0,0 +1,34 @@ +From 19a2e03459ead700d7fd53c47677511bbd27d48f Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 14 Aug 2019 17:08:58 +0200 +Subject: [PATCH 296/319] x86: preempt: Check preemption level before looking + at lazy-preempt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 19fc8557f2323c52b26561651ed4d51fc688a740 ] + +Before evaluating the lazy-preempt state it must be ensure that the +preempt-count is zero. 
+ +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + arch/x86/include/asm/preempt.h | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h +index f66708779274..afa0e42ccdd1 100644 +--- a/arch/x86/include/asm/preempt.h ++++ b/arch/x86/include/asm/preempt.h +@@ -96,6 +96,8 @@ static __always_inline bool __preempt_count_dec_and_test(void) + if (____preempt_count_dec_and_test()) + return true; + #ifdef CONFIG_PREEMPT_LAZY ++ if (preempt_count()) ++ return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +-- +2.25.0 + diff --git a/debian/patches-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch b/debian/patches-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch new file mode 100644 index 000000000..96307ec39 --- /dev/null +++ b/debian/patches-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch @@ -0,0 +1,42 @@ +From 442b496afde51592914e85fc0d0d00970944a460 Mon Sep 17 00:00:00 2001 +From: Julien Grall +Date: Wed, 21 Aug 2019 10:24:07 +0100 +Subject: [PATCH 297/319] hrtimer: Use READ_ONCE to access timer->base in + hrimer_grab_expiry_lock() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 2c8fdbe7ef0ad06c1a326886c5954e117b5657d6 ] + +The update to timer->base is protected by the base->cpu_base->lock(). +However, hrtimer_grab_expirty_lock() does not access it with the lock. + +So it would theorically be possible to have timer->base changed under +our feet. We need to prevent the compiler to refetch timer->base so the +check and the access is performed on the same base. + +Other access of timer->base are either done with a lock or protected +with READ_ONCE(). So use READ_ONCE() in hrtimer_grab_expirty_lock(). + +Signed-off-by: Julien Grall +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/time/hrtimer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 94d97eae0a46..d6026c170c2d 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -941,7 +941,7 @@ EXPORT_SYMBOL_GPL(hrtimer_forward); + + void hrtimer_grab_expiry_lock(const struct hrtimer *timer) + { +- struct hrtimer_clock_base *base = timer->base; ++ struct hrtimer_clock_base *base = READ_ONCE(timer->base); + + if (base && base->cpu_base) { + spin_lock(&base->cpu_base->softirq_expiry_lock); +-- +2.25.0 + diff --git a/debian/patches-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch b/debian/patches-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch new file mode 100644 index 000000000..aa4961313 --- /dev/null +++ b/debian/patches-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch @@ -0,0 +1,41 @@ +From ac0fd05d6de78b52ea2a788a75c20f59c23325bd Mon Sep 17 00:00:00 2001 +From: Julien Grall +Date: Wed, 21 Aug 2019 10:24:08 +0100 +Subject: [PATCH 298/319] hrtimer: Don't grab the expiry lock for non-soft + hrtimer +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit fd420354bea2f57c11f3de191dffdeea76531e76 ] + +Acquiring the lock in hrtimer_grab_expiry_lock() is designed for +sleeping-locks and should not be used with disabled interrupts. 
+hrtimer_cancel() may invoke hrtimer_grab_expiry_lock() also for locks +which expire in hard-IRQ context. + +Let hrtimer_cancel() invoke hrtimer_grab_expiry_lock() only for locks +which expire in softirq context. + +Signed-off-by: Julien Grall +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: rewrite changelog] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/time/hrtimer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index d6026c170c2d..49d20fe8570f 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -943,7 +943,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) + { + struct hrtimer_clock_base *base = READ_ONCE(timer->base); + +- if (base && base->cpu_base) { ++ if (timer->is_soft && base && base->cpu_base) { + spin_lock(&base->cpu_base->softirq_expiry_lock); + spin_unlock(&base->cpu_base->softirq_expiry_lock); + } +-- +2.25.0 + diff --git a/debian/patches-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch b/debian/patches-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch new file mode 100644 index 000000000..aba50764c --- /dev/null +++ b/debian/patches-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch @@ -0,0 +1,43 @@ +From 80ffe312fe52287d041ec16ba9d4906f39a5ad72 Mon Sep 17 00:00:00 2001 +From: Julien Grall +Date: Wed, 21 Aug 2019 10:24:09 +0100 +Subject: [PATCH 299/319] hrtimer: Prevent using hrtimer_grab_expiry_lock() on + migration_base +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit cef1b87f98823af923a386f3f69149acb212d4a1 ] + +As tglx puts it: +|If base == migration_base then there is no point to lock soft_expiry_lock +|simply because the timer is not executing the callback in soft irq context +|and the whole lock/unlock dance can be avoided. + +Furthermore, all the path leading to hrtimer_grab_expiry_lock() assumes +timer->base and timer->base->cpu_base are always non-NULL. So it is safe +to remove the NULL checks here. 
+ +Signed-off-by: Julien Grall +Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1908211557420.2223@nanos.tec.linutronix.de +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: rewrite changelog] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/time/hrtimer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 49d20fe8570f..1a5167c68310 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -943,7 +943,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) + { + struct hrtimer_clock_base *base = READ_ONCE(timer->base); + +- if (timer->is_soft && base && base->cpu_base) { ++ if (timer->is_soft && base != &migration_base) { + spin_lock(&base->cpu_base->softirq_expiry_lock); + spin_unlock(&base->cpu_base->softirq_expiry_lock); + } +-- +2.25.0 + diff --git a/debian/patches-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch b/debian/patches-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch new file mode 100644 index 000000000..43a063c0c --- /dev/null +++ b/debian/patches-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch @@ -0,0 +1,75 @@ +From 0ab2c6f2dc6d8c42c0a68d5f63725b68b68dbeb7 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 4 Sep 2019 16:55:27 +0200 +Subject: [PATCH 300/319] hrtimer: Add a missing bracket and hide + `migration_base' on !SMP +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 47b6de0b7f22c28b40275aeede3993d807668c3b ] + +[ Upstream commit 5d2295f3a93b04986d069ebeaf5b07725f9096c1 ] + +The recent change to avoid taking the expiry lock when a timer is currently +migrated missed to add a bracket at the end of the if statement leading to +compile errors. Since that commit the variable `migration_base' is always +used but it is only available on SMP configuration thus leading to another +compile error. The changelog says "The timer base and base->cpu_base +cannot be NULL in the code path", so it is safe to limit this check to SMP +configurations only. + +Add the missing bracket to the if statement and hide `migration_base' +behind CONFIG_SMP bars. + +[ tglx: Mark the functions inline ... 
] + +Fixes: 68b2c8c1e4210 ("hrtimer: Don't take expiry_lock when timer is currently migrated") +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Thomas Gleixner +Link: https://lkml.kernel.org/r/20190904145527.eah7z56ntwobqm6j@linutronix.de +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: port back to RT] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/time/hrtimer.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 1a5167c68310..e54a95de8b79 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -150,6 +150,11 @@ static struct hrtimer_cpu_base migration_cpu_base = { + + #define migration_base migration_cpu_base.clock_base[0] + ++static inline bool is_migration_base(struct hrtimer_clock_base *base) ++{ ++ return base == &migration_base; ++} ++ + /* + * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock + * means that all timers which are tied to this base via timer->base are +@@ -274,6 +279,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, + + #else /* CONFIG_SMP */ + ++static inline bool is_migration_base(struct hrtimer_clock_base *base) ++{ ++ return false; ++} ++ + static inline struct hrtimer_clock_base * + lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) + { +@@ -943,7 +953,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) + { + struct hrtimer_clock_base *base = READ_ONCE(timer->base); + +- if (timer->is_soft && base != &migration_base) { ++ if (timer->is_soft && is_migration_base(base)) { + spin_lock(&base->cpu_base->softirq_expiry_lock); + spin_unlock(&base->cpu_base->softirq_expiry_lock); + } +-- +2.25.0 + diff --git a/debian/patches-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch b/debian/patches-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch new file mode 100644 index 000000000..d33efe79b --- /dev/null +++ b/debian/patches-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch @@ -0,0 +1,41 @@ +From b8e6a62c4429fb11e7e9332bfdbe914fba024216 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 16 Sep 2019 12:33:59 +0200 +Subject: [PATCH 301/319] posix-timers: Unlock expiry lock in the early return +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 356a2781375ec58521a9bc3f500488745990c242 ] + +Patch ("posix-timers: Add expiry lock") acquired a lock in +run_posix_cpu_timers() but didn't drop the lock in the early return. + +Unlock the lock in the early return path. 
+ +Reported-by: kbuild test robot +Reported-by: Dan Carpenter +Reviewed-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/time/posix-cpu-timers.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index 765e700962ab..c9964dc3276b 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -1175,8 +1175,10 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) + expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock); + spin_lock(expiry_lock); + +- if (!lock_task_sighand(tsk, &flags)) ++ if (!lock_task_sighand(tsk, &flags)) { ++ spin_unlock(expiry_lock); + return; ++ } + /* + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and +-- +2.25.0 + diff --git a/debian/patches-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch b/debian/patches-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch new file mode 100644 index 000000000..77bc5bd11 --- /dev/null +++ b/debian/patches-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch @@ -0,0 +1,58 @@ +From 8f12a178508fbd42268f68a093bb590cac046816 Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Tue, 24 Sep 2019 14:36:41 +0200 +Subject: [PATCH 302/319] =?UTF-8?q?sched:=20migrate=5Fdis/enable:=20Use=20?= + =?UTF-8?q?sleeping=5Flock=E2=80=A6()=20to=20annotate=20sleeping=20points?= +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 4230dd3824c3e1785504e6f757ce79a4b55651fa ] + +Without this, rcu_note_context_switch() will complain if an RCU read lock +is held when migrate_enable() calls stop_one_cpu(). Likewise when +migrate_disable() calls pin_current_cpu() which calls __read_rt_lock() -- +which bypasses the part of the mutex code that calls sleeping_lock_inc(). 
+ +Signed-off-by: Scott Wood +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: use sleeping_lock_…() ] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 2 ++ + kernel/sched/core.c | 3 +++ + 2 files changed, 5 insertions(+) + +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 00dac381da2e..7170fbd35a22 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -307,7 +307,9 @@ void pin_current_cpu(void) + preempt_lazy_enable(); + preempt_enable(); + ++ sleeping_lock_inc(); + __read_rt_lock(cpuhp_pin); ++ sleeping_lock_dec(); + + preempt_disable(); + preempt_lazy_disable(); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index a1c0123e7636..3413b9ebef1f 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7372,7 +7372,10 @@ void migrate_enable(void) + unpin_current_cpu(); + preempt_lazy_enable(); + preempt_enable(); ++ ++ sleeping_lock_inc(); + stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); ++ sleeping_lock_dec(); + tlb_migrate_finish(p->mm); + + return; +-- +2.25.0 + diff --git a/debian/patches-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch b/debian/patches-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch new file mode 100644 index 000000000..199aaebe1 --- /dev/null +++ b/debian/patches-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch @@ -0,0 +1,39 @@ +From b69f6ca67cc7f82f3da3d4a9885488058a12c1ed Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 27 Jul 2019 00:56:32 -0500 +Subject: [PATCH 303/319] sched: __set_cpus_allowed_ptr: Check cpus_mask, not + cpus_ptr +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit e5606fb7b042db634ed62b4dd733d62e050e468f ] + +This function is concerned with the long-term cpu mask, not the +transitory mask the task might have while migrate disabled. Before +this patch, if a task was migrate disabled at the time +__set_cpus_allowed_ptr() was called, and the new mask happened to be +equal to the cpu that the task was running on, then the mask update +would be lost. + +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 3413b9ebef1f..d6bd8129a390 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1157,7 +1157,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, + goto out; + } + +- if (cpumask_equal(p->cpus_ptr, new_mask)) ++ if (cpumask_equal(&p->cpus_mask, new_mask)) + goto out; + + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); +-- +2.25.0 + diff --git a/debian/patches-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch b/debian/patches-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch new file mode 100644 index 000000000..a921fb04f --- /dev/null +++ b/debian/patches-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch @@ -0,0 +1,39 @@ +From 9db4f95e41cb7ff1db6ba93a503ea922ba3aaedf Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 27 Jul 2019 00:56:33 -0500 +Subject: [PATCH 304/319] sched: Remove dead __migrate_disabled() check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 14d9272d534ea91262e15db99443fc5995c7c016 ] + +This code was unreachable given the __migrate_disabled() branch +to "out" immediately beforehand. 
+ +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/core.c | 7 ------- + 1 file changed, 7 deletions(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index d6bd8129a390..a29f33e776d0 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1182,13 +1182,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, + if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + +-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +- if (__migrate_disabled(p)) { +- p->migrate_disable_update = 1; +- goto out; +- } +-#endif +- + if (task_running(rq, p) || p->state == TASK_WAKING) { + struct migration_arg arg = { p, dest_cpu }; + /* Need help from migration thread: drop lock and wait. */ +-- +2.25.0 + diff --git a/debian/patches-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch b/debian/patches-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch new file mode 100644 index 000000000..928158ffc --- /dev/null +++ b/debian/patches-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch @@ -0,0 +1,47 @@ +From da6f82ad4f67dec908c062d597063a072883c771 Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 27 Jul 2019 00:56:34 -0500 +Subject: [PATCH 305/319] sched: migrate disable: Protect cpus_ptr with lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 27ee52a891ed2c7e2e2c8332ccae0de7c2674b09 ] + +Various places assume that cpus_ptr is protected by rq/pi locks, +so don't change it before grabbing those locks. + +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/core.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index a29f33e776d0..d9a3f88508ee 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7250,9 +7250,8 @@ migrate_disable_update_cpus_allowed(struct task_struct *p) + struct rq *rq; + struct rq_flags rf; + +- p->cpus_ptr = cpumask_of(smp_processor_id()); +- + rq = task_rq_lock(p, &rf); ++ p->cpus_ptr = cpumask_of(smp_processor_id()); + update_nr_migratory(p, -1); + p->nr_cpus_allowed = 1; + task_rq_unlock(rq, p, &rf); +@@ -7264,9 +7263,8 @@ migrate_enable_update_cpus_allowed(struct task_struct *p) + struct rq *rq; + struct rq_flags rf; + +- p->cpus_ptr = &p->cpus_mask; +- + rq = task_rq_lock(p, &rf); ++ p->cpus_ptr = &p->cpus_mask; + p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); + update_nr_migratory(p, 1); + task_rq_unlock(rq, p, &rf); +-- +2.25.0 + diff --git a/debian/patches-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch b/debian/patches-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch new file mode 100644 index 000000000..4d608ac37 --- /dev/null +++ b/debian/patches-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch @@ -0,0 +1,45 @@ +From f8fd574326b570c47250ec0cd6f908d23574a5d1 Mon Sep 17 00:00:00 2001 +From: Waiman Long +Date: Thu, 3 Oct 2019 16:36:08 -0400 +Subject: [PATCH 306/319] lib/smp_processor_id: Don't use cpumask_equal() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 659252061477862f45b79e1de169e6030f5c8918 ] + +The check_preemption_disabled() function uses cpumask_equal() to see +if the task is bounded to the current CPU only. 
cpumask_equal() calls +memcmp() to do the comparison. As x86 doesn't have __HAVE_ARCH_MEMCMP, +the slow memcmp() function in lib/string.c is used. + +On a RT kernel that call check_preemption_disabled() very frequently, +below is the perf-record output of a certain microbenchmark: + + 42.75% 2.45% testpmd [kernel.kallsyms] [k] check_preemption_disabled + 40.01% 39.97% testpmd [kernel.kallsyms] [k] memcmp + +We should avoid calling memcmp() in performance critical path. So the +cpumask_equal() call is now replaced with an equivalent simpler check. + +Signed-off-by: Waiman Long +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + lib/smp_processor_id.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c +index fb35c45b9421..b8a8a8db2d75 100644 +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, + * Kernel threads bound to a single CPU can safely use + * smp_processor_id(): + */ +- if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu))) ++ if (current->nr_cpus_allowed == 1) + goto out; + + /* +-- +2.25.0 + diff --git a/debian/patches-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch b/debian/patches-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch new file mode 100644 index 000000000..a3c183471 --- /dev/null +++ b/debian/patches-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch @@ -0,0 +1,738 @@ +From 3112f6410209b71e4cdbe79c78d6477658fd885a Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 7 Oct 2019 16:45:18 +0200 +Subject: [PATCH 307/319] futex: Make the futex_hash_bucket spinlock_t again + and bring back its old state +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 954ad80c23edfe71f4e8ce70b961eac884320c3a ] + +This is an all-in-one patch that reverts the patches: + futex: Make the futex_hash_bucket lock raw + futex: Delay deallocation of pi_state + +and adds back the old patches we had: + futex: workaround migrate_disable/enable in different context + rtmutex: Handle the various new futex race conditions + futex: Fix bug on when a requeued RT task times out + futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/futex.c | 230 ++++++++++++++++++-------------- + kernel/locking/rtmutex.c | 65 ++++++++- + kernel/locking/rtmutex_common.h | 3 + + 3 files changed, 194 insertions(+), 104 deletions(-) + +diff --git a/kernel/futex.c b/kernel/futex.c +index 0b8cff8d9162..e815cf542b82 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -243,7 +243,7 @@ struct futex_q { + struct plist_node list; + + struct task_struct *task; +- raw_spinlock_t *lock_ptr; ++ spinlock_t *lock_ptr; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; +@@ -264,7 +264,7 @@ static const struct futex_q futex_q_init = { + */ + struct futex_hash_bucket { + atomic_t waiters; +- raw_spinlock_t lock; ++ spinlock_t lock; + struct plist_head chain; + } ____cacheline_aligned_in_smp; + +@@ -825,13 +825,13 @@ static void get_pi_state(struct futex_pi_state *pi_state) + * Drops a reference to the pi_state object and frees or caches it + * when the last reference is gone. 
+ */ +-static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) ++static void put_pi_state(struct futex_pi_state *pi_state) + { + if (!pi_state) +- return NULL; ++ return; + + if (!atomic_dec_and_test(&pi_state->refcount)) +- return NULL; ++ return; + + /* + * If pi_state->owner is NULL, the owner is most probably dying +@@ -851,7 +851,9 @@ static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + } + +- if (!current->pi_state_cache) { ++ if (current->pi_state_cache) { ++ kfree(pi_state); ++ } else { + /* + * pi_state->list is already empty. + * clear pi_state->owner. +@@ -860,30 +862,6 @@ static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) + pi_state->owner = NULL; + atomic_set(&pi_state->refcount, 1); + current->pi_state_cache = pi_state; +- pi_state = NULL; +- } +- return pi_state; +-} +- +-static void put_pi_state(struct futex_pi_state *pi_state) +-{ +- kfree(__put_pi_state(pi_state)); +-} +- +-static void put_pi_state_atomic(struct futex_pi_state *pi_state, +- struct list_head *to_free) +-{ +- if (__put_pi_state(pi_state)) +- list_add(&pi_state->list, to_free); +-} +- +-static void free_pi_state_list(struct list_head *to_free) +-{ +- struct futex_pi_state *p, *next; +- +- list_for_each_entry_safe(p, next, to_free, list) { +- list_del(&p->list); +- kfree(p); + } + } + +@@ -900,7 +878,6 @@ void exit_pi_state_list(struct task_struct *curr) + struct futex_pi_state *pi_state; + struct futex_hash_bucket *hb; + union futex_key key = FUTEX_KEY_INIT; +- LIST_HEAD(to_free); + + if (!futex_cmpxchg_enabled) + return; +@@ -934,7 +911,7 @@ void exit_pi_state_list(struct task_struct *curr) + } + raw_spin_unlock_irq(&curr->pi_lock); + +- raw_spin_lock(&hb->lock); ++ spin_lock(&hb->lock); + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + raw_spin_lock(&curr->pi_lock); + /* +@@ -944,8 +921,10 @@ void exit_pi_state_list(struct task_struct *curr) + if (head->next != next) { + /* retain curr->pi_lock for the loop invariant */ + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); +- raw_spin_unlock(&hb->lock); +- put_pi_state_atomic(pi_state, &to_free); ++ raw_spin_unlock_irq(&curr->pi_lock); ++ spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); ++ put_pi_state(pi_state); + continue; + } + +@@ -956,7 +935,7 @@ void exit_pi_state_list(struct task_struct *curr) + + raw_spin_unlock(&curr->pi_lock); + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + + rt_mutex_futex_unlock(&pi_state->pi_mutex); + put_pi_state(pi_state); +@@ -964,8 +943,6 @@ void exit_pi_state_list(struct task_struct *curr) + raw_spin_lock_irq(&curr->pi_lock); + } + raw_spin_unlock_irq(&curr->pi_lock); +- +- free_pi_state_list(&to_free); + } + + #endif +@@ -1452,7 +1429,7 @@ static void __unqueue_futex(struct futex_q *q) + { + struct futex_hash_bucket *hb; + +- if (WARN_ON_SMP(!q->lock_ptr || !raw_spin_is_locked(q->lock_ptr)) ++ if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) + || WARN_ON(plist_node_empty(&q->list))) + return; + +@@ -1580,21 +1557,21 @@ static inline void + double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) + { + if (hb1 <= hb2) { +- raw_spin_lock(&hb1->lock); ++ spin_lock(&hb1->lock); + if (hb1 < hb2) +- raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); ++ spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); + } else { /* hb1 > hb2 */ +- raw_spin_lock(&hb2->lock); +- 
raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); ++ spin_lock(&hb2->lock); ++ spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); + } + } + + static inline void + double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) + { +- raw_spin_unlock(&hb1->lock); ++ spin_unlock(&hb1->lock); + if (hb1 != hb2) +- raw_spin_unlock(&hb2->lock); ++ spin_unlock(&hb2->lock); + } + + /* +@@ -1622,7 +1599,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) + if (!hb_waiters_pending(hb)) + goto out_put_key; + +- raw_spin_lock(&hb->lock); ++ spin_lock(&hb->lock); + + plist_for_each_entry_safe(this, next, &hb->chain, list) { + if (match_futex (&this->key, &key)) { +@@ -1641,7 +1618,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) + } + } + +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + wake_up_q(&wake_q); + out_put_key: + put_futex_key(&key); +@@ -1948,7 +1925,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + struct futex_hash_bucket *hb1, *hb2; + struct futex_q *this, *next; + DEFINE_WAKE_Q(wake_q); +- LIST_HEAD(to_free); + + if (nr_wake < 0 || nr_requeue < 0) + return -EINVAL; +@@ -2176,6 +2152,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ put_pi_state(pi_state); ++ continue; + } else if (ret) { + /* + * rt_mutex_start_proxy_lock() detected a +@@ -2186,7 +2172,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + * object. + */ + this->pi_state = NULL; +- put_pi_state_atomic(pi_state, &to_free); ++ put_pi_state(pi_state); + /* + * We stop queueing more waiters and let user + * space deal with the mess. +@@ -2203,7 +2189,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We + * need to drop it here again. + */ +- put_pi_state_atomic(pi_state, &to_free); ++ put_pi_state(pi_state); + + out_unlock: + double_unlock_hb(hb1, hb2); +@@ -2224,7 +2210,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + out_put_key1: + put_futex_key(&key1); + out: +- free_pi_state_list(&to_free); + return ret ? ret : task_count; + } + +@@ -2248,7 +2233,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) + + q->lock_ptr = &hb->lock; + +- raw_spin_lock(&hb->lock); /* implies smp_mb(); (A) */ ++ spin_lock(&hb->lock); /* implies smp_mb(); (A) */ + return hb; + } + +@@ -2256,7 +2241,7 @@ static inline void + queue_unlock(struct futex_hash_bucket *hb) + __releases(&hb->lock) + { +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + hb_waiters_dec(hb); + } + +@@ -2295,7 +2280,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + __releases(&hb->lock) + { + __queue_me(q, hb); +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + } + + /** +@@ -2311,41 +2296,41 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + */ + static int unqueue_me(struct futex_q *q) + { +- raw_spinlock_t *lock_ptr; ++ spinlock_t *lock_ptr; + int ret = 0; + + /* In the common case we don't take the spinlock, which is nice. */ + retry: + /* +- * q->lock_ptr can change between this read and the following +- * raw_spin_lock. 
Use READ_ONCE to forbid the compiler from reloading +- * q->lock_ptr and optimizing lock_ptr out of the logic below. ++ * q->lock_ptr can change between this read and the following spin_lock. ++ * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and ++ * optimizing lock_ptr out of the logic below. + */ + lock_ptr = READ_ONCE(q->lock_ptr); + if (lock_ptr != NULL) { +- raw_spin_lock(lock_ptr); ++ spin_lock(lock_ptr); + /* + * q->lock_ptr can change between reading it and +- * raw_spin_lock(), causing us to take the wrong lock. This ++ * spin_lock(), causing us to take the wrong lock. This + * corrects the race condition. + * + * Reasoning goes like this: if we have the wrong lock, + * q->lock_ptr must have changed (maybe several times) +- * between reading it and the raw_spin_lock(). It can +- * change again after the raw_spin_lock() but only if it was +- * already changed before the raw_spin_lock(). It cannot, ++ * between reading it and the spin_lock(). It can ++ * change again after the spin_lock() but only if it was ++ * already changed before the spin_lock(). It cannot, + * however, change back to the original value. Therefore + * we can detect whether we acquired the correct lock. + */ + if (unlikely(lock_ptr != q->lock_ptr)) { +- raw_spin_unlock(lock_ptr); ++ spin_unlock(lock_ptr); + goto retry; + } + __unqueue_futex(q); + + BUG_ON(q->pi_state); + +- raw_spin_unlock(lock_ptr); ++ spin_unlock(lock_ptr); + ret = 1; + } + +@@ -2361,16 +2346,13 @@ static int unqueue_me(struct futex_q *q) + static void unqueue_me_pi(struct futex_q *q) + __releases(q->lock_ptr) + { +- struct futex_pi_state *ps; +- + __unqueue_futex(q); + + BUG_ON(!q->pi_state); +- ps = __put_pi_state(q->pi_state); ++ put_pi_state(q->pi_state); + q->pi_state = NULL; + +- raw_spin_unlock(q->lock_ptr); +- kfree(ps); ++ spin_unlock(q->lock_ptr); + } + + static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, +@@ -2503,7 +2485,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + */ + handle_err: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- raw_spin_unlock(q->lock_ptr); ++ spin_unlock(q->lock_ptr); + + switch (err) { + case -EFAULT: +@@ -2521,7 +2503,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + break; + } + +- raw_spin_lock(q->lock_ptr); ++ spin_lock(q->lock_ptr); + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + /* +@@ -2617,7 +2599,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, + /* + * The task state is guaranteed to be set before another task can + * wake it. set_current_state() is implemented using smp_store_mb() and +- * queue_me() calls raw_spin_unlock() upon completion, both serializing ++ * queue_me() calls spin_unlock() upon completion, both serializing + * access to the hash list and forcing another memory barrier. + */ + set_current_state(TASK_INTERRUPTIBLE); +@@ -2908,7 +2890,15 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + * before __rt_mutex_start_proxy_lock() is done. + */ + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); +- raw_spin_unlock(q.lock_ptr); ++ /* ++ * the migrate_disable() here disables migration in the in_atomic() fast ++ * path which is enabled again in the following spin_unlock(). We have ++ * one migrate_disable() pending in the slow-path which is reversed ++ * after the raw_spin_unlock_irq() where we leave the atomic context. 
++ */ ++ migrate_disable(); ++ ++ spin_unlock(q.lock_ptr); + /* + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter + * such that futex_unlock_pi() is guaranteed to observe the waiter when +@@ -2916,6 +2906,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + */ + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); ++ migrate_enable(); + + if (ret) { + if (ret == 1) +@@ -2929,7 +2920,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); + + cleanup: +- raw_spin_lock(q.lock_ptr); ++ spin_lock(q.lock_ptr); + /* + * If we failed to acquire the lock (deadlock/signal/timeout), we must + * first acquire the hb->lock before removing the lock from the +@@ -3030,7 +3021,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + return ret; + + hb = hash_futex(&key); +- raw_spin_lock(&hb->lock); ++ spin_lock(&hb->lock); + + /* + * Check waiters first. We do not trust user space values at +@@ -3064,10 +3055,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * rt_waiter. Also see the WARN in wake_futex_pi(). + */ + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); +- raw_spin_unlock(&hb->lock); ++ /* ++ * Magic trickery for now to make the RT migrate disable ++ * logic happy. The following spin_unlock() happens with ++ * interrupts disabled so the internal migrate_enable() ++ * won't undo the migrate_disable() which was issued when ++ * locking hb->lock. ++ */ ++ migrate_disable(); ++ spin_unlock(&hb->lock); + + /* drops pi_state->pi_mutex.wait_lock */ + ret = wake_futex_pi(uaddr, uval, pi_state); ++ migrate_enable(); + + put_pi_state(pi_state); + +@@ -3103,7 +3103,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * owner. + */ + if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + switch (ret) { + case -EFAULT: + goto pi_faulted; +@@ -3123,7 +3123,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + ret = (curval == uval) ? 0 : -EAGAIN; + + out_unlock: +- raw_spin_unlock(&hb->lock); ++ spin_unlock(&hb->lock); + out_putkey: + put_futex_key(&key); + return ret; +@@ -3239,7 +3239,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + struct hrtimer_sleeper timeout, *to = NULL; + struct futex_pi_state *pi_state = NULL; + struct rt_mutex_waiter rt_waiter; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -3297,20 +3297,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + /* Queue the futex_q, drop the hb lock, wait for wakeup. */ + futex_wait_queue_me(hb, &q, to); + +- raw_spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- raw_spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. 
++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. ++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -3319,9 +3354,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- struct futex_pi_state *ps_free; +- +- raw_spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { + pi_state = q.pi_state; +@@ -3331,9 +3365,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. 
+ */ +- ps_free = __put_pi_state(q.pi_state); +- raw_spin_unlock(q.lock_ptr); +- kfree(ps_free); ++ put_pi_state(q.pi_state); ++ spin_unlock(&hb2->lock); + } + } else { + struct rt_mutex *pi_mutex; +@@ -3347,7 +3380,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + pi_mutex = &q.pi_state->pi_mutex; + ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); + +- raw_spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) + ret = 0; + +@@ -4014,7 +4048,7 @@ static int __init futex_init(void) + for (i = 0; i < futex_hashsize; i++) { + atomic_set(&futex_queues[i].waiters, 0); + plist_head_init(&futex_queues[i].chain); +- raw_spin_lock_init(&futex_queues[i].lock); ++ spin_lock_init(&futex_queues[i].lock); + } + + return 0; +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 44a33057a83a..2a9bf2443acc 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -142,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) + WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if there's no debugging state to be + * set up. +@@ -415,7 +421,8 @@ int max_lock_depth = 1024; + + static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) + { +- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; ++ return rt_mutex_real_waiter(p->pi_blocked_on) ? ++ p->pi_blocked_on->lock : NULL; + } + + /* +@@ -551,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -1321,6 +1328,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + return -EDEADLK; + + raw_spin_lock(&task->pi_lock); ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. 
++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + waiter->task = task; + waiter->lock = lock; + waiter->prio = task->prio; +@@ -1344,7 +1367,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + rt_mutex_enqueue_pi(owner, waiter); + + rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { + chain_walk = 1; +@@ -1444,7 +1467,7 @@ static void remove_waiter(struct rt_mutex *lock, + { + bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); + struct task_struct *owner = rt_mutex_owner(lock); +- struct rt_mutex *next_lock; ++ struct rt_mutex *next_lock = NULL; + + lockdep_assert_held(&lock->wait_lock); + +@@ -1470,7 +1493,8 @@ static void remove_waiter(struct rt_mutex *lock, + rt_mutex_adjust_prio(owner); + + /* Store the lock on which owner is blocked or NULL */ +- next_lock = task_blocked_on_lock(owner); ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) ++ next_lock = task_blocked_on_lock(owner); + + raw_spin_unlock(&owner->pi_lock); + +@@ -1506,7 +1530,8 @@ void rt_mutex_adjust_pi(struct task_struct *task) + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { ++ if (!rt_mutex_real_waiter(waiter) || ++ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } +@@ -2325,6 +2350,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + if (try_to_take_rt_mutex(lock, task, NULL)) + return 1; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * In PREEMPT_RT there's an added race. ++ * If the task, that we are about to requeue, times out, ++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue ++ * to skip this task. But right after the task sets ++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then ++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. ++ * This will replace the PI_WAKEUP_INPROGRESS with the actual ++ * lock that it blocks on. We *must not* place this task ++ * on this proxy lock in that case. ++ * ++ * To prevent this race, we first take the task's pi_lock ++ * and check if it has updated its pi_blocked_on. If it has, ++ * we assume that it woke up and we return -EAGAIN. ++ * Otherwise, we set the task's pi_blocked_on to ++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up ++ * it will know that we are in the process of requeuing it. 
++ */ ++ raw_spin_lock(&task->pi_lock); ++ if (task->pi_blocked_on) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; ++ raw_spin_unlock(&task->pi_lock); ++#endif ++ + /* We enforce deadlock detection for futexes */ + ret = task_blocks_on_rt_mutex(lock, waiter, task, + RT_MUTEX_FULL_CHAINWALK); +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index 758dc43872e5..546aaf058b9e 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -132,6 +132,9 @@ enum rtmutex_chainwalk { + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); +-- +2.25.0 + diff --git a/debian/patches-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch b/debian/patches-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch new file mode 100644 index 000000000..4d4d377c6 --- /dev/null +++ b/debian/patches-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch @@ -0,0 +1,102 @@ +From 3a13f9e1ba717755553c85395a53cafd46cb9b5c Mon Sep 17 00:00:00 2001 +From: Peter Zijlstra +Date: Mon, 30 Sep 2019 18:15:44 +0200 +Subject: [PATCH 308/319] locking/rtmutex: Clean ->pi_blocked_on in the error + case +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 0be4ea6e3ce693101be0fbd55a0cc7ce238ab2eb ] + +The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case +of failure (timeout, signal). The same cleanup is required in +__rt_mutex_start_proxy_lock(). +In both the cases the tasks was interrupted by a signal or timeout while +acquiring the lock and after the interruption it longer blocks on the +lock. + +Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly") +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++----------------- + 1 file changed, 25 insertions(+), 18 deletions(-) + +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 2a9bf2443acc..63b3d6f306fa 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -2320,6 +2320,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, + rt_mutex_set_owner(lock, NULL); + } + ++static void fixup_rt_mutex_blocked(struct rt_mutex *lock) ++{ ++ struct task_struct *tsk = current; ++ /* ++ * RT has a problem here when the wait got interrupted by a timeout ++ * or a signal. task->pi_blocked_on is still set. The task must ++ * acquire the hash bucket lock when returning from this function. ++ * ++ * If the hash bucket lock is contended then the ++ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in ++ * task_blocks_on_rt_mutex() will trigger. This can be avoided by ++ * clearing task->pi_blocked_on which removes the task from the ++ * boosting chain of the rtmutex. That's correct because the task ++ * is not longer blocked on it. 
++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ tsk->pi_blocked_on = NULL; ++ raw_spin_unlock(&tsk->pi_lock); ++} ++ + /** + * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task + * @lock: the rt_mutex to take +@@ -2392,6 +2412,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + ret = 0; + } + ++ if (ret) ++ fixup_rt_mutex_blocked(lock); ++ + debug_rt_mutex_print_deadlock(waiter); + + return ret; +@@ -2472,7 +2495,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *to, + struct rt_mutex_waiter *waiter) + { +- struct task_struct *tsk = current; + int ret; + + raw_spin_lock_irq(&lock->wait_lock); +@@ -2484,23 +2506,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + * have to fix that up. + */ + fixup_rt_mutex_waiters(lock); +- /* +- * RT has a problem here when the wait got interrupted by a timeout +- * or a signal. task->pi_blocked_on is still set. The task must +- * acquire the hash bucket lock when returning from this function. +- * +- * If the hash bucket lock is contended then the +- * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in +- * task_blocks_on_rt_mutex() will trigger. This can be avoided by +- * clearing task->pi_blocked_on which removes the task from the +- * boosting chain of the rtmutex. That's correct because the task +- * is not longer blocked on it. +- */ +- if (ret) { +- raw_spin_lock(&tsk->pi_lock); +- tsk->pi_blocked_on = NULL; +- raw_spin_unlock(&tsk->pi_lock); +- } ++ if (ret) ++ fixup_rt_mutex_blocked(lock); + + raw_spin_unlock_irq(&lock->wait_lock); + +-- +2.25.0 + diff --git a/debian/patches-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch b/debian/patches-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch new file mode 100644 index 000000000..123b8fb1a --- /dev/null +++ b/debian/patches-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch @@ -0,0 +1,308 @@ +From c7a35250787bb30ab831087e4ea1485eda891680 Mon Sep 17 00:00:00 2001 +From: Julien Grall +Date: Fri, 20 Sep 2019 11:08:35 +0100 +Subject: [PATCH 309/319] lib/ubsan: Don't seralize UBSAN report +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 4702c28ac777b27acb499cbd5e8e787ce1a7d82d ] + +At the moment, UBSAN report will be serialized using a spin_lock(). On +RT-systems, spinlocks are turned to rt_spin_lock and may sleep. 
This will +result to the following splat if the undefined behavior is in a context +that can sleep: + +| BUG: sleeping function called from invalid context at /src/linux/kernel/locking/rtmutex.c:968 +| in_atomic(): 1, irqs_disabled(): 128, pid: 3447, name: make +| 1 lock held by make/3447: +| #0: 000000009a966332 (&mm->mmap_sem){++++}, at: do_page_fault+0x140/0x4f8 +| Preemption disabled at: +| [] rt_mutex_futex_unlock+0x4c/0xb0 +| CPU: 3 PID: 3447 Comm: make Tainted: G W 5.2.14-rt7-01890-ge6e057589653 #911 +| Call trace: +| dump_backtrace+0x0/0x148 +| show_stack+0x14/0x20 +| dump_stack+0xbc/0x104 +| ___might_sleep+0x154/0x210 +| rt_spin_lock+0x68/0xa0 +| ubsan_prologue+0x30/0x68 +| handle_overflow+0x64/0xe0 +| __ubsan_handle_add_overflow+0x10/0x18 +| __lock_acquire+0x1c28/0x2a28 +| lock_acquire+0xf0/0x370 +| _raw_spin_lock_irqsave+0x58/0x78 +| rt_mutex_futex_unlock+0x4c/0xb0 +| rt_spin_unlock+0x28/0x70 +| get_page_from_freelist+0x428/0x2b60 +| __alloc_pages_nodemask+0x174/0x1708 +| alloc_pages_vma+0x1ac/0x238 +| __handle_mm_fault+0x4ac/0x10b0 +| handle_mm_fault+0x1d8/0x3b0 +| do_page_fault+0x1c8/0x4f8 +| do_translation_fault+0xb8/0xe0 +| do_mem_abort+0x3c/0x98 +| el0_da+0x20/0x24 + +The spin_lock() will protect against multiple CPUs to output a report +together, I guess to prevent them to be interleaved. However, they can +still interleave with other messages (and even splat from __migth_sleep). + +So the lock usefulness seems pretty limited. Rather than trying to +accomodate RT-system by switching to a raw_spin_lock(), the lock is now +completely dropped. + +Link: https://lkml.kernel.org/r/20190920100835.14999-1-julien.grall@arm.com +Reported-by: Andre Przywara +Signed-off-by: Julien Grall +Acked-by: Andrey Ryabinin +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + lib/ubsan.c | 64 +++++++++++++++++++---------------------------------- + 1 file changed, 23 insertions(+), 41 deletions(-) + +diff --git a/lib/ubsan.c b/lib/ubsan.c +index 1e9e2ab25539..5830cc9a2164 100644 +--- a/lib/ubsan.c ++++ b/lib/ubsan.c +@@ -143,25 +143,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, + } + } + +-static DEFINE_SPINLOCK(report_lock); +- +-static void ubsan_prologue(struct source_location *location, +- unsigned long *flags) ++static void ubsan_prologue(struct source_location *location) + { + current->in_ubsan++; +- spin_lock_irqsave(&report_lock, *flags); + + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); + } + +-static void ubsan_epilogue(unsigned long *flags) ++static void ubsan_epilogue(void) + { + dump_stack(); + pr_err("========================================" + "========================================\n"); +- spin_unlock_irqrestore(&report_lock, *flags); ++ + current->in_ubsan--; + } + +@@ -170,14 +166,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + { + + struct type_descriptor *type = data->type; +- unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); +@@ -189,7 +184,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + rhs_val_str, + type->type_name); + +- 
ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + void __ubsan_handle_add_overflow(struct overflow_data *data, +@@ -217,20 +212,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + void __ubsan_handle_negate_overflow(struct overflow_data *data, + void *old_val) + { +- unsigned long flags; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + +@@ -238,13 +232,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + void __ubsan_handle_divrem_overflow(struct overflow_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + +@@ -254,58 +247,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, + else + pr_err("division by zero\n"); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); + + static void handle_null_ptr_deref(struct type_mismatch_data_common *data) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], + data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_misaligned_access(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], + (void *)ptr, data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_object_size_mismatch(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + pr_err("%s address %p with insufficient space\n", + type_check_kinds[data->type_check_kind], + (void *) ptr); + pr_err("for an object of type %s\n", data->type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, +@@ -369,25 +356,23 @@ EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); + + void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) + { +- unsigned long flags; + char index_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); + + void 
__ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; +@@ -396,7 +381,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); +@@ -419,18 +404,16 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + lhs_str, rhs_str, + lhs_type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); + + + void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) + { +- unsigned long flags; +- +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + pr_err("calling __builtin_unreachable()\n"); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + panic("can't return from __builtin_unreachable()"); + } + EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); +@@ -438,19 +421,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); + void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, + void *val) + { +- unsigned long flags; + char val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(val_str, sizeof(val_str), data->type, val); + + pr_err("load of value %s is not a valid value for type %s\n", + val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); +-- +2.25.0 + diff --git a/debian/patches-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch b/debian/patches-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch new file mode 100644 index 000000000..32043da19 --- /dev/null +++ b/debian/patches-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch @@ -0,0 +1,293 @@ +From ece6093ebeeab9ee10748cd45b5b9a1408bace4e Mon Sep 17 00:00:00 2001 +From: Liu Haitao +Date: Fri, 27 Sep 2019 16:22:30 +0800 +Subject: [PATCH 310/319] kmemleak: Change the lock of kmemleak_object to + raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 217847f57119b5fdd377bfa3d344613ddb98d9fc ] + +The commit ("kmemleak: Turn kmemleak_lock to raw spinlock on RT") +changed the kmemleak_lock to raw spinlock. However the +kmemleak_object->lock is held after the kmemleak_lock is held in +scan_block(). + +Make the object->lock a raw_spinlock_t. + +Cc: stable-rt@vger.kernel.org +Link: https://lkml.kernel.org/r/20190927082230.34152-1-yongxin.liu@windriver.com +Signed-off-by: Liu Haitao +Signed-off-by: Yongxin Liu +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + mm/kmemleak.c | 72 +++++++++++++++++++++++++-------------------------- + 1 file changed, 36 insertions(+), 36 deletions(-) + +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index 92ce99b15f2b..e5f5eeed338d 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -147,7 +147,7 @@ struct kmemleak_scan_area { + * (use_count) and freed using the RCU mechanism. 
+ */ + struct kmemleak_object { +- spinlock_t lock; ++ raw_spinlock_t lock; + unsigned int flags; /* object status flags */ + struct list_head object_list; + struct list_head gray_list; +@@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, + INIT_LIST_HEAD(&object->object_list); + INIT_LIST_HEAD(&object->gray_list); + INIT_HLIST_HEAD(&object->area_list); +- spin_lock_init(&object->lock); ++ raw_spin_lock_init(&object->lock); + atomic_set(&object->use_count, 1); + object->flags = OBJECT_ALLOCATED; + object->pointer = ptr; +@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object) + * Locking here also ensures that the corresponding memory block + * cannot be freed when it is being scanned. + */ +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->flags &= ~OBJECT_ALLOCATED; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color) + { + unsigned long flags; + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + __paint_it(object, color); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + + static void paint_ptr(unsigned long ptr, int color) +@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) + goto out; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (size == SIZE_MAX) { + size = object->pointer + object->size - ptr; + } else if (ptr + size > object->pointer + object->size) { +@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) + + hlist_add_head(&area->node, &object->area_list); + out_unlock: +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + out: + put_object(object); + } +@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->excess_ref = excess_ref; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->flags |= OBJECT_NO_SCAN; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + } + +@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log) + log->min_count, GFP_ATOMIC); + if (!object) + goto out; +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + for (i = 0; i < log->trace_len; i++) + object->trace[i] = log->trace[i]; + object->trace_len = log->trace_len; +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + out: + rcu_read_unlock(); + } +@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr) + return; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + object->trace_len = __save_stack_trace(object->trace); +- spin_unlock_irqrestore(&object->lock, flags); ++ 
raw_spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); + } +@@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end, + * previously acquired in scan_object(). These locks are + * enclosed by scan_mutex. + */ +- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + /* only pass surplus references (object already gray) */ + if (color_gray(object)) { + excess_ref = object->excess_ref; +@@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end, + excess_ref = 0; + update_refs(object); + } +- spin_unlock(&object->lock); ++ raw_spin_unlock(&object->lock); + + if (excess_ref) { + object = lookup_object(excess_ref, 0); +@@ -1362,9 +1362,9 @@ static void scan_block(void *_start, void *_end, + if (object == scanned) + /* circular reference, ignore */ + continue; +- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + update_refs(object); +- spin_unlock(&object->lock); ++ raw_spin_unlock(&object->lock); + } + } + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); +@@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object) + * Once the object->lock is acquired, the corresponding memory block + * cannot be freed (the same lock is acquired in delete_object). + */ +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (object->flags & OBJECT_NO_SCAN) + goto out; + if (!(object->flags & OBJECT_ALLOCATED)) +@@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object) + if (start >= end) + break; + +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + cond_resched(); +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + } while (object->flags & OBJECT_ALLOCATED); + } else + hlist_for_each_entry(area, &object->area_list, node) +@@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object) + (void *)(area->start + area->size), + object); + out: +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + + /* +@@ -1482,7 +1482,7 @@ static void kmemleak_scan(void) + /* prepare the kmemleak_object's */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + #ifdef DEBUG + /* + * With a few exceptions there should be a maximum of +@@ -1499,7 +1499,7 @@ static void kmemleak_scan(void) + if (color_gray(object) && get_object(object)) + list_add_tail(&object->gray_list, &gray_list); + +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1564,14 +1564,14 @@ static void kmemleak_scan(void) + */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (color_white(object) && (object->flags & OBJECT_ALLOCATED) + && update_checksum(object) && get_object(object)) { + /* color it gray temporarily */ + object->count = object->min_count; + list_add_tail(&object->gray_list, &gray_list); + } +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1591,13 +1591,13 @@ static void kmemleak_scan(void) + */ + rcu_read_lock(); + 
list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if (unreferenced_object(object) && + !(object->flags & OBJECT_REPORTED)) { + object->flags |= OBJECT_REPORTED; + new_leaks++; + } +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +@@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) + struct kmemleak_object *object = v; + unsigned long flags; + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) + print_unreferenced(seq, object); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + return 0; + } + +@@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str) + return -EINVAL; + } + +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + dump_object_info(object); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + + put_object(object); + return 0; +@@ -1803,11 +1803,11 @@ static void kmemleak_clear(void) + + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { +- spin_lock_irqsave(&object->lock, flags); ++ raw_spin_lock_irqsave(&object->lock, flags); + if ((object->flags & OBJECT_REPORTED) && + unreferenced_object(object)) + __paint_it(object, KMEMLEAK_GREY); +- spin_unlock_irqrestore(&object->lock, flags); ++ raw_spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + +-- +2.25.0 + diff --git a/debian/patches-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch b/debian/patches-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch new file mode 100644 index 000000000..9bd7d4796 --- /dev/null +++ b/debian/patches-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch @@ -0,0 +1,66 @@ +From f6209eaaf4b8446932a9fa6c0738b6e6ec19fd72 Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 12 Oct 2019 01:52:12 -0500 +Subject: [PATCH 311/319] sched: migrate_enable: Use select_fallback_rq() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit adfa969d4cfcc995a9d866020124e50f1827d2d1 ] + +migrate_enable() currently open-codes a variant of select_fallback_rq(). +However, it does not have the "No more Mr. Nice Guy" fallback and thus +it will pass an invalid CPU to the migration thread if cpus_mask only +contains a CPU that is !active. 
+ +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/core.c | 25 ++++++++++--------------- + 1 file changed, 10 insertions(+), 15 deletions(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index d9a3f88508ee..6fd3f7b4d7d8 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7335,6 +7335,7 @@ void migrate_enable(void) + if (p->migrate_disable_update) { + struct rq *rq; + struct rq_flags rf; ++ int cpu = task_cpu(p); + + rq = task_rq_lock(p, &rf); + update_rq_clock(rq); +@@ -7344,21 +7345,15 @@ void migrate_enable(void) + + p->migrate_disable_update = 0; + +- WARN_ON(smp_processor_id() != task_cpu(p)); +- if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { +- const struct cpumask *cpu_valid_mask = cpu_active_mask; +- struct migration_arg arg; +- unsigned int dest_cpu; +- +- if (p->flags & PF_KTHREAD) { +- /* +- * Kernel threads are allowed on online && !active CPUs +- */ +- cpu_valid_mask = cpu_online_mask; +- } +- dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask); +- arg.task = p; +- arg.dest_cpu = dest_cpu; ++ WARN_ON(smp_processor_id() != cpu); ++ if (!cpumask_test_cpu(cpu, &p->cpus_mask)) { ++ struct migration_arg arg = { p }; ++ struct rq_flags rf; ++ ++ rq = task_rq_lock(p, &rf); ++ update_rq_clock(rq); ++ arg.dest_cpu = select_fallback_rq(cpu, p); ++ task_rq_unlock(rq, p, &rf); + + unpin_current_cpu(); + preempt_lazy_enable(); +-- +2.25.0 + diff --git a/debian/patches-rt/0312-sched-Lazy-migrate_disable-processing.patch b/debian/patches-rt/0312-sched-Lazy-migrate_disable-processing.patch new file mode 100644 index 000000000..b2e7d6cc3 --- /dev/null +++ b/debian/patches-rt/0312-sched-Lazy-migrate_disable-processing.patch @@ -0,0 +1,616 @@ +From 42a1f80a938e51bf524f063180ef2bca3a4ed31b Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 12 Oct 2019 01:52:13 -0500 +Subject: [PATCH 312/319] sched: Lazy migrate_disable processing +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 425c5b38779a860062aa62219dc920d374b13c17 ] + +Avoid overhead on the majority of migrate disable/enable sequences by +only manipulating scheduler data (and grabbing the relevant locks) when +the task actually schedules while migrate-disabled. A kernel build +showed around a 10% reduction in system time (with CONFIG_NR_CPUS=512). + +Instead of cpuhp_pin_lock, CPU hotplug is handled by keeping a per-CPU +count of the number of pinned tasks (including tasks which have not +scheduled in the migrate-disabled section); takedown_cpu() will +wait until that reaches zero (confirmed by take_cpu_down() in stop +machine context to deal with races) before migrating tasks off of the +cpu. + +To simplify synchronization, updating cpus_mask is no longer deferred +until migrate_enable(). This lets us not have to worry about +migrate_enable() missing the update if it's on the fast path (didn't +schedule during the migrate disabled section). It also makes the code +a bit simpler and reduces deviation from mainline. + +While the main motivation for this is the performance benefit, lazy +migrate disable also eliminates the restriction on calling +migrate_disable() while atomic but leaving the atomic region prior to +calling migrate_enable() -- though this won't help with local_bh_disable() +(and thus rcutorture) unless something similar is done with the recently +added local_lock. 
+ +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + include/linux/cpu.h | 4 - + include/linux/sched.h | 11 +-- + init/init_task.c | 4 + + kernel/cpu.c | 103 +++++++++-------------- + kernel/sched/core.c | 182 +++++++++++++++++------------------------ + kernel/sched/sched.h | 4 + + lib/smp_processor_id.c | 3 + + 7 files changed, 129 insertions(+), 182 deletions(-) + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index e67645924404..87347ccbba0c 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -118,8 +118,6 @@ extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); + void clear_tasks_mm_cpumask(int cpu); + int cpu_down(unsigned int cpu); +-extern void pin_current_cpu(void); +-extern void unpin_current_cpu(void); + + #else /* CONFIG_HOTPLUG_CPU */ + +@@ -131,8 +129,6 @@ static inline int cpus_read_trylock(void) { return true; } + static inline void lockdep_assert_cpus_held(void) { } + static inline void cpu_hotplug_disable(void) { } + static inline void cpu_hotplug_enable(void) { } +-static inline void pin_current_cpu(void) { } +-static inline void unpin_current_cpu(void) { } + + #endif /* !CONFIG_HOTPLUG_CPU */ + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 854a6cb456af..60ac271472aa 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -227,6 +227,8 @@ extern void io_schedule_finish(int token); + extern long io_schedule_timeout(long timeout); + extern void io_schedule(void); + ++int cpu_nr_pinned(int cpu); ++ + /** + * struct prev_cputime - snapshot of system and user cputime + * @utime: time spent in user mode +@@ -670,16 +672,13 @@ struct task_struct { + cpumask_t cpus_mask; + #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + int migrate_disable; +- int migrate_disable_update; +- int pinned_on_cpu; ++ bool migrate_disable_scheduled; + # ifdef CONFIG_SCHED_DEBUG +- int migrate_disable_atomic; ++ int pinned_on_cpu; + # endif +- + #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + # ifdef CONFIG_SCHED_DEBUG + int migrate_disable; +- int migrate_disable_atomic; + # endif + #endif + #ifdef CONFIG_PREEMPT_RT_FULL +@@ -2058,4 +2057,6 @@ static inline void rseq_syscall(struct pt_regs *regs) + + #endif + ++extern struct task_struct *takedown_cpu_task; ++ + #endif +diff --git a/init/init_task.c b/init/init_task.c +index 9e3362748214..4e5af4616dbd 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -80,6 +80,10 @@ struct task_struct init_task + .cpus_ptr = &init_task.cpus_mask, + .cpus_mask = CPU_MASK_ALL, + .nr_cpus_allowed= NR_CPUS, ++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) && \ ++ defined(CONFIG_SCHED_DEBUG) ++ .pinned_on_cpu = -1, ++#endif + .mm = NULL, + .active_mm = &init_mm, + .restart_block = { +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 7170fbd35a22..5366c8c69c2f 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -75,11 +75,6 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { + .fail = CPUHP_INVALID, + }; + +-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL) +-static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ +- __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); +-#endif +- + #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) + static struct lockdep_map cpuhp_state_up_map = + STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); +@@ -286,57 +281,6 @@ static int cpu_hotplug_disabled; + + #ifdef CONFIG_HOTPLUG_CPU + +-/** +- * pin_current_cpu - 
Prevent the current cpu from being unplugged +- */ +-void pin_current_cpu(void) +-{ +-#ifdef CONFIG_PREEMPT_RT_FULL +- struct rt_rw_lock *cpuhp_pin; +- unsigned int cpu; +- int ret; +- +-again: +- cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); +- ret = __read_rt_trylock(cpuhp_pin); +- if (ret) { +- current->pinned_on_cpu = smp_processor_id(); +- return; +- } +- cpu = smp_processor_id(); +- preempt_lazy_enable(); +- preempt_enable(); +- +- sleeping_lock_inc(); +- __read_rt_lock(cpuhp_pin); +- sleeping_lock_dec(); +- +- preempt_disable(); +- preempt_lazy_disable(); +- if (cpu != smp_processor_id()) { +- __read_rt_unlock(cpuhp_pin); +- goto again; +- } +- current->pinned_on_cpu = cpu; +-#endif +-} +- +-/** +- * unpin_current_cpu - Allow unplug of current cpu +- */ +-void unpin_current_cpu(void) +-{ +-#ifdef CONFIG_PREEMPT_RT_FULL +- struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); +- +- if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) +- cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu); +- +- current->pinned_on_cpu = -1; +- __read_rt_unlock(cpuhp_pin); +-#endif +-} +- + DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); + + void cpus_read_lock(void) +@@ -866,6 +810,15 @@ static int take_cpu_down(void *_param) + int err, cpu = smp_processor_id(); + int ret; + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ /* ++ * If any tasks disabled migration before we got here, ++ * go back and sleep again. ++ */ ++ if (cpu_nr_pinned(cpu)) ++ return -EAGAIN; ++#endif ++ + /* Ensure this CPU doesn't handle any more interrupts. */ + err = __cpu_disable(); + if (err < 0) +@@ -893,11 +846,10 @@ static int take_cpu_down(void *_param) + return 0; + } + ++struct task_struct *takedown_cpu_task; ++ + static int takedown_cpu(unsigned int cpu) + { +-#ifdef CONFIG_PREEMPT_RT_FULL +- struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu); +-#endif + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); + int err; + +@@ -910,17 +862,38 @@ static int takedown_cpu(unsigned int cpu) + */ + irq_lock_sparse(); + +-#ifdef CONFIG_PREEMPT_RT_FULL +- __write_rt_lock(cpuhp_pin); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ WARN_ON_ONCE(takedown_cpu_task); ++ takedown_cpu_task = current; ++ ++again: ++ /* ++ * If a task pins this CPU after we pass this check, take_cpu_down ++ * will return -EAGAIN. ++ */ ++ for (;;) { ++ int nr_pinned; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ nr_pinned = cpu_nr_pinned(cpu); ++ if (nr_pinned == 0) ++ break; ++ schedule(); ++ } ++ set_current_state(TASK_RUNNING); + #endif + + /* + * So now all preempt/rcu users must observe !cpu_active(). 
+ */ + err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (err == -EAGAIN) ++ goto again; ++#endif + if (err) { +-#ifdef CONFIG_PREEMPT_RT_FULL +- __write_rt_unlock(cpuhp_pin); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ takedown_cpu_task = NULL; + #endif + /* CPU refused to die */ + irq_unlock_sparse(); +@@ -940,8 +913,8 @@ static int takedown_cpu(unsigned int cpu) + wait_for_ap_thread(st, false); + BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); + +-#ifdef CONFIG_PREEMPT_RT_FULL +- __write_rt_unlock(cpuhp_pin); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ takedown_cpu_task = NULL; + #endif + /* Interrupts are moved away from the dying cpu, reenable alloc/free */ + irq_unlock_sparse(); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 6fd3f7b4d7d8..e97ac751aad2 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1065,7 +1065,8 @@ static int migration_cpu_stop(void *data) + void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) + { + cpumask_copy(&p->cpus_mask, new_mask); +- p->nr_cpus_allowed = cpumask_weight(new_mask); ++ if (p->cpus_ptr == &p->cpus_mask) ++ p->nr_cpus_allowed = cpumask_weight(new_mask); + } + + #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +@@ -1076,8 +1077,7 @@ int __migrate_disabled(struct task_struct *p) + EXPORT_SYMBOL_GPL(__migrate_disabled); + #endif + +-static void __do_set_cpus_allowed_tail(struct task_struct *p, +- const struct cpumask *new_mask) ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { + struct rq *rq = task_rq(p); + bool queued, running; +@@ -1106,20 +1106,6 @@ static void __do_set_cpus_allowed_tail(struct task_struct *p, + set_curr_task(rq, p); + } + +-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +-{ +-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +- if (__migrate_disabled(p)) { +- lockdep_assert_held(&p->pi_lock); +- +- cpumask_copy(&p->cpus_mask, new_mask); +- p->migrate_disable_update = 1; +- return; +- } +-#endif +- __do_set_cpus_allowed_tail(p, new_mask); +-} +- + /* + * Change a given task's CPU affinity. Migrate the thread to a + * proper CPU and schedule it away if the CPU it's executing on +@@ -1179,7 +1165,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, + } + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || ++ p->cpus_ptr != &p->cpus_mask) + goto out; + + if (task_running(rq, p) || p->state == TASK_WAKING) { +@@ -3454,6 +3441,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) + BUG(); + } + ++static void migrate_disabled_sched(struct task_struct *p); ++ + /* + * __schedule() is the main scheduler function. 
+ * +@@ -3524,6 +3513,9 @@ static void __sched notrace __schedule(bool preempt) + rq_lock(rq, &rf); + smp_mb__after_spinlock(); + ++ if (__migrate_disabled(prev)) ++ migrate_disabled_sched(prev); ++ + /* Promote REQ to ACT */ + rq->clock_update_flags <<= 1; + update_rq_clock(rq); +@@ -5779,6 +5771,8 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) + BUG_ON(!next); + put_prev_task(rq, next); + ++ WARN_ON_ONCE(__migrate_disabled(next)); ++ + /* + * Rules for changing task_struct::cpus_mask are holding + * both pi_lock and rq->lock, such that holding either +@@ -7247,14 +7241,9 @@ update_nr_migratory(struct task_struct *p, long delta) + static inline void + migrate_disable_update_cpus_allowed(struct task_struct *p) + { +- struct rq *rq; +- struct rq_flags rf; +- +- rq = task_rq_lock(p, &rf); + p->cpus_ptr = cpumask_of(smp_processor_id()); + update_nr_migratory(p, -1); + p->nr_cpus_allowed = 1; +- task_rq_unlock(rq, p, &rf); + } + + static inline void +@@ -7272,54 +7261,35 @@ migrate_enable_update_cpus_allowed(struct task_struct *p) + + void migrate_disable(void) + { +- struct task_struct *p = current; ++ preempt_disable(); + +- if (in_atomic() || irqs_disabled()) { ++ if (++current->migrate_disable == 1) { ++ this_rq()->nr_pinned++; ++ preempt_lazy_disable(); + #ifdef CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic++; ++ WARN_ON_ONCE(current->pinned_on_cpu >= 0); ++ current->pinned_on_cpu = smp_processor_id(); + #endif +- return; +- } +-#ifdef CONFIG_SCHED_DEBUG +- if (unlikely(p->migrate_disable_atomic)) { +- tracing_off(); +- WARN_ON_ONCE(1); + } +-#endif + +- if (p->migrate_disable) { +- p->migrate_disable++; +- return; +- } ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); + +- preempt_disable(); +- preempt_lazy_disable(); +- pin_current_cpu(); ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++ if (p->migrate_disable_scheduled) ++ return; + + migrate_disable_update_cpus_allowed(p); +- p->migrate_disable = 1; +- +- preempt_enable(); ++ p->migrate_disable_scheduled = 1; + } +-EXPORT_SYMBOL(migrate_disable); + + void migrate_enable(void) + { + struct task_struct *p = current; +- +- if (in_atomic() || irqs_disabled()) { +-#ifdef CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic--; +-#endif +- return; +- } +- +-#ifdef CONFIG_SCHED_DEBUG +- if (unlikely(p->migrate_disable_atomic)) { +- tracing_off(); +- WARN_ON_ONCE(1); +- } +-#endif ++ struct rq *rq = this_rq(); ++ int cpu = task_cpu(p); + + WARN_ON_ONCE(p->migrate_disable <= 0); + if (p->migrate_disable > 1) { +@@ -7329,67 +7299,69 @@ void migrate_enable(void) + + preempt_disable(); + ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(current->pinned_on_cpu != cpu); ++ current->pinned_on_cpu = -1; ++#endif ++ ++ WARN_ON_ONCE(rq->nr_pinned < 1); ++ + p->migrate_disable = 0; ++ rq->nr_pinned--; ++ if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && ++ takedown_cpu_task) ++ wake_up_process(takedown_cpu_task); ++ ++ if (!p->migrate_disable_scheduled) ++ goto out; ++ ++ p->migrate_disable_scheduled = 0; ++ + migrate_enable_update_cpus_allowed(p); + +- if (p->migrate_disable_update) { +- struct rq *rq; ++ WARN_ON(smp_processor_id() != cpu); ++ if (!is_cpu_allowed(p, cpu)) { ++ struct migration_arg arg = { p }; + struct rq_flags rf; +- int cpu = task_cpu(p); + + rq = task_rq_lock(p, &rf); + update_rq_clock(rq); +- +- __do_set_cpus_allowed_tail(p, &p->cpus_mask); ++ arg.dest_cpu = select_fallback_rq(cpu, p); + task_rq_unlock(rq, p, &rf); + +- p->migrate_disable_update = 0; +- +- WARN_ON(smp_processor_id() != 
cpu); +- if (!cpumask_test_cpu(cpu, &p->cpus_mask)) { +- struct migration_arg arg = { p }; +- struct rq_flags rf; ++ preempt_lazy_enable(); ++ preempt_enable(); + +- rq = task_rq_lock(p, &rf); +- update_rq_clock(rq); +- arg.dest_cpu = select_fallback_rq(cpu, p); +- task_rq_unlock(rq, p, &rf); +- +- unpin_current_cpu(); +- preempt_lazy_enable(); +- preempt_enable(); +- +- sleeping_lock_inc(); +- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); +- sleeping_lock_dec(); +- tlb_migrate_finish(p->mm); ++ sleeping_lock_inc(); ++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); ++ sleeping_lock_dec(); ++ tlb_migrate_finish(p->mm); + +- return; +- } ++ return; + } +- unpin_current_cpu(); ++ ++out: + preempt_lazy_enable(); + preempt_enable(); + } + EXPORT_SYMBOL(migrate_enable); + +-#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +-void migrate_disable(void) ++int cpu_nr_pinned(int cpu) + { +-#ifdef CONFIG_SCHED_DEBUG +- struct task_struct *p = current; ++ struct rq *rq = cpu_rq(cpu); + +- if (in_atomic() || irqs_disabled()) { +- p->migrate_disable_atomic++; +- return; +- } ++ return rq->nr_pinned; ++} + +- if (unlikely(p->migrate_disable_atomic)) { +- tracing_off(); +- WARN_ON_ONCE(1); +- } ++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++} + +- p->migrate_disable++; ++void migrate_disable(void) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ current->migrate_disable++; + #endif + barrier(); + } +@@ -7400,20 +7372,14 @@ void migrate_enable(void) + #ifdef CONFIG_SCHED_DEBUG + struct task_struct *p = current; + +- if (in_atomic() || irqs_disabled()) { +- p->migrate_disable_atomic--; +- return; +- } +- +- if (unlikely(p->migrate_disable_atomic)) { +- tracing_off(); +- WARN_ON_ONCE(1); +- } +- + WARN_ON_ONCE(p->migrate_disable <= 0); + p->migrate_disable--; + #endif + barrier(); + } + EXPORT_SYMBOL(migrate_enable); ++#else ++static void migrate_disabled_sched(struct task_struct *p) ++{ ++} + #endif +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index c90574112bca..78fa5911dd55 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -913,6 +913,10 @@ struct rq { + /* Must be inspected within a rcu lock section */ + struct cpuidle_state *idle_state; + #endif ++ ++#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP) ++ int nr_pinned; ++#endif + }; + + static inline int cpu_of(struct rq *rq) +diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c +index b8a8a8db2d75..0c80992aa337 100644 +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -22,6 +22,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1, + * Kernel threads bound to a single CPU can safely use + * smp_processor_id(): + */ ++ if (current->migrate_disable) ++ goto out; ++ + if (current->nr_cpus_allowed == 1) + goto out; + +-- +2.25.0 + diff --git a/debian/patches-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch b/debian/patches-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch new file mode 100644 index 000000000..8b695bf79 --- /dev/null +++ b/debian/patches-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch @@ -0,0 +1,121 @@ +From 5c24a957f345bc33f4ab1637d199224dfecaee0b Mon Sep 17 00:00:00 2001 +From: Scott Wood +Date: Sat, 12 Oct 2019 01:52:14 -0500 +Subject: [PATCH 313/319] sched: migrate_enable: Use stop_one_cpu_nowait() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 
6b39a1fa8c53cae08dc03afdae193b7d3a78a173 ] + +migrate_enable() can be called with current->state != TASK_RUNNING. +Avoid clobbering the existing state by using stop_one_cpu_nowait(). +Since we're stopping the current cpu, we know that we won't get +past __schedule() until migration_cpu_stop() has run (at least up to +the point of migrating us to another cpu). + +Signed-off-by: Scott Wood +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + include/linux/stop_machine.h | 2 ++ + kernel/sched/core.c | 23 +++++++++++++---------- + kernel/stop_machine.c | 7 +++++-- + 3 files changed, 20 insertions(+), 12 deletions(-) + +diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h +index 6d3635c86dbe..82fc686ddd9e 100644 +--- a/include/linux/stop_machine.h ++++ b/include/linux/stop_machine.h +@@ -26,6 +26,8 @@ struct cpu_stop_work { + cpu_stop_fn_t fn; + void *arg; + struct cpu_stop_done *done; ++ /* Did not run due to disabled stopper; for nowait debug checks */ ++ bool disabled; + }; + + int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index e97ac751aad2..e465381b464d 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -990,6 +990,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, + struct migration_arg { + struct task_struct *task; + int dest_cpu; ++ bool done; + }; + + /* +@@ -1025,6 +1026,11 @@ static int migration_cpu_stop(void *data) + struct task_struct *p = arg->task; + struct rq *rq = this_rq(); + struct rq_flags rf; ++ int dest_cpu = arg->dest_cpu; ++ ++ /* We don't look at arg after this point. */ ++ smp_mb(); ++ arg->done = true; + + /* + * The original target CPU might have gone down and we might +@@ -1047,9 +1053,9 @@ static int migration_cpu_stop(void *data) + */ + if (task_rq(p) == rq) { + if (task_on_rq_queued(p)) +- rq = __migrate_task(rq, &rf, p, arg->dest_cpu); ++ rq = __migrate_task(rq, &rf, p, dest_cpu); + else +- p->wake_cpu = arg->dest_cpu; ++ p->wake_cpu = dest_cpu; + } + rq_unlock(rq, &rf); + raw_spin_unlock(&p->pi_lock); +@@ -7322,6 +7328,7 @@ void migrate_enable(void) + WARN_ON(smp_processor_id() != cpu); + if (!is_cpu_allowed(p, cpu)) { + struct migration_arg arg = { p }; ++ struct cpu_stop_work work; + struct rq_flags rf; + + rq = task_rq_lock(p, &rf); +@@ -7329,15 +7336,11 @@ void migrate_enable(void) + arg.dest_cpu = select_fallback_rq(cpu, p); + task_rq_unlock(rq, p, &rf); + +- preempt_lazy_enable(); +- preempt_enable(); +- +- sleeping_lock_inc(); +- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); +- sleeping_lock_dec(); ++ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, ++ &arg, &work); + tlb_migrate_finish(p->mm); +- +- return; ++ __schedule(true); ++ WARN_ON_ONCE(!arg.done && !work.disabled); + } + + out: +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c +index 067cb83f37ea..2d15c0d50625 100644 +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -86,8 +86,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) + enabled = stopper->enabled; + if (enabled) + __cpu_stop_queue_work(stopper, work, &wakeq); +- else if (work->done) +- cpu_stop_signal_done(work->done); ++ else { ++ work->disabled = true; ++ if (work->done) ++ cpu_stop_signal_done(work->done); ++ } + raw_spin_unlock_irqrestore(&stopper->lock, flags); + + wake_up_q(&wakeq); +-- +2.25.0 + diff --git a/debian/patches-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch 
b/debian/patches-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch new file mode 100644 index 000000000..39a0fa302 --- /dev/null +++ b/debian/patches-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch @@ -0,0 +1,87 @@ +From a3ce35e84633343c7341a0a6be4ebfa7e9c0000f Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 18 Oct 2019 13:04:15 +0200 +Subject: [PATCH 314/319] Revert "ARM: Initialize split page table locks for + vector page" +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 247074c44d8c3e619dfde6404a52295d8d671d38 ] + +I'm dropping this patch, with its original description: + +|ARM: Initialize split page table locks for vector page +| +|Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if +|PREEMPT_RT_FULL=y because vectors_user_mapping() creates a +|VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no +|ptl->lock has been allocated for the page. An attempt to coredump +|that page will result in a kernel NULL pointer dereference when +|follow_page() attempts to lock the page. +| +|The call tree to the NULL pointer dereference is: +| +| do_notify_resume() +| get_signal_to_deliver() +| do_coredump() +| elf_core_dump() +| get_dump_page() +| __get_user_pages() +| follow_page() +| pte_offset_map_lock() <----- a #define +| ... +| rt_spin_lock() +| +|The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. + +The patch named mm-shrink-the-page-frame-to-rt-size.patch was dropped +from the RT queue once the SPLIT_PTLOCK_CPUS feature (in a slightly +different shape) went upstream (somewhere between v3.12 and v3.14). + +I can see that the patch still allocates a lock which wasn't there +before. However I can't trigger a kernel oops like described in the +patch by triggering a coredump. + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + arch/arm/kernel/process.c | 24 ------------------------ + 1 file changed, 24 deletions(-) + +diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c +index 8d3c7ce34c24..82ab015bf42b 100644 +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -324,30 +324,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) + } + + #ifdef CONFIG_MMU +-/* +- * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not +- * initialized by pgtable_page_ctor() then a coredump of the vector page will +- * fail. 
+- */ +-static int __init vectors_user_mapping_init_page(void) +-{ +- struct page *page; +- unsigned long addr = 0xffff0000; +- pgd_t *pgd; +- pud_t *pud; +- pmd_t *pmd; +- +- pgd = pgd_offset_k(addr); +- pud = pud_offset(pgd, addr); +- pmd = pmd_offset(pud, addr); +- page = pmd_page(*(pmd)); +- +- pgtable_page_ctor(page); +- +- return 0; +-} +-late_initcall(vectors_user_mapping_init_page); +- + #ifdef CONFIG_KUSER_HELPERS + /* + * The vectors page is always readable from user space for the +-- +2.25.0 + diff --git a/debian/patches-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch b/debian/patches-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch new file mode 100644 index 000000000..ebb7c49b9 --- /dev/null +++ b/debian/patches-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch @@ -0,0 +1,129 @@ +From 4c69294b303a42d5c6d759ac3ca1512a59b01c44 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 19 Nov 2019 09:25:04 +0100 +Subject: [PATCH 315/319] locking: Make spinlock_t and rwlock_t a RCU section + on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 84440022a0e1c8c936d61f8f97593674a295d409 ] + +On !RT a locked spinlock_t and rwlock_t disables preemption which +implies a RCU read section. There is code that relies on that behaviour. + +Add an explicit RCU read section on RT while a sleeping lock (a lock +which would disables preemption on !RT) acquired. + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/locking/rtmutex.c | 6 ++++++ + kernel/locking/rwlock-rt.c | 6 ++++++ + 2 files changed, 12 insertions(+) + +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 63b3d6f306fa..c7d3ae01b4e5 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1142,6 +1142,7 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) + void __lockfunc rt_spin_lock(spinlock_t *lock) + { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); +@@ -1157,6 +1158,7 @@ void __lockfunc __rt_spin_lock(struct rt_mutex *lock) + void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) + { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); +@@ -1170,6 +1172,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock) + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); + migrate_enable(); ++ rcu_read_unlock(); + sleeping_lock_dec(); + } + EXPORT_SYMBOL(rt_spin_unlock); +@@ -1201,6 +1204,7 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock) + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); +@@ -1217,6 +1221,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } else +@@ -1233,6 +1238,7 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + 
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } +diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c +index c3b91205161c..0ae8c62ea832 100644 +--- a/kernel/locking/rwlock-rt.c ++++ b/kernel/locking/rwlock-rt.c +@@ -310,6 +310,7 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) + ret = do_read_rt_trylock(rwlock); + if (ret) { + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); +@@ -327,6 +328,7 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock) + ret = do_write_rt_trylock(rwlock); + if (ret) { + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); +@@ -338,6 +340,7 @@ EXPORT_SYMBOL(rt_write_trylock); + void __lockfunc rt_read_lock(rwlock_t *rwlock) + { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); + do_read_rt_lock(rwlock); +@@ -347,6 +350,7 @@ EXPORT_SYMBOL(rt_read_lock); + void __lockfunc rt_write_lock(rwlock_t *rwlock) + { + sleeping_lock_inc(); ++ rcu_read_lock(); + migrate_disable(); + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + do_write_rt_lock(rwlock); +@@ -358,6 +362,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock) + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + do_read_rt_unlock(rwlock); + migrate_enable(); ++ rcu_read_unlock(); + sleeping_lock_dec(); + } + EXPORT_SYMBOL(rt_read_unlock); +@@ -367,6 +372,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock) + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + do_write_rt_unlock(rwlock); + migrate_enable(); ++ rcu_read_unlock(); + sleeping_lock_dec(); + } + EXPORT_SYMBOL(rt_write_unlock); +-- +2.25.0 + diff --git a/debian/patches-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch b/debian/patches-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch new file mode 100644 index 000000000..a48f05e71 --- /dev/null +++ b/debian/patches-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch @@ -0,0 +1,55 @@ +From b26bb167211f678c47b2ffc0376aa357c4d506d1 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 29 Nov 2019 17:24:55 +0100 +Subject: [PATCH 316/319] sched/core: migrate_enable() must access + takedown_cpu_task on !HOTPLUG_CPU +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit a61d1977f692e46bad99a100f264981ba08cb4bd ] + +The variable takedown_cpu_task is never declared/used on !HOTPLUG_CPU +except for migrate_enable(). This leads to a link error. + +Don't use takedown_cpu_task in !HOTPLUG_CPU. 
+ +Reported-by: Dick Hollenbeck +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/cpu.c | 2 ++ + kernel/sched/core.c | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 5366c8c69c2f..b9d7ac61d707 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -846,7 +846,9 @@ static int take_cpu_down(void *_param) + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *takedown_cpu_task; ++#endif + + static int takedown_cpu(unsigned int cpu) + { +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index e465381b464d..cbd76324babd 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7314,9 +7314,11 @@ void migrate_enable(void) + + p->migrate_disable = 0; + rq->nr_pinned--; ++#ifdef CONFIG_HOTPLUG_CPU + if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && + takedown_cpu_task) + wake_up_process(takedown_cpu_task); ++#endif + + if (!p->migrate_disable_scheduled) + goto out; +-- +2.25.0 + diff --git a/debian/patches-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch b/debian/patches-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch new file mode 100644 index 000000000..9dcfc59e5 --- /dev/null +++ b/debian/patches-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch @@ -0,0 +1,41 @@ +From d61aff5dff024476104365ca6dfa862e241d6be4 Mon Sep 17 00:00:00 2001 +From: Daniel Wagner +Date: Mon, 16 Dec 2019 16:15:57 +0100 +Subject: [PATCH 317/319] lib/smp_processor_id: Adjust + check_preemption_disabled() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit af3c1c5fdf177870fb5e6e16b24e374696ab28f5 ] + +The current->migrate_disable counter is not always defined leading to +build failures with DEBUG_PREEMPT && !PREEMPT_RT_BASE. + +Restrict the access to ->migrate_disable to same set where +->migrate_disable is modified. 
+ +Signed-off-by: Daniel Wagner +Signed-off-by: Steven Rostedt (VMware) +[bigeasy: adjust condition + description] +Signed-off-by: Sebastian Andrzej Siewior +--- + lib/smp_processor_id.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c +index 0c80992aa337..2e7398534b66 100644 +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c +@@ -22,8 +22,10 @@ notrace static unsigned int check_preemption_disabled(const char *what1, + * Kernel threads bound to a single CPU can safely use + * smp_processor_id(): + */ ++#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG)) + if (current->migrate_disable) + goto out; ++#endif + + if (current->nr_cpus_allowed == 1) + goto out; +-- +2.25.0 + diff --git a/debian/patches-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch b/debian/patches-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch new file mode 100644 index 000000000..ad0107ae8 --- /dev/null +++ b/debian/patches-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch @@ -0,0 +1,57 @@ +From ce6105c1e1149f6a8272acf267011edcf92ba778 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 12 Dec 2019 10:53:59 +0100 +Subject: [PATCH 318/319] sched: migrate_enable: Busy loop until the migration + request is completed +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +[ Upstream commit 140d7f54a5fff02898d2ca9802b39548bf7455f1 ] + +If user task changes the CPU affinity mask of a running task it will +dispatch migration request if the current CPU is no longer allowed. This +might happen shortly before a task enters a migrate_disable() section. +Upon leaving the migrate_disable() section, the task will notice that +the current CPU is no longer allowed and will will dispatch its own +migration request to move it off the current CPU. +While invoking __schedule() the first migration request will be +processed and the task returns on the "new" CPU with "arg.done = 0". Its +own migration request will be processed shortly after and will result in +memory corruption if the stack memory, designed for request, was used +otherwise in the meantime. + +Spin until the migration request has been processed if it was accepted. 
+ +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Steven Rostedt (VMware) +--- + kernel/sched/core.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index cbd76324babd..4616c086dd26 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7329,7 +7329,7 @@ void migrate_enable(void) + + WARN_ON(smp_processor_id() != cpu); + if (!is_cpu_allowed(p, cpu)) { +- struct migration_arg arg = { p }; ++ struct migration_arg arg = { .task = p }; + struct cpu_stop_work work; + struct rq_flags rf; + +@@ -7342,7 +7342,10 @@ void migrate_enable(void) + &arg, &work); + tlb_migrate_finish(p->mm); + __schedule(true); +- WARN_ON_ONCE(!arg.done && !work.disabled); ++ if (!work.disabled) { ++ while (!arg.done) ++ cpu_relax(); ++ } + } + + out: +-- +2.25.0 + diff --git a/debian/patches-rt/0319-Linux-4.19.103-rt42-REBASE.patch b/debian/patches-rt/0319-Linux-4.19.103-rt42-REBASE.patch new file mode 100644 index 000000000..443bbdeb3 --- /dev/null +++ b/debian/patches-rt/0319-Linux-4.19.103-rt42-REBASE.patch @@ -0,0 +1,20 @@ +From 921dad4d00b430b1d8849cf6498e8a1af6220762 Mon Sep 17 00:00:00 2001 +From: "Steven Rostedt (VMware)" +Date: Fri, 17 Jan 2020 11:55:15 -0500 +Subject: [PATCH 319/319] Linux 4.19.103-rt42 REBASE +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.103-rt42.tar.xz + +--- + localversion-rt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/localversion-rt b/localversion-rt +index 1199ebade17b..8bdfb9a04683 100644 +--- a/localversion-rt ++++ b/localversion-rt +@@ -1 +1 @@ +--rt16 ++-rt42 +-- +2.25.0 + diff --git a/debian/patches-rt/series b/debian/patches-rt/series index 8d124318d..5b96c4dc2 100644 --- a/debian/patches-rt/series +++ b/debian/patches-rt/series @@ -287,4 +287,33 @@ 0287-revert-block.patch 0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch 0289-workqueue-rework.patch -0290-Linux-4.19.94-rt38-REBASE.patch +0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch +0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch +0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch +0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch +0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch +0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch +0296-x86-preempt-Check-preemption-level-before-looking-at.patch +0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch +0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch +0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch +0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch +0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch +0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch +0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch +0304-sched-Remove-dead-__migrate_disabled-check.patch +0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch +0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch +0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch +0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch +0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch +0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch +0311-sched-migrate_enable-Use-select_fallback_rq.patch +0312-sched-Lazy-migrate_disable-processing.patch +0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch +0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch 
+0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch +0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch +0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch +0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch +0319-Linux-4.19.103-rt42-REBASE.patch