Update to 3.15-rc3

aufs: Update to aufs3.x-rcN-20140421
[rt] Disable until it is updated for 3.15 or later

Refresh/drop patches as appropriate; in particular:
- update filenames in fbdev patches
- drop the ARM sunxi backports, which came from 3.15

svn path=/dists/trunk/linux/; revision=21296
Ben Hutchings 2014-05-02 01:45:17 +00:00
parent 36a42156d2
commit 13c1700f3a
335 changed files with 223 additions and 39208 deletions

debian/changelog

@@ -1,3 +1,13 @@
linux (3.15~rc3-1~exp1) UNRELEASED; urgency=medium

  * New upstream release candidate

  [ Ben Hutchings ]
  * aufs: Update to aufs3.x-rcN-20140421
  * [rt] Disable until it is updated for 3.15 or later

 -- Ben Hutchings <ben@decadent.org.uk>  Fri, 02 May 2014 01:54:56 +0100

linux (3.14.2-1) unstable; urgency=medium

  * New upstream stable update:


@@ -29,7 +29,7 @@ featuresets:
rt
[featureset-rt_base]
-enabled: true
+enabled: false
[description]
part-long-up: This kernel is not suitable for SMP (multi-processor,


@@ -1,71 +0,0 @@
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 18 Feb 2014 02:19:26 -0300
Subject: Bluetooth: allocate static minor for vhci
Origin: https://git.kernel.org/cgit/linux/kernel/git/bluetooth/bluetooth-next.git/commit?id=b075dd40c95d11c2c8690f6c4d6232fc0d9e7f56
Commit bfacbb9 (Bluetooth: Use devname:vhci module alias for virtual HCI
driver) added the module alias to hci_vhci module so it's possible to
create the /dev/vhci node. However, creating an alias without
specifying the minor doesn't allow the node to be created in advance,
triggering module auto-load when it's first accessed.
Starting with depmod from kmod 16, a warning is issued if there's a
devname alias that doesn't specify the major and minor.
Let's do the same as was done for uhid, kvm, fuse and others, specifying a
fixed minor. In systems with systemd as the init the following will
happen: on early boot systemd will call "kmod static-nodes" to read
/lib/modules/$(uname -r)/modules.devname and then create the nodes. When
first accessed these "dead" nodes will trigger the module loading.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
---
Documentation/devices.txt | 1 +
drivers/bluetooth/hci_vhci.c | 3 ++-
include/linux/miscdevice.h | 1 +
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 10378cc..04356f5 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -353,6 +353,7 @@ Your cooperation is appreciated.
133 = /dev/exttrp External device trap
134 = /dev/apm_bios Advanced Power Management BIOS
135 = /dev/rtc Real Time Clock
+ 137 = /dev/vhci Bluetooth virtual HCI driver
139 = /dev/openprom SPARC OpenBoot PROM
140 = /dev/relay8 Berkshire Products Octal relay card
141 = /dev/relay16 Berkshire Products ISO-16 relay card
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 1ef6990..add1c6a 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -359,7 +359,7 @@ static const struct file_operations vhci_fops = {
static struct miscdevice vhci_miscdev= {
.name = "vhci",
.fops = &vhci_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = VHCI_MINOR,
};
static int __init vhci_init(void)
@@ -385,3 +385,4 @@ MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
+MODULE_ALIAS_MISCDEV(VHCI_MINOR);
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3737f72..7bb6148 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -23,6 +23,7 @@
#define TEMP_MINOR 131 /* Temperature Sensor */
#define RTC_MINOR 135
#define EFI_RTC_MINOR 136 /* EFI Time services */
+#define VHCI_MINOR 137
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* DMAPI */
#define NVRAM_MINOR 144
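As background, the pattern this patch applies is the standard one for statically allocated misc-device minors. A minimal, self-contained sketch (module and device names here are hypothetical, not from the patch):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

#define EXAMPLE_MINOR 137			/* a fixed minor, as VHCI_MINOR is above */

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_miscdev = {
	.minor = EXAMPLE_MINOR,			/* fixed, instead of MISC_DYNAMIC_MINOR */
	.name  = "example",
	.fops  = &example_fops,
};

static int __init example_init(void)
{
	return misc_register(&example_miscdev);
}

static void __exit example_exit(void)
{
	misc_deregister(&example_miscdev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:example");		/* node name recorded in modules.devname */
MODULE_ALIAS_MISCDEV(EXAMPLE_MINOR);		/* records the fixed minor (misc major 10) */

With both aliases present, depmod can emit a complete modules.devname entry and "kmod static-nodes" can create /dev/example before the module is ever loaded.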


@@ -96,8 +96,8 @@ upstream submission.
fw_size = firmware->size / sizeof(u32);
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -363,10 +363,8 @@ static int ath3k_load_patch(struct usb_d
fw_version.rom_version);
@@ -370,10 +370,8 @@ static int ath3k_load_patch(struct usb_d
le32_to_cpu(fw_version.rom_version));
ret = request_firmware(&firmware, filename, &udev->dev);
- if (ret < 0) {
@@ -108,8 +108,8 @@ upstream submission.
pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
pt_version.build_version = *(int *)
@@ -425,10 +423,8 @@ static int ath3k_load_syscfg(struct usb_
fw_version.rom_version, clk_value, ".dfu");
@@ -432,10 +430,8 @@ static int ath3k_load_syscfg(struct usb_
le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
- if (ret < 0) {
@@ -140,7 +140,7 @@ upstream submission.
return -EIO;
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -658,10 +658,8 @@ static int bfusb_probe(struct usb_interf
@@ -664,10 +664,8 @@ static int bfusb_probe(struct usb_interf
skb_queue_head_init(&data->pending_q);
skb_queue_head_init(&data->completed_q);
@@ -154,7 +154,7 @@ upstream submission.
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -569,10 +569,8 @@ static int bt3c_open(bt3c_info_t *info)
@@ -570,10 +570,8 @@ static int bt3c_open(bt3c_info_t *info)
/* Load firmware */
err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
@@ -232,10 +232,10 @@ upstream submission.
where = 0;
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1095,10 +1095,8 @@ nvc0_graph_ctor_fw(struct nvc0_graph_pri
@@ -1221,10 +1221,8 @@ nvc0_graph_ctor_fw(struct nvc0_graph_pri
if (ret) {
snprintf(f, sizeof(f), "nouveau/%s", fwname);
ret = request_firmware(&fw, f, &device->pdev->dev);
ret = request_firmware(&fw, f, nv_device_base(device));
- if (ret) {
- nv_error(priv, "failed to load %s\n", fwname);
+ if (ret)
@@ -288,7 +288,7 @@ upstream submission.
rdev->me_fw->size, fw_name);
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2436,10 +2436,6 @@ int r600_init_microcode(struct radeon_de
@@ -2434,10 +2434,6 @@ int r600_init_microcode(struct radeon_de
out:
if (err) {
@@ -371,7 +371,7 @@ upstream submission.
card->name, firmware->size);
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1369,7 +1369,6 @@ static void load_firmware_cb(const struc
@@ -1373,7 +1373,6 @@ static void load_firmware_cb(const struc
tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
if (!fw) {
@@ -685,15 +685,13 @@ upstream submission.
printk(KERN_INFO "tda1004x: please rename the firmware file to %s\n",
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -951,14 +951,8 @@ static int tda10071_init(struct dvb_fron
@@ -952,12 +952,8 @@ static int tda10071_init(struct dvb_fron
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
- if (ret) {
- dev_err(&priv->i2c->dev, "%s: did not find the " \
- "firmware file. (%s) Please see " \
- "linux/Documentation/dvb/ for more " \
- "details on firmware-problems. (%d)\n",
- dev_err(&priv->i2c->dev,
- "%s: did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
- KBUILD_MODNAME, fw_file, ret);
+ if (ret)
goto error;
@@ -837,7 +835,7 @@ upstream submission.
fw_data = (void *)fw_entry->data;
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -3799,10 +3799,8 @@ static int pvr_boot(struct bttv *btv)
@@ -3814,10 +3814,8 @@ static int pvr_boot(struct bttv *btv)
int rc;
rc = request_firmware(&fw_entry, "hcwamc.rbf", &btv->c.pci->dev);
@@ -1063,12 +1061,12 @@ upstream submission.
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -2563,10 +2563,8 @@ static int s2255_probe(struct usb_interf
@@ -2306,10 +2306,8 @@ static int s2255_probe(struct usb_interf
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
- FIRMWARE_FILE_NAME, &dev->udev->dev)) {
- printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
- dev_err(&interface->dev, "sensoray 2255 failed to get firmware\n");
+ FIRMWARE_FILE_NAME, &dev->udev->dev))
goto errorREQFW;
- }
@@ -1077,7 +1075,7 @@ upstream submission.
pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8];
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -88,10 +88,8 @@ int s5p_mfc_load_firmware(struct s5p_mfc
@@ -86,10 +86,8 @@ int s5p_mfc_load_firmware(struct s5p_mfc
err = request_firmware((const struct firmware **)&fw_blob,
dev->variant->fw_name, dev->v4l2_dev.dev);
@@ -1089,7 +1087,7 @@ upstream submission.
if (fw_blob->size > dev->fw_size) {
mfc_err("MFC firmware is too big to be loaded\n");
release_firmware(fw_blob);
@@ -121,10 +119,8 @@ int s5p_mfc_reload_firmware(struct s5p_m
@@ -119,10 +117,8 @@ int s5p_mfc_reload_firmware(struct s5p_m
err = request_firmware((const struct firmware **)&fw_blob,
dev->variant->fw_name, dev->v4l2_dev.dev);
@@ -1220,7 +1218,7 @@ upstream submission.
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3696,16 +3696,13 @@ static int bnx2_request_uncached_firmwar
@@ -3700,16 +3700,13 @@ static int bnx2_request_uncached_firmwar
}
rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
@@ -1242,7 +1240,7 @@ upstream submission.
if (bp->mips_firmware->size < sizeof(*mips_fw) ||
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12637,11 +12637,8 @@ static int bnx2x_init_firmware(struct bn
@@ -12775,11 +12775,8 @@ static int bnx2x_init_firmware(struct bn
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
@@ -1257,7 +1255,7 @@ upstream submission.
if (rc) {
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11293,11 +11293,8 @@ static int tg3_request_firmware(struct t
@@ -11286,11 +11286,8 @@ static int tg3_request_firmware(struct t
{
const struct tg3_firmware_hdr *fw_hdr;
@@ -1450,7 +1448,7 @@ upstream submission.
fwh = (struct at76_fw_header *)(fwe->fw->data);
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1082,12 +1082,8 @@ static void ath9k_hif_usb_firmware_cb(co
@@ -1084,12 +1084,8 @@ static void ath9k_hif_usb_firmware_cb(co
struct hif_device_usb *hif_dev = context;
int ret;
@@ -1530,7 +1528,7 @@ upstream submission.
hdr = (struct b43legacy_fw_header *)((*fw)->data);
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -590,10 +590,8 @@ static const struct firmware *brcmf_sdio
@@ -663,10 +663,8 @@ static const struct firmware *brcmf_sdio
found:
err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
@@ -1598,7 +1596,7 @@ upstream submission.
IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1866,7 +1866,6 @@ il3945_read_ucode(struct il_priv *il)
@@ -1861,7 +1861,6 @@ il3945_read_ucode(struct il_priv *il)
sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
if (ret < 0) {
@@ -1608,7 +1606,7 @@ upstream submission.
else
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -928,13 +928,8 @@ static void iwl_req_fw_callback(const st
@@ -979,13 +979,8 @@ static void iwl_req_fw_callback(const st
memset(&pieces, 0, sizeof(pieces));
@@ -1636,7 +1634,7 @@ upstream submission.
}
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -420,11 +420,8 @@ static void mwifiex_fw_dpc(const struct
@@ -421,11 +421,8 @@ static void mwifiex_fw_dpc(const struct
bool init_failed = false;
struct wireless_dev *wdev;
@@ -1651,7 +1649,7 @@ upstream submission.
adapter->firmware = firmware;
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5530,16 +5530,12 @@ static int mwl8k_firmware_load_success(s
@@ -5715,16 +5715,12 @@ static int mwl8k_firmware_load_success(s
static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
{
struct mwl8k_priv *priv = context;
@@ -1669,7 +1667,7 @@ upstream submission.
priv->fw_helper = fw;
rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode,
true);
@@ -5574,11 +5570,8 @@ static void mwl8k_fw_state_machine(const
@@ -5759,11 +5755,8 @@ static void mwl8k_fw_state_machine(const
break;
case FW_STATE_LOADING_ALT:
@@ -1682,7 +1680,7 @@ upstream submission.
priv->fw_ucode = fw;
rc = mwl8k_firmware_load_success(priv);
if (rc)
@@ -5616,10 +5609,8 @@ retry:
@@ -5801,10 +5794,8 @@ retry:
/* Ask userland hotplug daemon for the device firmware */
rc = mwl8k_request_firmware(priv, fw_image, nowait);
@@ -1805,7 +1803,7 @@ upstream submission.
rt2x00_err(rt2x00dev, "Failed to read Firmware\n");
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -55,7 +55,6 @@ void rtl_fw_cb(const struct firmware *fi
@@ -115,7 +115,6 @@ void rtl_fw_cb(const struct firmware *fi
if (!err)
goto found_alt;
}
@@ -1851,7 +1849,7 @@ upstream submission.
wl1251_error("nvs size is not multiple of 32 bits: %zu",
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -752,10 +752,8 @@ static int wl12xx_fetch_firmware(struct
@@ -748,10 +748,8 @@ static int wl12xx_fetch_firmware(struct
ret = request_firmware(&fw, fw_name, wl->dev);
@@ -1970,7 +1968,7 @@ upstream submission.
}
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3945,10 +3945,8 @@ static ssize_t ipr_store_update_fw(struc
@@ -3983,10 +3983,8 @@ static ssize_t ipr_store_update_fw(struc
len = snprintf(fname, 99, "%s", buf);
fname[len-1] = '\0';
@@ -1984,7 +1982,7 @@ upstream submission.
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -690,9 +690,6 @@ static ssize_t pm8001_store_update_fw(st
@@ -676,9 +676,6 @@ static ssize_t pm8001_store_update_fw(st
pm8001_ha->dev);
if (err) {
@@ -1996,7 +1994,7 @@ upstream submission.
}
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1560,8 +1560,6 @@ qla1280_request_firmware(struct scsi_qla
@@ -1553,8 +1553,6 @@ qla1280_request_firmware(struct scsi_qla
err = request_firmware(&fw, fwname, &ha->pdev->dev);
if (err) {
@@ -2007,7 +2005,7 @@ upstream submission.
}
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5136,8 +5136,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha,
@@ -5358,8 +5358,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha,
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
@@ -2016,7 +2014,7 @@ upstream submission.
ql_log(ql_log_info, vha, 0x0084,
"Firmware images can be retrieved from: "QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
@@ -5238,8 +5236,6 @@ qla24xx_load_risc_blob(scsi_qla_host_t *
@@ -5461,8 +5459,6 @@ qla24xx_load_risc_blob(scsi_qla_host_t *
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
@@ -2042,7 +2040,7 @@ upstream submission.
if (qla82xx_validate_firmware_blob(vha,
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5315,8 +5315,6 @@ qla2x00_request_firmware(scsi_qla_host_t
@@ -5393,8 +5393,6 @@ qla2x00_request_firmware(scsi_qla_host_t
goto out;
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
@@ -2200,7 +2198,7 @@ upstream submission.
}
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -280,10 +280,8 @@ bool init_firmware(struct net_device *de
@@ -242,10 +242,8 @@ bool init_firmware(struct net_device *de
*/
if (rst_opt == OPT_SYSTEM_RESET) {
rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev);
@@ -2224,7 +2222,7 @@ upstream submission.
usb_set_intfdata(pusb_intf, NULL);
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -425,11 +425,8 @@ static int slic_card_download_gbrcv(stru
@@ -402,11 +402,8 @@ static int slic_card_download_gbrcv(stru
}
ret = request_firmware(&fw, file, &adapter->pcidev->dev);
@@ -2237,7 +2235,7 @@ upstream submission.
rcvucodelen = *(u32 *)(fw->data + index);
index += 4;
@@ -503,11 +500,8 @@ static int slic_card_download(struct ada
@@ -480,11 +477,8 @@ static int slic_card_download(struct ada
return -ENOENT;
}
ret = request_firmware(&fw, file, &adapter->pcidev->dev);
@@ -2494,7 +2492,7 @@ upstream submission.
BootMajorVersion = rec->data[0];
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -768,8 +768,6 @@ static int build_i2c_fw_hdr(__u8 *header
@@ -784,8 +784,6 @@ static int build_i2c_fw_hdr(__u8 *header
err = request_firmware(&fw, fw_name, dev);
if (err) {
@@ -2503,7 +2501,7 @@ upstream submission.
kfree(buffer);
return err;
}
@@ -1315,8 +1313,6 @@ static int download_fw(struct edgeport_s
@@ -1331,8 +1329,6 @@ static int download_fw(struct edgeport_s
err = request_firmware(&fw, fw_name, dev);
if (err) {
@@ -2526,8 +2524,8 @@ upstream submission.
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
release_firmware(fw_p);
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -741,10 +741,8 @@ static ssize_t broadsheet_loadstore_wave
return -EINVAL;
@@ -2540,8 +2538,8 @@ upstream submission.
/* try to enforce reasonable min max on waveform */
if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -677,10 +677,8 @@ static int metronomefb_probe(struct plat
a) request the waveform file from userspace
b) process waveform and decode into metromem */
@@ -2642,12 +2640,13 @@ upstream submission.
if (firmware->size < sizeof(header)) {
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -57,10 +57,8 @@ static int get_firmware(const struct fir
@@ -57,11 +57,8 @@ static int get_firmware(const struct fir
DE_ACT(("firmware requested: %s\n", card_fw[fw_index].data));
snprintf(name, sizeof(name), "ea/%s", card_fw[fw_index].data);
err = request_firmware(fw_entry, name, pci_device(chip));
- if (err < 0)
- snd_printk(KERN_ERR "get_firmware(): Firmware not available (%d)\n", err);
- dev_err(chip->card->dev,
- "get_firmware(): Firmware not available (%d)\n", err);
#ifdef CONFIG_PM_SLEEP
- else
+ if (!err)
@@ -2656,27 +2655,28 @@ upstream submission.
return err;
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -879,10 +879,8 @@ static int snd_emu10k1_emu1010_init(stru
@@ -887,12 +887,8 @@ static int snd_emu10k1_emu1010_init(stru
}
err = request_firmware(&emu->firmware, filename, &emu->pci->dev);
- if (err != 0) {
- snd_printk(KERN_ERR "emu1010: firmware: %s not found. Err = %d\n", filename, err);
- dev_info(emu->card->dev,
- "emu1010: firmware: %s not found. Err = %d\n",
- filename, err);
+ if (err != 0)
return err;
- }
snd_printk(KERN_INFO "emu1010: firmware file = %s, size = 0x%zx\n",
dev_info(emu->card->dev,
"emu1010: firmware file = %s, size = 0x%zx\n",
filename, emu->firmware->size);
}
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3792,11 +3792,8 @@ static void azx_firmware_cb(const struct
@@ -1407,10 +1407,8 @@ static void azx_firmware_cb(const struct
struct azx *chip = card->private_data;
struct pci_dev *pci = chip->pci;
- if (!fw) {
- snd_printk(KERN_ERR SFX "%s: Cannot load firmware, aborting\n",
- pci_name(chip->pci));
- dev_err(card->dev, "Cannot load firmware, aborting\n");
+ if (!fw)
goto error;
- }
@@ -2695,12 +2695,13 @@ upstream submission.
}
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -558,10 +558,8 @@ int snd_mixart_setup_firmware(struct mix
@@ -570,11 +570,8 @@ int snd_mixart_setup_firmware(struct mix
for (i = 0; i < 3; i++) {
sprintf(path, "mixart/%s", fw_files[i]);
- if (request_firmware(&fw_entry, path, &mgr->pci->dev)) {
- snd_printk(KERN_ERR "miXart: can't load firmware %s\n", path);
- dev_err(&mgr->pci->dev,
- "miXart: can't load firmware %s\n", path);
+ if (request_firmware(&fw_entry, path, &mgr->pci->dev))
return -ENOENT;
- }
@@ -2709,12 +2710,13 @@ upstream submission.
release_firmware(fw_entry);
--- a/sound/pci/pcxhr/pcxhr_hwdep.c
+++ b/sound/pci/pcxhr/pcxhr_hwdep.c
@@ -381,11 +381,8 @@ int pcxhr_setup_firmware(struct pcxhr_mg
@@ -385,12 +385,8 @@ int pcxhr_setup_firmware(struct pcxhr_mg
if (!fw_files[fw_set][i])
continue;
sprintf(path, "pcxhr/%s", fw_files[fw_set][i]);
- if (request_firmware(&fw_entry, path, &mgr->pci->dev)) {
- snd_printk(KERN_ERR "pcxhr: can't load firmware %s\n",
- dev_err(&mgr->pci->dev,
- "pcxhr: can't load firmware %s\n",
- path);
+ if (request_firmware(&fw_entry, path, &mgr->pci->dev))
return -ENOENT;
@@ -2739,21 +2741,22 @@ upstream submission.
if (err) {
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5145,10 +5145,8 @@ static int hdsp_request_fw_loader(struct
@@ -5167,11 +5167,8 @@ static int hdsp_request_fw_loader(struct
return -EINVAL;
}
- if (request_firmware(&fw, fwfile, &hdsp->pci->dev)) {
- snd_printk(KERN_ERR "Hammerfall-DSP: cannot load firmware %s\n", fwfile);
- dev_err(hdsp->card->dev,
- "cannot load firmware %s\n", fwfile);
+ if (request_firmware(&fw, fwfile, &hdsp->pci->dev))
return -ENOENT;
- }
if (fw->size < HDSP_FIRMWARE_SIZE) {
snd_printk(KERN_ERR "Hammerfall-DSP: too short firmware size %d (expected %d)\n",
(int)fw->size, HDSP_FIRMWARE_SIZE);
dev_err(hdsp->card->dev,
"too short firmware size %d (expected %d)\n",
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -893,10 +893,8 @@ static int wm2000_i2c_probe(struct i2c_c
@@ -891,10 +891,8 @@ static int wm2000_i2c_probe(struct i2c_c
}
ret = request_firmware(&fw, filename, &i2c->dev);
@@ -2771,32 +2774,32 @@ upstream submission.
ret = request_firmware(&fw, fwname, &device->dev);
if (ret < 0) {
kfree(rec);
- snd_printk(KERN_ERR PREFIX "error requesting ezusb "
- "firmware %s.\n", fwname);
- dev_err(&intf->dev,
- "error requesting ezusb firmware %s.\n", fwname);
return ret;
}
ret = usb6fire_fw_ihex_init(fw, rec);
@@ -292,8 +290,6 @@ static int usb6fire_fw_fpga_upload(
@@ -296,8 +294,6 @@ static int usb6fire_fw_fpga_upload(
ret = request_firmware(&fw, fwname, &device->dev);
if (ret < 0) {
- snd_printk(KERN_ERR PREFIX "unable to get fpga firmware %s.\n",
- dev_err(&intf->dev, "unable to get fpga firmware %s.\n",
- fwname);
kfree(buffer);
return -EIO;
}
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -3265,11 +3265,8 @@ int snd_cs46xx_start_dsp(struct snd_cs46
@@ -3284,11 +3284,8 @@ int snd_cs46xx_start_dsp(struct snd_cs46
#ifdef CONFIG_SND_CS46XX_NEW_DSP
for (i = 0; i < CS46XX_DSP_MODULES; i++) {
err = load_firmware(chip, &chip->modules[i], module_names[i]);
- if (err < 0) {
- snd_printk(KERN_ERR "firmware load error [%s]\n",
- dev_err(chip->card->dev, "firmware load error [%s]\n",
- module_names[i]);
+ if (err < 0)
return err;
- }
err = cs46xx_dsp_load_module(chip, chip->modules[i]);
if (err < 0) {
snd_printk(KERN_ERR "image download error [%s]\n",
dev_err(chip->card->dev, "image download error [%s]\n",


@@ -1,30 +0,0 @@
From: Ben Hutchings <ben@decadent.org.uk>
Date: Sun, 9 Mar 2014 03:58:52 +0000
Subject: bfa: Replace large udelay() with mdelay()
Bug-Debian: https://bugs.debian.org/741142
Forwarded: http://mid.gmane.org/1394337858.2861.72.camel@deadeye.wl.decadent.org.uk
udelay() does not work on some architectures for values above
2000 microseconds; in particular, on ARM it fails at build time:
ERROR: "__bad_udelay" [drivers/scsi/bfa/bfa.ko] undefined!
Reported-by: Vagrant Cascadian <vagrant@debian.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
drivers/scsi/bfa/bfa_ioc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e1..50c75e1 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
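For reference, the reason the one-line change suffices: on ARM, constant udelay() arguments above roughly 2000 microseconds are rejected at link time through the __bad_udelay symbol, while mdelay() expresses the same wall-clock wait safely. Sketched:

	udelay(10000);	/* 10 ms: breaks the ARM build with "__bad_udelay" */
	mdelay(10);	/* the same 10 ms delay, implemented as a loop of short delays */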


@@ -14,8 +14,8 @@ We need to allow it to be autoloaded on this model only, and then
un-blacklist it in udev.
---
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -753,7 +753,14 @@ static struct pci_device_id via_pci_tabl
.driver_data = UNICHROME_VX900 },
{ }
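For orientation, the table being extended holds ordinary pci_device_id entries, with driver_data carrying the chip variant. A sketch of one entry (the 0x7122 device ID is a placeholder, not taken from the patch):

	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x7122),	/* hypothetical ID */
	  .driver_data = UNICHROME_VX900 },
	{ }	/* zero entry terminates the table */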


@@ -26,5 +26,5 @@ rm drivers/staging/wlags49_h2/ap_*.c
rm drivers/staging/wlags49_h2/sta_*.c
# These include apparently obfuscated code
rm drivers/video/nvidia/
rm drivers/video/riva/
rm drivers/video/fbdev/nvidia/
rm drivers/video/fbdev/riva/


@@ -17,12 +17,11 @@ diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192
index ad82bc3..282e293 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -5,5 +5,6 @@ config RTL8192E
@@ -5,4 +5,5 @@ config RTL8192E
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
+ select FW_LOADER
default N
---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index abcd22f..1a95d1f 100644


@@ -13,9 +13,9 @@ These drivers are also largely redundant with nouveau. The RIVA 128
probably discontinued 10 years ago.
---
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1028,101 +1028,6 @@ config FB_ATMEL_STN
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -991,101 +991,6 @@ config FB_ATMEL_STN
If unsure, say N.
@@ -117,9 +117,9 @@ probably discontinued 10 years ago.
config FB_I740
tristate "Intel740 support"
depends on FB && PCI
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -38,8 +38,6 @@ obj-$(CONFIG_FB_PM2) += pm2fb.o
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -22,8 +22,6 @@ obj-$(CONFIG_FB_PM3) += pm3fb.o
obj-$(CONFIG_FB_I740) += i740fb.o
obj-$(CONFIG_FB_MATROX) += matrox/


@@ -7,19 +7,17 @@ Author: Roland Stigge <stigge@antcom.de>
Bug-Debian: http://bugs.debian.org/708094
Forwarded: not-needed
Index: linux-3.8.12/arch/powerpc/boot/Makefile
===================================================================
--- linux-3.8.12.orig/arch/powerpc/boot/Makefile 2013-05-13 21:05:41.000000000 +0200
+++ linux-3.8.12/arch/powerpc/boot/Makefile 2013-05-14 07:48:26.434286772 +0200
@@ -204,7 +204,6 @@
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -204,7 +204,6 @@ image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
-image-$(CONFIG_DEFAULT_UIMAGE) += uImage
image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
#
@@ -262,23 +261,6 @@
@@ -262,23 +261,6 @@ image-$(CONFIG_MPC834x_MDS) += cuImage.
image-$(CONFIG_MPC836x_MDS) += cuImage.mpc836x_mds
image-$(CONFIG_ASP834x) += dtbImage.asp834x-redboot

File diff suppressed because it is too large


@@ -105,7 +105,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -727,6 +727,14 @@ next_menu:
@@ -732,6 +732,14 @@ next_menu:
return 0;
}
@@ -120,7 +120,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
int conf_write(const char *name)
{
FILE *out;
@@ -1159,7 +1167,10 @@ bool conf_set_all_new_symbols(enum conf_
@@ -1164,7 +1172,10 @@ bool conf_set_all_new_symbols(enum conf_
bool has_changed = false;
for_all_symbols(i, sym) {
@@ -134,9 +134,9 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
case S_BOOLEAN:
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -109,6 +109,8 @@ struct symbol {
/* choice values need to be set before calculating this symbol value */
#define SYMBOL_NEED_SET_CHOICE_VALUES 0x100000
@@ -112,6 +112,8 @@ struct symbol {
/* Set symbol to y if allnoconfig; used for symbols that hide others */
#define SYMBOL_ALLNOCONFIG_Y 0x200000
+#define SYMBOL_NEW 0x200000 /* symbol is new (loaded config did not provide a value) */
+
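The new flag lets tooling distinguish options the loaded .config never mentioned. A hypothetical use of it (the reporting loop below is illustrative, not the patch's code), built on kconfig's own symbol-walking macro:

	struct symbol *sym;
	int i;

	for_all_symbols(i, sym)
		if ((sym->flags & SYMBOL_NEW) && sym->name)
			fprintf(stderr, "new symbol: %s\n", sym->name);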


@@ -1,7 +1,7 @@
From: J. R. Okajima <hooanon05@yahoo.co.jp>
Date: Tue Apr 1 11:01:55 2014 +0900
Subject: aufs3.14-20140407
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/66ac1c999bb00cd649d702e8b0c8e8ab7fa9b352/tree/
Date: Tue Apr 15 12:26:44 2014 +0900
Subject: aufs3.x-rcN-20140421
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/8803c2d8d64effad56733f141431af5a2491b19f/tree/
Bug-Debian: http://bugs.debian.org/541828
Patch generated by debian/patches/features/all/aufs3/gen-patch
@@ -1704,8 +1704,8 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
--- a/fs/aufs/branch.c 1970-01-01 01:00:00.000000000 +0100
+++ b/fs/aufs/branch.c 2014-01-20 03:24:33.508760970 +0000
@@ -0,0 +1,1219 @@
+++ b/fs/aufs/branch.c 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,1220 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
+ *
@@ -2757,7 +2757,7 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ unsigned int mnt_flags;
+ unsigned long long ull, max;
+ aufs_bindex_t br_id;
+ unsigned char verbose;
+ unsigned char verbose, writer;
+ struct file *file, *hf, **array;
+ struct inode *inode;
+ struct au_hfile *hfile;
@@ -2826,11 +2826,12 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ hf = hfile->hf_file;
+ /* fi_read_unlock(file); */
+ spin_lock(&hf->f_lock);
+ hf->f_mode &= ~FMODE_WRITE;
+ writer = !!(hf->f_mode & FMODE_WRITER);
+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER);
+ spin_unlock(&hf->f_lock);
+ if (!file_check_writeable(hf)) {
+ if (writer) {
+ put_write_access(file_inode(hf));
+ __mnt_drop_write(hf->f_path.mnt);
+ file_release_write(hf);
+ }
+ }
+
@@ -11259,7 +11260,7 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ return ret;
+}
--- a/fs/aufs/file.c 1970-01-01 01:00:00.000000000 +0100
+++ b/fs/aufs/file.c 2014-01-20 03:24:33.512760970 +0000
+++ b/fs/aufs/file.c 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
@@ -11946,8 +11947,8 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+ read_descriptor_t *desc,
+ unsigned long from)
+ unsigned long from,
+ unsigned long count)
+{ AuUnsupport(); return 0; }
+static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
+ bool *writeback)
@@ -13275,8 +13276,8 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ }
+}
--- a/fs/aufs/hnotify.c 1970-01-01 01:00:00.000000000 +0100
+++ b/fs/aufs/hnotify.c 2014-04-08 03:07:56.523497471 +0100
@@ -0,0 +1,710 @@
+++ b/fs/aufs/hnotify.c 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
+ *
@@ -13908,8 +13909,10 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ p[len] = 0;
+ }
+
+ /* NFS fires the event for silly-renamed one from kworker */
+ f = 0;
+ if (!dir->i_nlink)
+ if (!dir->i_nlink
+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE)))
+ f = AuWkq_NEST;
+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+ if (unlikely(err)) {
@@ -27075,7 +27078,7 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ return 0;
+}
--- a/fs/aufs/vfsub.c 1970-01-01 01:00:00.000000000 +0100
+++ b/fs/aufs/vfsub.c 2014-01-20 03:24:33.516760970 +0000
+++ b/fs/aufs/vfsub.c 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
@@ -27384,14 +27387,14 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ tmp.dentry = src_dentry->d_parent;
+ err = security_path_rename(&tmp, src_dentry, path, d);
+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
+ delegated_inode);
+ delegated_inode, /*flags*/0);
+ lockdep_on();
+ if (!err) {
+ int did;
@@ -30048,8 +30051,8 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
--- a/fs/aufs/wkq.c 1970-01-01 01:00:00.000000000 +0100
+++ b/fs/aufs/wkq.c 2014-01-20 03:24:33.520760970 +0000
@@ -0,0 +1,212 @@
+++ b/fs/aufs/wkq.c 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
+ *
@@ -30155,7 +30158,8 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+{
+ if (au_ftest_wkq(wkinfo->flags, NEST)) {
+ if (au_wkq_test()) {
+ AuWarn1("wkq from wkq, due to a dead dir by UDBA?\n");
+ AuWarn1("wkq from wkq, unless silly-rename on NFS,"
+ " due to a dead dir by UDBA?\n");
+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+ }
+ } else
@@ -31674,7 +31678,7 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+ return err;
+}
--- a/include/uapi/linux/aufs_type.h 1970-01-01 01:00:00.000000000 +0100
+++ b/include/uapi/linux/aufs_type.h 2014-04-08 03:07:56.523497471 +0100
+++ b/include/uapi/linux/aufs_type.h 2014-05-02 02:24:14.519131596 +0100
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2005-2014 Junjiro R. Okajima
@@ -31717,7 +31721,7 @@ Patch generated by debian/patches/features/all/aufs3/gen-patch
+
+#include <linux/limits.h>
+
+#define AUFS_VERSION "3.14-20140407"
+#define AUFS_VERSION "3.x-rcN-20140421"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
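(The magic simply packs the ASCII bytes of "aufs" into one 32-bit value: 'a'=0x61, 'u'=0x75, 'f'=0x66, 's'=0x73, so AUFS_SUPER_MAGIC == 0x61756673.)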


@@ -1,15 +1,15 @@
From: J. R. Okajima <hooanon05@yahoo.co.jp>
Date: Tue Apr 1 11:01:55 2014 +0900
Subject: aufs3.14 base patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/66ac1c999bb00cd649d702e8b0c8e8ab7fa9b352/tree/
Date: Tue Apr 15 12:26:44 2014 +0900
Subject: aufs3.x-rcN base patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/8803c2d8d64effad56733f141431af5a2491b19f/tree/
Bug-Debian: http://bugs.debian.org/541828
Patch headers added by debian/patches/features/all/aufs3/gen-patch
aufs3.14 base patch
aufs3.x-rcN base patch
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b..ec278ac 100644
index f70a230..138104b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -692,6 +692,24 @@ static inline int is_loop_device(struct file *file)
@@ -38,10 +38,10 @@ index 66e8c3b..ec278ac 100644
static ssize_t loop_attr_show(struct device *dev, char *page,
diff --git a/fs/inode.c b/fs/inode.c
index 4bcdad3..bc83168 100644
index f96d2a6..2d72083 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1497,7 +1497,7 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
@@ -1496,7 +1496,7 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
* This does the actual work of updating an inodes time or version. Must have
* had called mnt_want_write() before calling this.
*/
@@ -51,10 +51,10 @@ index 4bcdad3..bc83168 100644
if (inode->i_op->update_time)
return inode->i_op->update_time(inode, time, flags);
diff --git a/fs/splice.c b/fs/splice.c
index 12028fa..f26cbaf 100644
index 9bc07d2..897bde2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1111,8 +1111,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
@@ -1103,8 +1103,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
/*
* Attempt to initiate a splice from pipe to file.
*/
@@ -65,7 +65,7 @@ index 12028fa..f26cbaf 100644
{
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int);
@@ -1128,9 +1128,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
@@ -1120,9 +1120,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
/*
* Attempt to initiate a splice from a file to a pipe.
*/
@@ -79,10 +79,10 @@
ssize_t (*splice_read)(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 23b2a35..f3f635c 100644
index 7a9c5bc..ab5e957 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2669,6 +2669,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
@@ -2606,6 +2606,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);


@@ -1,18 +1,18 @@
From: J. R. Okajima <hooanon05@yahoo.co.jp>
Date: Tue Apr 1 11:01:55 2014 +0900
Subject: aufs3.14 kbuild patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/66ac1c999bb00cd649d702e8b0c8e8ab7fa9b352/tree/
Date: Thu Apr 10 21:15:16 2014 +0900
Subject: aufs3.x-rcN kbuild patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/8803c2d8d64effad56733f141431af5a2491b19f/tree/
Bug-Debian: http://bugs.debian.org/541828
Patch headers added by debian/patches/features/all/aufs3/gen-patch
aufs3.14 kbuild patch
aufs3.x-rcN kbuild patch
diff --git a/fs/Kconfig b/fs/Kconfig
index 7385e54..d5c769c 100644
index 312393f..78632ed 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -208,6 +208,7 @@ source "fs/ufs/Kconfig"
@@ -209,6 +209,7 @@ source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
source "fs/f2fs/Kconfig"
source "fs/efivarfs/Kconfig"
@@ -21,16 +21,16 @@ index 7385e54..d5c769c 100644
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index 47ac07b..0c6a294 100644
index f9cb987..a0c580f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -125,3 +125,4 @@ obj-y += exofs/ # Multiple modules
@@ -126,3 +126,4 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_AUFS_FS) += aufs/
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 3ce25b5..9faebdc 100644
index 6929571..351ca48 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -56,6 +56,7 @@ header-y += atmppp.h


@@ -1,15 +1,15 @@
From: J. R. Okajima <hooanon05@yahoo.co.jp>
Date: Tue Apr 1 11:01:55 2014 +0900
Subject: aufs3.14 mmap patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/66ac1c999bb00cd649d702e8b0c8e8ab7fa9b352/tree/
Date: Tue Apr 15 12:26:44 2014 +0900
Subject: aufs3.x-rcN mmap patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/8803c2d8d64effad56733f141431af5a2491b19f/tree/
Bug-Debian: http://bugs.debian.org/541828
Patch headers added by debian/patches/features/all/aufs3/gen-patch
aufs3.14 mmap patch
aufs3.x-rcN mmap patch
diff --git a/fs/buffer.c b/fs/buffer.c
index 27265a8..75427a6 100644
index 9ddb9fc..1059a0b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2448,7 +2448,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
@@ -37,10 +37,10 @@ index d4a3574..e44a744 100644
ino = inode->i_ino;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fb52b54..1aca72e 100644
index 442177b..2fd6081d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -264,7 +264,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
@@ -265,7 +265,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
const char *name = NULL;
if (file) {
@@ -51,7 +51,7 @@ index fb52b54..1aca72e 100644
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -1407,6 +1409,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
@@ -1408,6 +1410,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
if (file) {
@@ -75,7 +75,7 @@ index 678455d..ad0ce45 100644
ino = inode->i_ino;
pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c1b7414..02036e0 100644
index bf9811e..3c727c8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -18,6 +18,9 @@
@@ -88,7 +88,7 @@ index c1b7414..02036e0 100644
struct mempolicy;
struct anon_vma;
@@ -1152,6 +1155,87 @@ static inline int fixup_user_fault(struct task_struct *tsk,
@@ -1171,6 +1174,87 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -177,10 +177,10 @@ index c1b7414..02036e0 100644
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a..c21588b 100644
index 8967e20..a57b589 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -231,6 +231,7 @@ struct vm_region {
@@ -232,6 +232,7 @@ struct vm_region {
unsigned long vm_top; /* region allocated to here */
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
@@ -188,7 +188,7 @@ index 290901a..c21588b 100644
int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
@@ -299,6 +300,7 @@ struct vm_area_struct {
@@ -300,6 +301,7 @@ struct vm_area_struct {
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
units, *not* PAGE_CACHE_SIZE */
struct file * vm_file; /* File we map to (can be NULL). */
@@ -197,10 +197,10 @@ index 290901a..c21588b 100644
#ifndef CONFIG_MMU
diff --git a/kernel/fork.c b/kernel/fork.c
index a17621c..40d9f6a 100644
index 54a8d26..dcf08b6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -412,7 +412,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -416,7 +416,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
struct inode *inode = file_inode(file);
struct address_space *mapping = file->f_mapping;
@@ -210,10 +210,10 @@ index a17621c..40d9f6a 100644
atomic_dec(&inode->i_writecount);
mutex_lock(&mapping->i_mmap_mutex);
diff --git a/mm/filemap.c b/mm/filemap.c
index 7a13f6a..f1805df 100644
index a82fbe4..f7e38b6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1733,7 +1733,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -2082,7 +2082,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
@@ -256,11 +256,11 @@ index 539eeb9..5e700b1 100644
return error;
}
diff --git a/mm/memory.c b/mm/memory.c
index 22dfa61..81813d9 100644
index d0f0bef..1293484 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2755,7 +2755,7 @@ reuse:
set_page_dirty_balance(dirty_page, page_mkwrite);
@@ -2785,7 +2785,7 @@ reuse:
set_page_dirty_balance(dirty_page);
/* file_update_time outside page_lock */
if (vma->vm_file)
- file_update_time(vma->vm_file);
@ -268,20 +268,11 @@ index 22dfa61..81813d9 100644
}
put_page(dirty_page);
if (page_mkwrite) {
@@ -3467,7 +3467,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* file_update_time outside page_lock */
if (vma->vm_file && !page_mkwrite)
- file_update_time(vma->vm_file);
+ vma_file_update_time(vma);
} else {
unlock_page(vmf.page);
if (anon)
diff --git a/mm/mmap.c b/mm/mmap.c
index 20ff0c3..f743033 100644
index b1202cf..40dd067 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -249,7 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
@@ -250,7 +250,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
@@ -290,7 +281,7 @@ index 20ff0c3..f743033 100644
mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
return next;
@@ -859,7 +859,7 @@ again: remove_next = 1 + (end > next->vm_end);
@@ -861,7 +861,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
@@ -299,7 +290,7 @@ index 20ff0c3..f743033 100644
}
if (next->anon_vma)
anon_vma_merge(vma, next);
@@ -1639,8 +1639,8 @@ out:
@@ -1641,8 +1641,8 @@ out:
unmap_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
@@ -309,7 +300,7 @@ index 20ff0c3..f743033 100644
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
@@ -2429,7 +2429,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
@@ -2432,7 +2432,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
goto out_free_mpol;
if (new->vm_file)
@@ -318,7 +309,7 @@ index 20ff0c3..f743033 100644
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
@@ -2448,7 +2448,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
@@ -2451,7 +2451,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
@@ -327,7 +318,7 @@ index 20ff0c3..f743033 100644
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
@@ -2837,7 +2837,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
@@ -2840,7 +2840,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
@@ -354,10 +345,10 @@ index 632df45..02d770e 100644
goto out;
down_read(&mm->mmap_sem);
diff --git a/mm/nommu.c b/mm/nommu.c
index 8740213..ea7e336 100644
index 85f8d66..9f471fa 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -653,7 +653,7 @@ static void __put_nommu_region(struct vm_region *region)
@@ -655,7 +655,7 @@ static void __put_nommu_region(struct vm_region *region)
up_write(&nommu_region_sem);
if (region->vm_file)
@@ -366,7 +357,7 @@ index 8740213..ea7e336 100644
/* IO memory and memory shared directly out of the pagecache
* from ramfs/tmpfs mustn't be released here */
@@ -811,7 +811,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
@@ -820,7 +820,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
@@ -375,7 +366,7 @@ index 8740213..ea7e336 100644
put_nommu_region(vma->vm_region);
kmem_cache_free(vm_area_cachep, vma);
}
@@ -1377,7 +1377,7 @@ unsigned long do_mmap_pgoff(struct file *file,
@@ -1382,7 +1382,7 @@ unsigned long do_mmap_pgoff(struct file *file,
goto error_just_free;
}
}
@@ -384,7 +375,7 @@ index 8740213..ea7e336 100644
kmem_cache_free(vm_region_jar, region);
region = pregion;
result = start;
@@ -1453,10 +1453,10 @@ error_just_free:
@@ -1458,10 +1458,10 @@ error_just_free:
up_write(&nommu_region_sem);
error:
if (region->vm_file)
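The bulk of this patch is mechanical: every direct use of vma->vm_file for fput()/file_update_time()/allow_write_access() is rerouted through vma_* wrappers. A sketch of the idea, assuming the vm_prfile field the patch adds (the real aufs helpers differ in detail):

	/* prefer the "pretended" file so /proc/<pid>/maps can show the
	 * aufs branch file a mapping really comes from */
	static inline void vma_file_update_time(struct vm_area_struct *vma)
	{
		file_update_time(vma->vm_file);
		if (vma->vm_prfile)
			file_update_time(vma->vm_prfile);
	}

	static inline void vma_fput(struct vm_area_struct *vma)
	{
		if (vma->vm_file)
			fput(vma->vm_file);
		if (vma->vm_prfile)
			fput(vma->vm_prfile);
	}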


@@ -1,15 +1,15 @@
From: J. R. Okajima <hooanon05@yahoo.co.jp>
Date: Tue Apr 1 11:01:55 2014 +0900
Subject: aufs3.14 standalone patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/66ac1c999bb00cd649d702e8b0c8e8ab7fa9b352/tree/
Date: Tue Apr 15 12:26:44 2014 +0900
Subject: aufs3.x-rcN standalone patch
Origin: http://sourceforge.net/p/aufs/aufs3-standalone/ci/8803c2d8d64effad56733f141431af5a2491b19f/tree/
Bug-Debian: http://bugs.debian.org/541828
Patch headers added by debian/patches/features/all/aufs3/gen-patch
aufs3.14 standalone patch
aufs3.x-rcN standalone patch
diff --git a/fs/inode.c b/fs/inode.c
index bc83168..6dd1207 100644
index 2d72083..30b69da 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -57,6 +57,7 @@ static struct hlist_head *inode_hashtable __read_mostly;
@@ -20,7 +20,7 @@ index bc83168..6dd1207 100644
/*
* Empty aops. Can be used for the cases where the user does not
@@ -1513,6 +1514,7 @@ int update_time(struct inode *inode, struct timespec *time, int flags)
@@ -1512,6 +1513,7 @@ int update_time(struct inode *inode, struct timespec *time, int flags)
mark_inode_dirty_sync(inode);
return 0;
}
@@ -29,10 +29,10 @@ index bc83168..6dd1207 100644
/**
* touch_atime - update the access time
diff --git a/fs/namespace.c b/fs/namespace.c
index 2ffc5a2..785a51f 100644
index 182bc41..c88e101 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -455,6 +455,7 @@ void __mnt_drop_write(struct vfsmount *mnt)
@@ -453,6 +453,7 @@ void __mnt_drop_write(struct vfsmount *mnt)
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
@@ -40,7 +40,7 @@ index 2ffc5a2..785a51f 100644
/**
* mnt_drop_write - give up write access to a mount
@@ -1555,6 +1556,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
@@ -1564,6 +1565,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
}
return 0;
}
@@ -121,7 +121,7 @@ index 923fe4a..176b435 100644
static int fsnotify_mark_destroy(void *ignored)
{
diff --git a/fs/open.c b/fs/open.c
index b9ed8b2..3ea66972 100644
index 3d30eb1..311c320 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -62,6 +62,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
@@ -133,10 +133,10 @@ index b9ed8b2..3ea66972 100644
long vfs_truncate(struct path *path, loff_t length)
{
diff --git a/fs/splice.c b/fs/splice.c
index f26cbaf..ac02366 100644
index 897bde2..ecf04e0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1124,6 +1124,7 @@ long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
@@ -1116,6 +1116,7 @@ long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
return splice_write(pipe, out, ppos, len, flags);
}
@@ -144,7 +144,7 @@ index f26cbaf..ac02366 100644
/*
* Attempt to initiate a splice from a file to a pipe.
@@ -1150,6 +1151,7 @@ long do_splice_to(struct file *in, loff_t *ppos,
@@ -1142,6 +1143,7 @@ long do_splice_to(struct file *in, loff_t *ppos,
return splice_read(in, ppos, pipe, len, flags);
}
@@ -169,7 +169,7 @@ index b9d613e..ba3b618 100644
}
+EXPORT_SYMBOL_GPL(cap_mmap_file);
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index d3b6d2c..5076912 100644
index 8365909..20b0098 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -7,6 +7,7 @@
@@ -180,7 +180,7 @@ index d3b6d2c..5076912 100644
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
@@ -744,6 +745,7 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
@@ -740,6 +741,7 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
access);
}
@@ -189,7 +189,7 @@ index d3b6d2c..5076912 100644
int devcgroup_inode_mknod(int mode, dev_t dev)
{
diff --git a/security/security.c b/security/security.c
index 919cad9..f9e9e17 100644
index 8b774f3..e3190b9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -407,6 +407,7 @@ int security_path_rmdir(struct path *dir, struct dentry *dentry)
@@ -215,8 +215,8 @@ index 919cad9..f9e9e17 100644
+EXPORT_SYMBOL_GPL(security_path_link);
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry)
@@ -449,6 +452,7 @@ int security_path_truncate(struct path *path)
struct path *new_dir, struct dentry *new_dentry,
@@ -458,6 +461,7 @@ int security_path_truncate(struct path *path)
return 0;
return security_ops->path_truncate(path);
}
@@ -224,7 +224,7 @@ index 919cad9..f9e9e17 100644
int security_path_chmod(struct path *path, umode_t mode)
{
@@ -456,6 +460,7 @@ int security_path_chmod(struct path *path, umode_t mode)
@@ -465,6 +469,7 @@ int security_path_chmod(struct path *path, umode_t mode)
return 0;
return security_ops->path_chmod(path, mode);
}
@@ -232,7 +232,7 @@ index 919cad9..f9e9e17 100644
int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
@@ -463,6 +468,7 @@ int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
@@ -472,6 +477,7 @@ int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
return 0;
return security_ops->path_chown(path, uid, gid);
}
@@ -240,7 +240,7 @@ index 919cad9..f9e9e17 100644
int security_path_chroot(struct path *path)
{
@@ -539,6 +545,7 @@ int security_inode_readlink(struct dentry *dentry)
@@ -557,6 +563,7 @@ int security_inode_readlink(struct dentry *dentry)
return 0;
return security_ops->inode_readlink(dentry);
}
@@ -248,7 +248,7 @@ index 919cad9..f9e9e17 100644
int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
{
@@ -553,6 +560,7 @@ int security_inode_permission(struct inode *inode, int mask)
@@ -571,6 +578,7 @@ int security_inode_permission(struct inode *inode, int mask)
return 0;
return security_ops->inode_permission(inode, mask);
}
@@ -256,7 +256,7 @@ index 919cad9..f9e9e17 100644
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
@@ -675,6 +683,7 @@ int security_file_permission(struct file *file, int mask)
@@ -693,6 +701,7 @@ int security_file_permission(struct file *file, int mask)
return fsnotify_perm(file, mask);
}
@@ -264,7 +264,7 @@ index 919cad9..f9e9e17 100644
int security_file_alloc(struct file *file)
{
@@ -735,6 +744,7 @@ int security_mmap_file(struct file *file, unsigned long prot,
@@ -753,6 +762,7 @@ int security_mmap_file(struct file *file, unsigned long prot,
return ret;
return ima_file_mmap(file, prot);
}
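Almost every hunk in this patch is the same one-line change: an EXPORT_SYMBOL_GPL() added after an existing core function so that aufs, built as a module, can link against it. Sketched on a stand-in function:

	int example_vfs_helper(struct inode *inode)	/* stands in for update_time() etc. */
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(example_vfs_helper);		/* the added line */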


@@ -19,7 +19,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -532,8 +532,8 @@ bytes respectively. Such letter suffixes
@@ -546,8 +546,8 @@ bytes respectively. Such letter suffixes
ccw_timeout_log [S390]
See Documentation/s390/CommonIO for details.
@@ -32,7 +32,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
a single hierarchy
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -940,6 +940,14 @@ config MEMCG
@@ -954,6 +954,14 @@ config MEMCG
This config option also selects MM_OWNER config option, which
could in turn add some fork/exit overhead.
@@ -49,7 +49,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
depends on MEMCG && SWAP
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5222,7 +5222,7 @@ static void cgroup_release_agent(struct
@@ -4595,7 +4595,7 @@ static void cgroup_release_agent(struct
mutex_unlock(&cgroup_mutex);
}
@@ -58,9 +58,9 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
{
struct cgroup_subsys *ss;
char *token;
@@ -5238,17 +5238,29 @@ static int __init cgroup_disable(char *s
*/
for_each_builtin_subsys(ss, i) {
@@ -4607,17 +4607,29 @@ static int __init cgroup_disable(char *s
for_each_subsys(ss, i) {
if (!strcmp(token, ss->name)) {
- ss->disabled = 1;
- printk(KERN_INFO "Disabling %s control group"
@@ -89,17 +89,17 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+__setup("cgroup_enable=", cgroup_enable);
+
/**
* css_from_dir - get corresponding css from the dentry of a cgroup dir
* css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
* @dentry: directory dentry of interest
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7266,6 +7266,9 @@ static void mem_cgroup_bind(struct cgrou
@@ -7142,6 +7142,9 @@ static void mem_cgroup_bind(struct cgrou
}
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
struct cgroup_subsys memory_cgrp_subsys = {
+#ifdef CONFIG_MEMCG_DISABLED
+ .disabled = 1,
+#endif
.subsys_id = mem_cgroup_subsys_id,
.css_alloc = mem_cgroup_css_alloc,
.css_online = mem_cgroup_css_online,
.css_offline = mem_cgroup_css_offline,
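With CONFIG_MEMCG_DISABLED set, the memory controller now defaults to off and the new cgroup_enable= parameter turns it back on at boot. For example (an illustrative boot-loader command line, not from the patch):

	linux /vmlinuz root=/dev/sda1 ro cgroup_enable=memory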


@@ -1,29 +0,0 @@
From d6a6675d436897cd1b09e299436df3499abd753e Mon Sep 17 00:00:00 2001
From: Allen Pais <allen.pais@oracle.com>
Date: Fri, 13 Dec 2013 09:44:41 +0530
Subject: [PATCH 1/3] sparc64: use generic rwsem spinlocks rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Allen Pais <allen.pais@oracle.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/sparc/Kconfig | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -179,12 +179,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
- bool
- default y if SPARC32
+ def_bool PREEMPT_RT_FULL
config RWSEM_XCHGADD_ALGORITHM
- bool
- default y if SPARC64
+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
config GENERIC_HWEIGHT
bool


@@ -1,131 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 19 Mar 2013 14:44:30 +0100
Subject: [PATCH] kernel/SRCU: provide a static initializer
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
There are macros providing a static initializer for three out of the four
possible notifier types, namely:
ATOMIC_NOTIFIER_HEAD()
BLOCKING_NOTIFIER_HEAD()
RAW_NOTIFIER_HEAD()
This patch provides a static initializer for the fourth type to make the
set complete.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/notifier.h | 34 +++++++++++++++++++++++++---------
include/linux/srcu.h | 9 +++++----
2 files changed, 30 insertions(+), 13 deletions(-)
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
-
+
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
@@ -42,9 +42,7 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
- * chains are slightly more difficult to use because they require special
- * runtime initialization.
+ * often but notifier_blocks will seldom be removed.
*/
typedef int (*notifier_fn_t)(struct notifier_block *nb,
@@ -88,7 +86,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+/* srcu_notifier_heads must be cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-/* srcu_notifier_heads cannot be initialized statically */
+
+#define SRCU_NOTIFIER_INIT(name, pcpu) \
+ { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .head = NULL, \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
+ }
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, \
+ name##_head_srcu_array); \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
+
+#define SRCU_NOTIFIER_HEAD(name) \
+ _SRCU_NOTIFIER_HEAD(name, )
+
+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
+ _SRCU_NOTIFIER_HEAD(name, static)
+
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int
/*
* Declared notifiers so far. I can imagine quite a few more chains
- * over time (eg laptop power reset chains, reboot chain (to clean
+ * over time (eg laptop power reset chains, reboot chain (to clean
* device units up), device [un]mount chain, module load/unload chain,
- * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
-
+
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct
void process_srcu(struct work_struct *work);
-#define __SRCU_STRUCT_INIT(name) \
+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
{ \
.completed = -300, \
- .per_cpu_ref = &name##_srcu_array, \
+ .per_cpu_ref = &pcpu_name, \
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
@@ -104,11 +104,12 @@ void process_srcu(struct work_struct *wo
*/
#define DEFINE_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
#define DEFINE_STATIC_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(\
+ name, name##_srcu_array);
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
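
For illustration only, not part of the patch: with SRCU_NOTIFIER_HEAD_STATIC()
from the hunk above, a chain can be declared at file scope and used without any
runtime srcu_init_notifier_head() call. A minimal sketch (the demo_* names are
hypothetical, and the macro only exists with this patch applied):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

SRCU_NOTIFIER_HEAD_STATIC(demo_chain);	/* statically initialized head */

static int demo_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	pr_info("demo event %lu\n", action);
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_cb };

static int __init demo_init(void)
{
	srcu_notifier_chain_register(&demo_chain, &demo_nb);
	srcu_notifier_call_chain(&demo_chain, 1, NULL);	/* invokes demo_cb */
	return 0;
}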

View File

@ -1,24 +0,0 @@
From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 17:09:55 +0100
Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
This is a copy from kmap_atomic_prot().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/mm/iomap_32.c | 2 ++
1 file changed, 2 insertions(+)
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ WARN_ON(!pte_none(*(kmap_pte - idx)));
+
#ifdef CONFIG_PREEMPT_RT_FULL
current->kmap_pte[type] = pte;
#endif

View File

@ -1,29 +0,0 @@
From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The tlb should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look
right.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mm/highmem.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
- set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
+ set_top_pte(vaddr, __pte(0));
kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */

View File

@ -1,45 +0,0 @@
From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 1 Mar 2013 11:17:42 +0100
Subject: [PATCH 5/6] futex: Ensure lock/unlock symmetry versus pi_lock and
hash bucket lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
In exit_pi_state_list() we have the following locking construct:
spin_lock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
...
spin_unlock(&hb->lock);
In !RT this works, but on RT the migrate_enable() function, which is
called from spin_unlock(), sees atomic context due to the held pi_lock
and just decrements the migrate_disable_atomic counter of the
task. The next call to migrate_disable() then sees the counter being
negative and issues a warning. (That check arguably belongs in
migrate_enable() already.)
Fix this by dropping pi_lock before unlocking hb->lock and reacquiring
pi_lock after that. This is safe, as the loop code reevaluates head
again under the pi_lock.
Reported-by: Yong Zhang <yong.zhang@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/futex.c | 2 ++
1 file changed, 2 insertions(+)
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -708,7 +708,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_unlock(&hb->lock);
+ raw_spin_lock_irq(&curr->pi_lock);
continue;
}
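
The rule the fix encodes, shown as a standalone sketch (the two locks are
hypothetical stand-ins for hb->lock and curr->pi_lock): on RT, leave the
atomic context created by the raw lock before releasing the sleeping lock,
then re-take the raw lock and revalidate.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer);		/* sleeping lock on RT (hb->lock role) */
static DEFINE_RAW_SPINLOCK(inner);	/* always-atomic lock (pi_lock role) */

/* Called with outer held, then inner held with irqs off. */
static void restart_iteration(void)
{
	raw_spin_unlock_irq(&inner);	/* leave atomic context first ... */
	spin_unlock(&outer);		/* ... so migrate_enable() may block */
	raw_spin_lock_irq(&inner);	/* re-take; caller must re-check state */
}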

View File

@ -1,33 +0,0 @@
From 116a588e1e4b108bfd01b5ae8de602c12aec3323 Mon Sep 17 00:00:00 2001
From: Nicholas Mc Guire <der.herr@hofr.at>
Date: Fri, 17 Jan 2014 20:44:03 +0100
Subject: [PATCH 7/7] API cleanup - use local_lock not __local_lock for soft
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Trivial API cleanup: kernel/softirq.c was mimicking local_lock.
No change in functional behavior.
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/softirq.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -460,12 +460,12 @@ void __init softirq_early_init(void)
static void lock_softirq(int which)
{
- __local_lock(&__get_cpu_var(local_softirq_locks[which]));
+ local_lock(local_softirq_locks[which]);
}
static void unlock_softirq(int which)
{
- __local_unlock(&__get_cpu_var(local_softirq_locks[which]));
+ local_unlock(local_softirq_locks[which]);
}
static void do_single_softirq(int which, int need_rcu_bh_qs)

View File

@ -1,78 +0,0 @@
From b72b514282ffad0d665ea94932b968f388304079 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 19:01:05 +0100
Subject: [PATCH] HACK: printk: drop the logbuf_lock more often
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The lock is held with irqs off. The latency drops by 500us+ on my ARM box
with a "full" buffer after executing "dmesg" in the shell.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/printk.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1025,6 +1025,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
+ int attempts = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
@@ -1036,7 +1037,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
-
+ int num_msg;
+try_again:
+ attempts++;
+ if (attempts > 10) {
+ len = -EBUSY;
+ goto out;
+ }
+ num_msg = 0;
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
@@ -1057,6 +1065,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* move first record forward until length fits into the buffer */
@@ -1070,6 +1086,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* last message fitting into this dump */
@@ -1110,6 +1134,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
+out:
raw_spin_unlock_irq(&logbuf_lock);
kfree(text);
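
The underlying technique is ordinary lock-breaking; a generic sketch under
assumed helpers (have_record() and copy_record() are hypothetical, this is
not the printk code itself):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(buf_lock);

static bool have_record(void);	/* hypothetical: more records to copy? */
static void copy_record(void);	/* hypothetical: copy one record out */

static void drain_buffer(void)
{
	int batch = 0;

	raw_spin_lock_irq(&buf_lock);
	while (have_record()) {
		copy_record();
		if (++batch > 5) {	/* bound the irqs-off region */
			batch = 0;
			raw_spin_unlock_irq(&buf_lock);	/* latency window */
			raw_spin_lock_irq(&buf_lock);
			/* records may have been overwritten meanwhile;
			 * re-check, as the patch does via try_again */
		}
	}
	raw_spin_unlock_irq(&buf_lock);
}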

View File

@ -1,114 +0,0 @@
From 272e319977ab4feb79a28621215a9aca42d60003 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 3 Jan 2014 14:55:48 +0100
Subject: [PATCH] Revert "x86: Disable IST stacks for debug/int 3/stack fault
for PREEMPT_RT"
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Where do I start? Let me explain what is going on here. The code
sequence
| pushf
| pop %edx
| or $0x1,%dh
| push %edx
| mov $0xe0,%eax
| popf
| sysenter
triggers the bug. On a 64bit kernel we see the double fault (with 32bit and
64bit userland); on a 32bit kernel there is no problem. The reporter said
that the double fault does not happen on a 64bit kernel with 64bit userland,
because in that case the VDSO uses the "syscall" interface instead
of "sysenter".
The bug: "popf" loads the flags with the TF bit set, which enables
"single stepping" and leads to a debug exception. Usually on 64bit
we have a special IST stack for the debug exception. Due to patch [0] we
do not use the IST stack but the kernel stack instead. On 64bit the
sysenter instruction starts in the kernel with the stack address NULL. The
code sequence above enters the debug exception (TF flag) after the
sysenter instruction was executed, which sets the stack pointer to NULL,
and we get a fault (it seems that the debug exception saves some bytes
on the stack).
To fix the double fault I'm going to drop patch [0]. It is completely
pointless. In do_debug() and do_stack_segment() we disable preemption
which means the task can't leave the CPU. So it does not matter if we run
on IST or on kernel stack.
There is a patch [1] which drops the preempt_disable() call for a 32bit
kernel but not for 64bit, so there should be no regression.
And [1] seems valid even for this code sequence. We enter the debug
exception with a 256bytes long per cpu stack and migrate to the kernel
stack before calling do_debug().
[0] x86-disable-debug-stack.patch
[1] fix-rt-int3-x86_32-3.2-rt.patch
Cc: stable-rt@vger.kernel.org
Reported-by: Brian Silverman <bsilver16384@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/include/asm/page_64_types.h | 21 ++++++---------------
arch/x86/kernel/cpu/common.c | 2 --
arch/x86/kernel/dumpstack_64.c | 4 ----
3 files changed, 6 insertions(+), 21 deletions(-)
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -14,21 +14,12 @@
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define STACKFAULT_STACK 0
-# define DOUBLEFAULT_STACK 1
-# define NMI_STACK 2
-# define DEBUG_STACK 0
-# define MCE_STACK 3
-# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */
-#else
-# define STACKFAULT_STACK 1
-# define DOUBLEFAULT_STACK 2
-# define NMI_STACK 3
-# define DEBUG_STACK 4
-# define MCE_STACK 5
-# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-#endif
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1116,9 +1116,7 @@ DEFINE_PER_CPU(struct task_struct *, fpu
*/
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-#if DEBUG_STACK > 0
[DEBUG_STACK - 1] = DEBUG_STKSZ
-#endif
};
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -21,14 +21,10 @@
(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
static char x86_stack_ids[][8] = {
-#if DEBUG_STACK > 0
[ DEBUG_STACK-1 ] = "#DB",
-#endif
[ NMI_STACK-1 ] = "NMI",
[ DOUBLEFAULT_STACK-1 ] = "#DF",
-#if STACKFAULT_STACK > 0
[ STACKFAULT_STACK-1 ] = "#SS",
-#endif
[ MCE_STACK-1 ] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
[ N_EXCEPTION_STACKS ...

View File

@ -1,173 +0,0 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Wed, 13 Feb 2013 09:26:05 -0500
Subject: [PATCH] acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
We hit the following bug with 3.6-rt:
[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002
[ 5.898991] no locks held by swapper/3/0.
[ 5.898993] Modules linked in:
[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
[ 5.898997] Call Trace:
[ 5.899011] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
[ 5.899028] [<ffffffff81577923>] __schedule+0x793/0x7a0
[ 5.899032] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
[ 5.899034] [<ffffffff81577b89>] schedule+0x29/0x70
[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002
[ 5.899037] no locks held by swapper/7/0.
[ 5.899039] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
[ 5.899040] Modules linked in:
[ 5.899041]
[ 5.899045] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
[ 5.899047] Call Trace:
[ 5.899049] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
[ 5.899052] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
[ 5.899054] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
[ 5.899056] [<ffffffff81577923>] __schedule+0x793/0x7a0
[ 5.899059] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
[ 5.899062] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
[ 5.899068] [<ffffffff8130be64>] acpi_write_bit_register+0x33/0xb0
[ 5.899071] [<ffffffff81577b89>] schedule+0x29/0x70
[ 5.899072] [<ffffffff8130be13>] ? acpi_read_bit_register+0x33/0x51
[ 5.899074] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
[ 5.899077] [<ffffffff8131d1fc>] acpi_idle_enter_bm+0x8a/0x28e
[ 5.899079] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
[ 5.899081] [<ffffffff8107e5da>] ? this_cpu_load+0x1a/0x30
[ 5.899083] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
[ 5.899087] [<ffffffff8144c759>] cpuidle_enter+0x19/0x20
[ 5.899088] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
[ 5.899090] [<ffffffff8144c777>] cpuidle_enter_state+0x17/0x50
[ 5.899092] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
[ 5.899094] [<ffffffff8144d1a1>] cpuidle899101] [<ffffffff8130be13>] ?
As the ACPI code disables interrupts in acpi_idle_enter_bm and calls
code that grabs the ACPI lock, this causes issues, because on RT the
lock is currently a sleeping lock.
The lock was converted from a raw to a sleeping lock due to some
previous issues, and tests at the time showed it didn't seem to matter.
Unfortunately, it did matter for one of our boxes.
This patch converts the lock back to a raw lock. I've run this code on a
few of my own machines, one being my laptop which uses ACPI quite
extensively. I've been able to suspend and resume without issues.
[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: John Kacur <jkacur@gmail.com>
Cc: Clark Williams <clark@redhat.com>
Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/acpi/acpica/acglobal.h | 2 +-
drivers/acpi/acpica/hwregs.c | 4 ++--
drivers/acpi/acpica/hwxface.c | 4 ++--
drivers/acpi/acpica/utmutex.c | 4 ++--
include/acpi/platform/aclinux.h | 14 ++++++++++++++
5 files changed, 21 insertions(+), 7 deletions(-)
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -253,7 +253,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend
* interrupt level
*/
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
/* Mutex for _OSI support */
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
/* Clear the fixed events in PM1 A/B */
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
if (ACPI_FAILURE(status))
goto exit;
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
/*
* At this point, we know that the parent register is one of the
@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(voi
return_ACPI_STATUS (status);
}
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -73,6 +73,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
+#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
#else /* !__KERNEL__ */
@@ -210,6 +211,19 @@ static inline acpi_thread_id acpi_os_get
})
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+#define acpi_os_create_raw_lock(__handle) \
+({ \
+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ \
+ if (lock) { \
+ *(__handle) = lock; \
+ raw_spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+})
+
+#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
+
void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_size length);
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory
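
A usage sketch of the helpers added above (demo_lock is hypothetical; the
real user is acpi_gbl_hardware_lock, and since acpi_raw_spinlock is a
raw_spinlock_t pointer, the raw_spin_* calls take it directly):

static acpi_raw_spinlock demo_lock;

static acpi_status demo_setup(void)
{
	return acpi_os_create_raw_lock(&demo_lock);	/* AE_OK or AE_NO_MEMORY */
}

static void demo_touch_hw(void)
{
	acpi_cpu_flags flags;

	raw_spin_lock_irqsave(demo_lock, flags);	/* valid in atomic context on RT */
	/* ... access ACPI hardware registers ... */
	raw_spin_unlock_irqrestore(demo_lock, flags);
}

static void demo_teardown(void)
{
	acpi_os_delete_raw_lock(demo_lock);
}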

View File

@ -1,48 +0,0 @@
From 155cf657f6ddcade424253eb58d03a170dc9f64f Mon Sep 17 00:00:00 2001
From: Nicholas Mc Guire <der.herr@hofr.at>
Date: Wed, 20 Nov 2013 07:22:09 +0800
Subject: [PATCH 1/2] allow preemption in recursive migrate_disable call
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Minor cleanup in migrate_disable/migrate_enable. The recursive case
does not need to disable preemption, as the task is "pinned" to the
current cpu anyway, so it is safe to preempt it.
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/sched/core.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2695,13 +2695,12 @@ void migrate_disable(void)
WARN_ON_ONCE(p->migrate_disable_atomic);
#endif
- preempt_disable();
if (p->migrate_disable) {
p->migrate_disable++;
- preempt_enable();
return;
}
+ preempt_disable();
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
@@ -2727,13 +2726,12 @@ void migrate_enable(void)
#endif
WARN_ON_ONCE(p->migrate_disable <= 0);
- preempt_disable();
if (migrate_disable_count(p) > 1) {
p->migrate_disable--;
- preempt_enable();
return;
}
+ preempt_disable();
if (unlikely(migrate_disabled_updated(p))) {
/*
* See comment in update_migrate_disable() about locking.
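
Condensed, the control flow after this patch looks as follows (a sketch of
the hunks above; migrate_disable and pin_current_cpu exist only in the RT
tree, and the debug WARN_ON checks are omitted):

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migrate_disable) {
		p->migrate_disable++;	/* recursion: already pinned, stay preemptible */
		return;
	}
	preempt_disable();		/* only the first level pins the cpu */
	pin_current_cpu();
	p->migrate_disable = 1;
	preempt_enable();
}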

View File

@ -1,277 +0,0 @@
Subject: mm: Fixup all fault handlers to check current->pagefault_disable
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 17 Mar 2011 11:32:28 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Necessary for decoupling pagefault disable from preempt count.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/alpha/mm/fault.c | 2 +-
arch/arm/mm/fault.c | 2 +-
arch/avr32/mm/fault.c | 3 ++-
arch/cris/mm/fault.c | 2 +-
arch/frv/mm/fault.c | 2 +-
arch/ia64/mm/fault.c | 2 +-
arch/m32r/mm/fault.c | 2 +-
arch/m68k/mm/fault.c | 2 +-
arch/microblaze/mm/fault.c | 2 +-
arch/mips/mm/fault.c | 2 +-
arch/mn10300/mm/fault.c | 2 +-
arch/parisc/mm/fault.c | 2 +-
arch/powerpc/mm/fault.c | 2 +-
arch/s390/mm/fault.c | 3 ++-
arch/score/mm/fault.c | 2 +-
arch/sh/mm/fault.c | 2 +-
arch/sparc/mm/fault_32.c | 2 +-
arch/sparc/mm/fault_64.c | 2 +-
arch/tile/mm/fault.c | 2 +-
arch/um/kernel/trap.c | 2 +-
arch/x86/mm/fault.c | 2 +-
arch/xtensa/mm/fault.c | 2 +-
22 files changed, 24 insertions(+), 22 deletions(-)
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
- if (!mm || in_atomic())
+ if (!mm || in_atomic() || current->pagefault_disabled)
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsign
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
if (user_mode(regs))
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l
* If we're in an interrupt or have no user context, we must
* not take the fault...
*/
- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) ||
+ current->pagefault_disabled)
goto no_context;
local_irq_enable();
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -113,7 +113,7 @@ do_page_fault(unsigned long address, str
* user context, we must not take the fault.
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
if (user_mode(regs))
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
if (user_mode(__frame))
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long addres
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user context or are running in an
* atomic region then we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
if (user_mode(regs))
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
- if (unlikely(in_atomic() || !mm)) {
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(st
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto bad_area_nosemaphore;
if (user_mode(regs))
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -183,7 +183,7 @@ void do_page_fault(struct pt_regs *regs,
int fault;
unsigned int flags;
- if (in_atomic())
+ if (in_atomic() || current->pagefault_disabled)
goto no_context;
tsk = current;
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (in_atomic() || mm == NULL) {
+ if (in_atomic() || mm == NULL || current->pagefault_disabled) {
if (!user_mode(regs)) {
rc = SIGSEGV;
goto bail;
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -291,7 +291,8 @@ static inline int do_exception(struct pt
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
+ tsk->pagefault_disabled))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto bad_area_nosemaphore;
if (user_mode(regs))
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault(
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(in_atomic() || !mm)) {
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -324,7 +324,7 @@ asmlinkage void __kprobes do_sparc64_fau
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_r
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
*/
- if (in_atomic() || !mm) {
+ if (in_atomic() || !mm || current->pagefault_disabled) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -38,7 +38,7 @@ int handle_page_fault(unsigned long addr
* If the fault was during atomic operation, don't take the fault, just
* fail.
*/
- if (in_atomic())
+ if (in_atomic() || current->pagefault_disabled)
goto out_nosemaphore;
if (is_user)
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1103,7 +1103,7 @@ static void __kprobes noinline
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(in_atomic() || !mm)) {
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm) {
+ if (in_atomic() || !mm || current->pagefault_disabled) {
bad_page_fault(regs, address, SIGSEGV);
return;
}
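
Every handler gains the same three-part guard; factored out as a sketch
(pagefault_disabled is the per-task flag this RT series introduces, so the
helper below is hypothetical):

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/hardirq.h>

/* True if the fault handler must not sleep or take mmap_sem and
 * should go straight to the exception-fixup path instead. */
static inline bool fault_context_forbidden(struct mm_struct *mm)
{
	return in_atomic() || !mm || current->pagefault_disabled;
}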

View File

@ -1,58 +0,0 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Setup and remove the interrupt handler in clock event mode selection.
This avoids calling the (shared) interrupt handler when the device is
not used.
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since
commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
Patch based on what Sami Pietikäinen <Sami.Pietikainen@wapice.com> suggested].
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mach-at91/at91rm9200_time.c | 1 +
arch/arm/mach-at91/at91sam926x_time.c | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mod
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
+ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -78,7 +78,7 @@ static struct clocksource pit_clk = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-
+static struct irqaction at91sam926x_pit_irq;
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
@@ -87,6 +87,8 @@ pit_clkevt_mode(enum clock_event_mode mo
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
+ /* Set up irq handler */
+ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
/* update clocksource counter */
pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
@@ -99,6 +101,7 @@ pit_clkevt_mode(enum clock_event_mode mo
case CLOCK_EVT_MODE_UNUSED:
/* disable irq, leaving the clocksource active */
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
+ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
break;
case CLOCK_EVT_MODE_RESUME:
break;

View File

@ -1,33 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 May 2010 18:29:35 +0200
Subject: ARM: at91: tclib: Default to tclib timer for RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
RT is not too happy about the shared timer interrupt in AT91
devices. Default to tclib timer for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/misc/Kconfig | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -63,6 +63,7 @@ config ATMEL_PWM
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
+ default y if PREEMPT_RT_FULL
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
bool "TC Block use 32 KiHz clock"
depends on ATMEL_TCB_CLKSRC
- default y
+ default y if !PREEMPT_RT_FULL
help
Select this to use 32 KiHz base clock rate as TC block clock
source for clock events.

View File

@ -1,361 +0,0 @@
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: [PATCH] preempt-rt: Convert arm boot_lock to raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the
lock, the attempt to wake it when the lock becomes available will fail:
try_to_wake_up()
...
activate_task()
enqueue_task()
p->sched_class->enqueue_task(rq, p, flags)
Fix by converting boot_lock to a raw spin lock.
Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/mach-exynos/platsmp.c | 12 ++++++------
arch/arm/mach-msm/platsmp.c | 10 +++++-----
arch/arm/mach-omap2/omap-smp.c | 10 +++++-----
arch/arm/mach-prima2/platsmp.c | 10 +++++-----
arch/arm/mach-spear/platsmp.c | 10 +++++-----
arch/arm/mach-sti/platsmp.c | 10 +++++-----
arch/arm/mach-ux500/platsmp.c | 10 +++++-----
arch/arm/plat-versatile/platsmp.c | 10 +++++-----
8 files changed, 41 insertions(+), 41 deletions(-)
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -71,7 +71,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void exynos_secondary_init(unsigned int cpu)
{
@@ -84,8 +84,8 @@ static void exynos_secondary_init(unsign
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -97,7 +97,7 @@ static int exynos_boot_secondary(unsigne
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -126,7 +126,7 @@ static int exynos_boot_secondary(unsigne
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return -ETIMEDOUT;
}
}
@@ -165,7 +165,7 @@ static int exynos_boot_secondary(unsigne
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -30,7 +30,7 @@
extern void msm_secondary_startup(void);
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static inline int get_core_count(void)
{
@@ -50,8 +50,8 @@ static void msm_secondary_init(unsigned
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static void prepare_cold_cpu(unsigned int cpu)
@@ -88,7 +88,7 @@ static int msm_boot_secondary(unsigned i
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -121,7 +121,7 @@ static int msm_boot_secondary(unsigned i
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -42,7 +42,7 @@
/* SCU base address */
static void __iomem *scu_base;
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
@@ -73,8 +73,8 @@ static void omap4_secondary_init(unsigne
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -88,7 +88,7 @@ static int omap4_boot_secondary(unsigned
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
@@ -165,7 +165,7 @@ static int omap4_boot_secondary(unsigned
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return 0;
}
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -23,7 +23,7 @@
static void __iomem *scu_base;
static void __iomem *rsc_base;
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static struct map_desc scu_io_desc __initdata = {
.length = SZ_4K,
@@ -56,8 +56,8 @@ static void sirfsoc_secondary_init(unsig
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static struct of_device_id rsc_ids[] = {
@@ -95,7 +95,7 @@ static int sirfsoc_boot_secondary(unsign
/* make sure write buffer is drained */
mb();
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -127,7 +127,7 @@ static int sirfsoc_boot_secondary(unsign
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -20,7 +20,7 @@
#include <mach/spear.h>
#include "generic.h"
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -36,8 +36,8 @@ static void spear13xx_secondary_init(uns
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -48,7 +48,7 @@ static int spear13xx_boot_secondary(unsi
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -75,7 +75,7 @@ static int spear13xx_boot_secondary(unsi
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -34,7 +34,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void sti_secondary_init(unsigned int cpu)
{
@@ -49,8 +49,8 @@ void sti_secondary_init(unsigned int cpu
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -61,7 +61,7 @@ int sti_boot_secondary(unsigned int cpu,
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -92,7 +92,7 @@ int sti_boot_secondary(unsigned int cpu,
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -51,7 +51,7 @@ static void __iomem *scu_base_addr(void)
return NULL;
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void ux500_secondary_init(unsigned int cpu)
{
@@ -64,8 +64,8 @@ static void ux500_secondary_init(unsigne
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -76,7 +76,7 @@ static int ux500_boot_secondary(unsigned
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -97,7 +97,7 @@ static int ux500_boot_secondary(unsigned
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -30,7 +30,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void versatile_secondary_init(unsigned int cpu)
{
@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* This is really belt and braces; we hold unintended secondary
@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}

View File

@ -1,21 +0,0 @@
Subject: arm-disable-highmem-on-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:09:28 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1789,7 +1789,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
- depends on MMU
+ depends on MMU && !PREEMPT_RT_FULL
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address

View File

@ -1,140 +0,0 @@
Subject: arm-enable-highmem-for-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 2 -
arch/arm/include/asm/switch_to.h | 8 +++++++
arch/arm/mm/highmem.c | 41 +++++++++++++++++++++++++++++++++++++--
include/linux/highmem.h | 1
4 files changed, 49 insertions(+), 3 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1789,7 +1789,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
- depends on MMU && !PREEMPT_RT_FULL
+ depends on MMU
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(s
#define switch_to(prev,next,last) \
do { \
+ switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
+ pte_t pte = mk_pte(page, kmap_prot);
unsigned int idx;
unsigned long vaddr;
void *kmap;
@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
- set_top_pte(vaddr, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_top_pte(vaddr, pte);
return (void *)vaddr;
}
@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr)
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#else
@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn)
{
+ pte_t pte = pfn_pte(pfn, kmap_prot);
unsigned long vaddr;
int idx, type;
@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_top_pte(vaddr, pte);
return (void *)vaddr;
}
@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const v
return pte_page(get_top_pte(vaddr));
}
+
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ int i;
+
+ /*
+ * Clear @prev's kmap_atomic mappings
+ */
+ for (i = 0; i < prev_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
+ }
+ /*
+ * Restore @next_p's kmap_atomic mappings
+ */
+ for (i = 0; i < next_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ if (!pte_none(next_p->kmap_pte[i]))
+ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
+ next_p->kmap_pte[i]);
+ }
+}
+#endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>

View File

@ -1,104 +0,0 @@
Subject: arm-preempt-lazy-support.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/thread_info.h | 3 +++
arch/arm/kernel/asm-offsets.c | 1 +
arch/arm/kernel/entry-armv.S | 13 +++++++++++--
arch/arm/kernel/signal.c | 3 ++-
5 files changed, 18 insertions(+), 3 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -58,6 +58,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -60,6 +60,7 @@ struct arm_restart_block {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
@@ -153,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_NEED_RESCHED_LAZY 3
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
@@ -165,6 +167,7 @@ extern int vfp_restore_user_hwstate(stru
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -54,6 +54,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -205,11 +205,18 @@ ENDPROC(__dabt_svc)
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
+ bne 1f @ return from exception
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
+ blne svc_preempt @ preempt!
+
+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
+ teq r8, #0 @ if preempt lazy count != 0
movne r0, #0 @ force flags to 0
- tst r0, #_TIF_NEED_RESCHED
+ tst r0, #_TIF_NEED_RESCHED_LAZY
blne svc_preempt
+1:
#endif
svc_exit r5, irq = 1 @ return from exception
@@ -224,6 +231,8 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
+ bne 1b
+ tst r0, #_TIF_NEED_RESCHED_LAZY
moveq pc, r8 @ go again
b 1b
#endif
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -573,7 +573,8 @@ asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
do {
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
+ _TIF_NEED_RESCHED_LAZY))) {
schedule();
} else {
if (unlikely(!user_mode(regs)))

View File

@ -1,88 +0,0 @@
From 4b82f1531c59f7c2f6e4deca57d74ad732a05cdd Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Sep 2013 14:31:54 +0200
Subject: [PATCH RT] arm/unwind: use a raw_spin_lock
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Mostly, unwinding is done with irqs enabled; however, SLUB may call it with
irqs disabled while creating a new SLUB cache.
I had a system freeze while loading a module which called
kmem_cache_create() on init. That means SLUB's __slab_alloc() disabled
interrupts and then
->new_slab_objects()
->new_slab()
->setup_object()
->setup_object_debug()
->init_tracking()
->set_track()
->save_stack_trace()
->save_stack_trace_tsk()
->walk_stackframe()
->unwind_frame()
->unwind_find_idx()
=>spin_lock_irqsave(&unwind_lock);
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/kernel/unwind.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -87,7 +87,7 @@ extern const struct unwind_idx __start_u
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_f
/* module unwind tables */
struct unwind_table *table;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_f
break;
}
}
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(un
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_tabl
if (!tab)
return;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}

View File

@ -1,65 +0,0 @@
From: Steven Rostedt <srostedt@redhat.com>
Date: Fri, 3 Jul 2009 08:44:29 -0500
Subject: ata: Do not disable interrupts in ide code for preempt-rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Use the local_irq_*_nort variants.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/ata/libata-sff.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
unsigned long flags;
unsigned int consumed;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return consumed;
}
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
unsigned long flags;
/* FIXME: use a bounce buffer */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
do_write);
kunmap_atomic(buf);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_
unsigned long flags;
/* FIXME: use bounce buffer */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_
count, rw);
kunmap_atomic(buf);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
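
The local_irq_*_nort() helpers are defined elsewhere in this RT queue; the
idea is that they disable interrupts only on non-RT kernels and merely save
the flags on RT, where the long PIO transfers would otherwise add latency.
Roughly, assuming the usual definitions from the RT series:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif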

View File

@ -1,78 +0,0 @@
From 7632d1dd96f75bdba997003fa61ab14e57afb0fe Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: [PATCH 5/5] block: mq: use cpu_light()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
There is a might-sleep splat because get_cpu() disables preemption and
later we grab a lock. As a workaround for this we use get_cpu_light()
and an additional lock to prevent taking the same ctx.
There is a lock member in the ctx already, but there are some functions which do ++
on the member; this works with irqs off, but on RT we would need the extra lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-mq.c | 14 +++++++++++---
block/blk-mq.h | 1 +
2 files changed, 12 insertions(+), 3 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,7 +30,11 @@ static void __blk_mq_run_hw_queue(struct
static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
- return per_cpu_ptr(q->queue_ctx, cpu);
+ struct blk_mq_ctx *ctx;
+
+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
+ spin_lock(&ctx->cpu_lock);
+ return ctx;
}
/*
@@ -41,12 +45,13 @@ static struct blk_mq_ctx *__blk_mq_get_c
*/
static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
}
static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
+ spin_unlock(&ctx->cpu_lock);
+ put_cpu_light();
}
/*
@@ -897,7 +902,9 @@ static void blk_mq_make_request(struct r
if (list_empty(&plug->mq_list))
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ spin_unlock(&ctx->cpu_lock);
blk_flush_plug_list(plug, false);
+ spin_lock(&ctx->cpu_lock);
trace_block_plug(q);
}
list_add_tail(&rq->queuelist, &plug->mq_list);
@@ -1212,6 +1219,7 @@ static void blk_mq_init_cpu_queues(struc
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
+ spin_lock_init(&__ctx->cpu_lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,7 @@ struct blk_mq_ctx {
struct list_head rq_list;
} ____cacheline_aligned_in_smp;
+ spinlock_t cpu_lock;
unsigned int cpu;
unsigned int index_hw;
unsigned int ipi_redirect;
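
The might-sleep splat described above boils down to two RT facts: get_cpu()
disables preemption, and spin_lock() on RT may sleep. A condensed sketch of
the problematic sequence (illustrative, not a quote of the old code):

	ctx = per_cpu_ptr(q->queue_ctx, get_cpu());	/* preempt_disable() */
	spin_lock(&ctx->lock);				/* sleeping lock on RT:
							 * sleeping with preemption
							 * off -> splat */

With get_cpu_light() the task is only pinned to the CPU and stays
preemptible, and the new ctx->cpu_lock serializes users of the same ctx.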

View File

@ -1,97 +0,0 @@
Subject: block: Shorten interrupt disabled regions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
local_irq_save/restore(flags) by local_irq_disable/enable() in
blk_flush_plug().
Now, instead of doing this, we disable interrupts explicitly when we
lock the request_queue and re-enable them when we drop the lock. That
allows interrupts to be handled when the plug list contains requests
for more than one queue.
Aside from that, this change makes the scope of the irq-disabled region
more obvious. The current code confused the hell out of me when
looking at:
local_irq_save(flags);
spin_lock(q->queue_lock);
...
queue_unplugged(q...);
scsi_request_fn();
spin_unlock(q->queue_lock);
spin_lock(shost->host_lock);
spin_unlock_irq(shost->host_lock);
-------------------^^^ ????
spin_lock_irq(q->queue_lock);
spin_unlock(q->lock);
local_irq_restore(flags);
Also add a comment to __blk_run_queue() documenting that
q->request_fn() can drop q->queue_lock and reenable interrupts, but
must return with q->queue_lock held and interrupts disabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
---
block/blk-core.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2981,7 +2981,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3029,7 +3029,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
- unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -3051,11 +3050,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
- /*
- * Save and disable interrupts here, to avoid doing it for every
- * queue lock we have to take.
- */
- local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -3068,7 +3062,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -3095,8 +3089,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
-
- local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)
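
The transformation can be summarized as: instead of one local_irq_save()
region spanning every queue on the plug list, interrupts are disabled only
while a particular queue_lock is held. A condensed before/after sketch:

	/* before: irqs off across the whole flush */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		spin_lock(q->queue_lock);
		/* dispatch requests for this queue */
		spin_unlock(q->queue_lock);
	}
	local_irq_restore(flags);

	/* after: irqs off only while a queue lock is held */
	while (!list_empty(&list)) {
		spin_lock_irq(q->queue_lock);
		/* dispatch requests for this queue */
		spin_unlock_irq(q->queue_lock);
	}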

View File

@ -1,46 +0,0 @@
Subject: block: Use cpu_chill() for retry loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 20 Dec 2012 18:28:26 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Steven also observed a livelock when concurrent
priority boosting was going on.
Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
block/blk-ioc.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include "blk.h"
@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
goto retry;
}
}
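
cpu_chill() is an RT-specific helper from elsewhere in this queue; on non-RT
kernels it degrades to cpu_relax(), while on RT it really sleeps for a short
time so a preempted lock owner can run. Roughly, assuming the usual
definition from the RT series:

#ifdef CONFIG_PREEMPT_RT_FULL
extern void cpu_chill(void);	/* sleeps briefly via hrtimer_nanosleep() */
#else
# define cpu_chill()	cpu_relax()
#endif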

View File

@ -1,35 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/asm-generic/bug.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha
# define WARN_ON_SMP(x) ({0;})
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define BUG_ON_RT(c) BUG_ON(c)
+# define BUG_ON_NONRT(c) do { } while (0)
+# define WARN_ON_RT(condition) WARN_ON(condition)
+# define WARN_ON_NONRT(condition) do { } while (0)
+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
+#else
+# define BUG_ON_RT(c) do { } while (0)
+# define BUG_ON_NONRT(c) BUG_ON(c)
+# define WARN_ON_RT(condition) do { } while (0)
+# define WARN_ON_NONRT(condition) WARN_ON(condition)
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
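
Usage is symmetric: an assertion that only holds on non-RT kernels (for
instance because RT turns a spinlock into a sleeping lock that leaves irqs
enabled) is wrapped in a *_NONRT variant, and vice versa. A hypothetical
example:

	/* spin_lock_irq() disables irqs only on non-RT kernels */
	WARN_ON_NONRT(!irqs_disabled());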

View File

@ -1,161 +0,0 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile-time selection to allow higher clock resolution.
(fixed up by Sami Pietikäinen <Sami.Pietikainen@wapice.com>)
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/clocksource/tcb_clksrc.c | 37 ++++++++++++++++++++++---------------
drivers/misc/Kconfig | 12 ++++++++++--
2 files changed, 32 insertions(+), 17 deletions(-)
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ u32 freq;
void __iomem *regs;
};
@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod
case CLOCK_EVT_MODE_PERIODIC:
clk_prepare_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
__raw_writel(timer_clock
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ __raw_writel((tcd->freq + HZ/2)/HZ,
+ tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod
case CLOCK_EVT_MODE_ONESHOT:
clk_prepare_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt =
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
/* Should be lower than at91rm9200's system timer */
.rating = 125,
+#else
+ .rating = 200,
+#endif
.set_next_event = tc_next_event,
.set_mode = tc_mode,
},
@@ -184,8 +182,9 @@ static struct irqaction tc_irqaction = {
.handler = ch2_irq,
};
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
+ unsigned divisor = atmel_tc_divisors[divisor_idx];
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -200,7 +199,11 @@ static int __init setup_clkevents(struct
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
- timer_clock = clk32k_divisor_idx;
+ timer_clock = divisor_idx;
+ if (!divisor)
+ clkevt.freq = 32768;
+ else
+ clkevt.freq = clk_get_rate(t2_clk) / divisor;
clkevt.clkevt.cpumask = cpumask_of(0);
@@ -208,7 +211,7 @@ static int __init setup_clkevents(struct
if (ret)
return ret;
- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
return ret;
}
@@ -345,7 +348,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+ ret = setup_clkevents(tc, best_divisor_idx);
+#endif
if (ret)
goto err_unregister_clksrc;
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
+ may be used as a clock event device supporting oneshot mode.
config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -93,6 +92,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+ default y
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
+
+
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
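
The figures in the removed comments follow directly from the clock rate: at
32768 Hz one tick is 1/32768 s ≈ 30.5 µs, and the 16-bit counter spans at
most 65536 ticks:

	65536 / 32768 Hz ≈ 2.0 s maximum delay

A divided peripheral clock improves the resolution proportionally, at the
cost of the very low minimum tick rate that made the slow clock attractive
for NO_HZ.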

View File

@ -1,184 +0,0 @@
Subject: completion: Use simple wait queues
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues, which reduces the
contention on the waitqueue lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/completion.h | 9 ++++-----
include/linux/uprobes.h | 1 +
kernel/sched/completion.c | 34 +++++++++++++++++-----------------
kernel/sched/core.c | 10 ++++++++--
4 files changed, 30 insertions(+), 24 deletions(-)
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
*/
-
-#include <linux/wait.h>
+#include <linux/wait-simple.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,11 +23,11 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_head wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -73,7 +72,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_head(&x->wait);
}
/**
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -30,10 +30,10 @@ void complete(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_SWAITER(wait);
- __add_wait_queue_tail_exclusive(&x->wait, &wait);
+ swait_prepare_locked(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__set_current_state(state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
- __remove_wait_queue(&x->wait, &wait);
+ swait_finish_locked(&x->wait, &wait);
if (!x->done)
return timeout;
}
@@ -89,9 +89,9 @@ static inline long __sched
{
might_sleep();
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
return timeout;
}
@@ -267,12 +267,12 @@ bool try_wait_for_completion(struct comp
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -290,10 +290,10 @@ bool completion_done(struct completion *
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2689,7 +2689,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
#endif
if (p->migrate_disable) {
@@ -2720,7 +2723,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
#endif
WARN_ON_ONCE(p->migrate_disable <= 0);
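
Note that the completion API itself is unchanged by this conversion; only
the wait queue behind it becomes the raw-spinlock-based "simple" variant.
A caller still writes, for example:

	static DECLARE_COMPLETION(setup_done);

	/* waiter */
	wait_for_completion(&setup_done);	/* parks on a struct swait_head */

	/* completer */
	complete(&setup_done);			/* __swait_wake_locked() under
						 * the raw wait.lock */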

View File

@ -1,21 +0,0 @@
Subject: cond-resched-lock-rt-tweak.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2722,7 +2722,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0

View File

@ -1,48 +0,0 @@
Subject: cond-resched-softirq-fix.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Jul 2011 09:56:44 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 4 ++++
kernel/sched/core.c | 2 ++
2 files changed, 6 insertions(+)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2733,12 +2733,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
+#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
+#else
+# define cond_resched_softirq() cond_resched()
+#endif
static inline void cond_resched_rcu(void)
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4227,6 +4227,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
+#ifndef CONFIG_PREEMPT_RT_FULL
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -4240,6 +4241,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
+#endif
/**
* yield - yield the current processor to other threads.

View File

@ -1,34 +0,0 @@
From 56f43bce737d3f28ad470c95fa84f824cb0d55ad Mon Sep 17 00:00:00 2001
From: Nicholas Mc Guire <der.herr@hofr.at>
Date: Thu, 21 Nov 2013 22:52:30 -0500
Subject: [PATCH 2/2] condition migration_disable on lock acquisition
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
No need to unconditionally migrate_disable() (what is it protecting?) and
re-enable on failure to acquire the lock.
This patch moves the migrate_disable() to be conditioned on successful lock
acquisition only.
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/locking/rt.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -182,11 +182,10 @@ int __lockfunc rt_write_trylock(rwlock_t
{
int ret = rt_mutex_trylock(&rwlock->lock);
- migrate_disable();
- if (ret)
+ if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- else
- migrate_enable();
+ migrate_disable();
+ }
return ret;
}

View File

@ -1,57 +0,0 @@
From 107fb2b43f5c80686ee6454713f4963728ca2737 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 5 Dec 2013 09:16:52 -0500
Subject: [PATCH] cpu hotplug: Document why PREEMPT_RT uses a spinlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The patch:
cpu: Make hotplug.lock a "sleeping" spinlock on RT
Tasks can block on hotplug.lock in pin_current_cpu(), but their
state might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.
Fixed a bug where the hotplug lock on PREEMPT_RT could be taken after a
task set its state to TASK_UNINTERRUPTIBLE and before it called
schedule(). If the hotplug lock used a mutex, and there was contention,
the current task's state would be set back to TASK_RUNNING and the
schedule() call would not sleep. This caused unexpected results.
Although the patch had a description of the change, the code had no
comments about it. This causes confusion to those that review the code,
and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy
to see why a change was made. Even if it was in git, the code should
still have a comment for something as subtle as this.
Document the rationale for using a spinlock on PREEMPT_RT in the hotplug
lock code.
Reported-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cpu.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -84,6 +84,14 @@ struct hotplug_pcp {
int grab_lock;
struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Note, on PREEMPT_RT, the hotplug lock must save the state of
+ * the task, otherwise the mutex will cause the task to fail
+ * to sleep when required. (Because it's called from migrate_disable())
+ *
+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
+ * state.
+ */
spinlock_t lock;
#else
struct mutex mutex;

View File

@ -1,126 +0,0 @@
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 02 Mar 2012 10:36:57 -0500
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 35 ++++++++++++++++++++++++++---------
1 file changed, 26 insertions(+), 9 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* Makes the lock keep the task's state */
+ spinlock_t lock;
+#else
struct mutex lock; /* Synchronizes accesses to refcount, */
+#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -59,10 +64,22 @@ static struct {
int refcount;
} cpu_hotplug = {
.active_writer = NULL,
+#ifdef CONFIG_PREEMPT_RT_FULL
+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
.refcount = 0,
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
struct hotplug_pcp {
struct task_struct *unplug;
int refcount;
@@ -92,8 +109,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
- mutex_lock(&cpu_hotplug.lock);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_lock();
+ hotplug_unlock();
preempt_disable();
goto retry;
}
@@ -165,9 +182,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
cpu_hotplug.refcount++;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -176,14 +193,14 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (WARN_ON(!cpu_hotplug.refcount))
cpu_hotplug.refcount++; /* try to fix things up */
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -215,11 +232,11 @@ void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
schedule();
}
}
@@ -227,7 +244,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
/*

View File

@ -1,548 +0,0 @@
From: Steven Rostedt <srostedt@redhat.com>
Date: Mon, 16 Jul 2012 08:07:43 +0000
Subject: cpu/rt: Rework cpu down for PREEMPT_RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
tasks can be preempted in many more places than in non-RT. In
order to handle per_cpu variables, tasks may be pinned to a CPU
for a while, and even sleep. But these tasks need to be off the CPU
if that CPU is going down.
Several synchronization methods have been tried, but when stressed
they failed. This is a new approach.
A sync_tsk thread is still created and tasks may still block on a
lock when the CPU is going down, but how that works is a bit different.
When cpu_down() starts, it will create the sync_tsk and wait on it
to report that the tasks currently pinned on the CPU are no longer
pinned. But new tasks that are about to be pinned will still be allowed
to do so at this time.
Then the notifiers are called. Several notifiers will bring down tasks
that will enter these locations. Some of these tasks will take locks
of other tasks that are on the CPU. If we don't let those other tasks
continue, but make them block until CPU down is done, the tasks that
the notifiers are waiting on will never complete as they are waiting
for the locks held by the tasks that are blocked.
Thus we still let the task pin the CPU until the notifiers are done.
After the notifiers run, we then make new tasks entering the pinned
CPU sections grab a mutex and wait. This mutex is now a per-CPU mutex
in the hotplug_pcp descriptor.
To help things along, a new function in the scheduler code is created
called migrate_me(). This function will try to migrate the current task
off the CPU that is going down, if possible. When the sync_tsk is created,
all tasks will then try to migrate off the CPU going down. There are
several cases where this won't work, but it helps in most cases.
After the notifiers are called, if a task can't migrate off but enters
the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex
until the CPU down is complete. Then the scheduler will force the migration
anyway.
Also, I found that THREAD_BOUND tasks also need to be accounted for as
pinned to the CPU, and migrate_disable() no longer treats them specially.
This helps fix issues with ksoftirqd and workqueue that unbind on CPU down.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++---------
kernel/sched/core.c | 82 ++++++++++++++++-
3 files changed, 284 insertions(+), 45 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1995,6 +1995,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2007,6 +2011,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* Makes the lock keep the task's state */
- spinlock_t lock;
-#else
struct mutex lock; /* Synchronizes accesses to refcount, */
-#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -64,28 +59,46 @@ static struct {
int refcount;
} cpu_hotplug = {
.active_writer = NULL,
-#ifdef CONFIG_PREEMPT_RT_FULL
- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
-#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#endif
.refcount = 0,
};
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
-#else
-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
-#endif
-
+/**
+ * hotplug_pcp - per cpu hotplug descriptor
+ * @unplug: set when pin_current_cpu() needs to sync tasks
+ * @sync_tsk: the task that waits for tasks to finish pinned sections
+ * @refcount: counter of tasks in pinned sections
+ * @grab_lock: set when the tasks entering pinned sections should wait
+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
+ * @mutex_init: zero if the mutex hasn't been initialized yet.
+ *
+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
+ * is used as a flag and still exists after @sync_tsk has exited and
+ * @sync_tsk set to NULL.
+ */
struct hotplug_pcp {
struct task_struct *unplug;
+ struct task_struct *sync_tsk;
int refcount;
+ int grab_lock;
struct completion synced;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spinlock_t lock;
+#else
+ struct mutex mutex;
+#endif
+ int mutex_init;
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+#endif
+
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
@@ -99,18 +112,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
+ int force = 0;
retry:
hp = &__get_cpu_var(hotplug_pcp);
- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
hp->unplug == current) {
hp->refcount++;
return;
}
- preempt_enable();
- hotplug_lock();
- hotplug_unlock();
+ if (hp->grab_lock) {
+ preempt_enable();
+ hotplug_lock(hp);
+ hotplug_unlock(hp);
+ } else {
+ preempt_enable();
+ /*
+ * Try to push this task off of this CPU.
+ */
+ if (!migrate_me()) {
+ preempt_disable();
+ hp = &__get_cpu_var(hotplug_pcp);
+ if (!hp->grab_lock) {
+ /*
+ * Just let it continue it's already pinned
+ * or about to sleep.
+ */
+ force = 1;
+ goto retry;
+ }
+ preempt_enable();
+ }
+ }
preempt_disable();
goto retry;
}
@@ -131,26 +165,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
-/*
- * FIXME: Is this really correct under all circumstances ?
- */
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+}
+
static int sync_unplug_thread(void *data)
{
struct hotplug_pcp *hp = data;
preempt_disable();
hp->unplug = current;
+ wait_for_pinned_cpus(hp);
+
+ /*
+ * This thread will synchronize the cpu_down() with threads
+ * that have pinned the CPU. When the pinned CPU count reaches
+ * zero, we inform the cpu_down code to continue to the next step.
+ */
set_current_state(TASK_UNINTERRUPTIBLE);
- while (hp->refcount) {
- schedule_preempt_disabled();
+ preempt_enable();
+ complete(&hp->synced);
+
+ /*
+ * If all succeeds, the next step will need tasks to wait till
+ * the CPU is offline before continuing. To do this, the grab_lock
+ * is set and tasks going into pin_current_cpu() will block on the
+ * mutex. But we still need to wait for those that are already in
+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
+ * will kick this thread out.
+ */
+ while (!hp->grab_lock && !kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+
+ /* Make sure grab_lock is seen before we see a stale completion */
+ smp_mb();
+
+ /*
+ * Now just before cpu_down() enters stop machine, we need to make
+ * sure all tasks that are in pinned CPU sections are out, and new
+ * tasks will now grab the lock, keeping them from entering pinned
+ * CPU sections.
+ */
+ if (!kthread_should_stop()) {
+ preempt_disable();
+ wait_for_pinned_cpus(hp);
+ preempt_enable();
+ complete(&hp->synced);
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
- preempt_enable();
- complete(&hp->synced);
+
+ /*
+ * Force this thread off this CPU as it's going down and
+ * we don't want any more work on this CPU.
+ */
+ current->flags &= ~PF_NO_SETAFFINITY;
+ do_set_cpus_allowed(current, cpu_present_mask);
+ migrate_me();
return 0;
}
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
+{
+ wake_up_process(hp->sync_tsk);
+ wait_for_completion(&hp->synced);
+}
+
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
@@ -158,23 +250,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- struct task_struct *tsk;
+ int err;
+
+ /* Protected by cpu_hotplug.lock */
+ if (!hp->mutex_init) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spin_lock_init(&hp->lock);
+#else
+ mutex_init(&hp->mutex);
+#endif
+ hp->mutex_init = 1;
+ }
+
+ /* Inform the scheduler to migrate tasks off this CPU */
+ tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(tsk))
- return (PTR_ERR(tsk));
- kthread_bind(tsk, cpu);
- wake_up_process(tsk);
- wait_for_completion(&hp->synced);
+
+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(hp->sync_tsk)) {
+ err = PTR_ERR(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+ return err;
+ }
+ kthread_bind(hp->sync_tsk, cpu);
+
+ /*
+ * Wait for tasks to get out of the pinned sections,
+ * it's still OK if new tasks enter. Some CPU notifiers will
+ * wait for tasks that are going to enter these sections and
+ * we must not have them block.
+ */
+ __cpu_unplug_sync(hp);
+
return 0;
}
+static void cpu_unplug_sync(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ init_completion(&hp->synced);
+ /* The completion needs to be initialzied before setting grab_lock */
+ smp_wmb();
+
+ /* Grab the mutex before setting grab_lock */
+ hotplug_lock(hp);
+ hp->grab_lock = 1;
+
+ /*
+ * The CPU notifiers have been completed.
+ * Wait for tasks to get out of pinned CPU sections and have new
+ * tasks block until the CPU is completely down.
+ */
+ __cpu_unplug_sync(hp);
+
+ /* All done with the sync thread */
+ kthread_stop(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+}
+
static void cpu_unplug_done(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
hp->unplug = NULL;
+ /* Let all tasks know cpu unplug is finished before cleaning up */
+ smp_wmb();
+
+ if (hp->sync_tsk)
+ kthread_stop(hp->sync_tsk);
+
+ if (hp->grab_lock) {
+ hotplug_unlock(hp);
+ /* protected by cpu_hotplug.lock */
+ hp->grab_lock = 0;
+ }
+ tell_sched_cpu_down_done(cpu);
}
void get_online_cpus(void)
@@ -182,9 +334,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
cpu_hotplug.refcount++;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -194,14 +346,13 @@ void put_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
if (WARN_ON(!cpu_hotplug.refcount))
cpu_hotplug.refcount++; /* try to fix things up */
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- hotplug_unlock();
-
+ mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -232,11 +383,11 @@ void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
schedule();
}
}
@@ -244,7 +395,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
/*
@@ -458,6 +609,9 @@ static int __ref _cpu_down(unsigned int
smpboot_park_threads(cpu);
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
+ cpu_unplug_sync(cpu);
+
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2647,7 +2647,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
@@ -2677,7 +2677,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
@@ -4717,6 +4717,84 @@ void do_set_cpus_allowed(struct task_str
cpumask_copy(&p->cpus_allowed, new_mask);
}
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
+static DEFINE_MUTEX(sched_down_mutex);
+static cpumask_t sched_down_cpumask;
+
+void tell_sched_cpu_down_begin(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_set_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+void tell_sched_cpu_down_done(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+/**
+ * migrate_me - try to move the current task off this cpu
+ *
+ * Used by the pin_current_cpu() code to try to get tasks
+ * to move off the current CPU as it is going down.
+ * It will only move the task if the task isn't pinned to
+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
+ * and the task has to be in a RUNNING state. Otherwise the
+ * movement of the task will wake it up (change its state
+ * to running) when the task did not expect it.
+ *
+ * Returns 1 if it succeeded in moving the current task
+ * 0 otherwise.
+ */
+int migrate_me(void)
+{
+ struct task_struct *p = current;
+ struct migration_arg arg;
+ struct cpumask *cpumask;
+ struct cpumask *mask;
+ unsigned long flags;
+ unsigned int dest_cpu;
+ struct rq *rq;
+
+ /*
+ * We can not migrate tasks bounded to a CPU or tasks not
+ * running. The movement of the task will wake it up.
+ */
+ if (p->flags & PF_NO_SETAFFINITY || p->state)
+ return 0;
+
+ mutex_lock(&sched_down_mutex);
+ rq = task_rq_lock(p, &flags);
+
+ cpumask = &__get_cpu_var(sched_cpumasks);
+ mask = &p->cpus_allowed;
+
+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
+
+ if (!cpumask_weight(cpumask)) {
+ /* It's only on this CPU? */
+ task_rq_unlock(rq, p, &flags);
+ mutex_unlock(&sched_down_mutex);
+ return 0;
+ }
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
+
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ task_rq_unlock(rq, p, &flags);
+
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ mutex_unlock(&sched_down_mutex);
+
+ return 1;
+}
+
/*
* This is how migration works:
*
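
For orientation, pin_current_cpu()/unpin_current_cpu() are not called by
drivers directly; per the hotplug_pcp comment above they sit underneath
migrate_disable()/migrate_enable(). The resulting contract, as a rough
sketch:

	migrate_disable();	/* pin_current_cpu(): blocks once grab_lock is set */
	/* per-CPU work; may sleep, but stays on this CPU */
	migrate_enable();	/* unpin_current_cpu(): may wake the sync thread */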

View File

@ -1,27 +0,0 @@
Subject: cpu-rt-variants.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 15:42:38 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/smp.h | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -182,6 +182,14 @@ static inline void kick_all_cpus_sync(vo
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define get_cpu_light() get_cpu()
+# define put_cpu_light() put_cpu()
+#else
+# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+# define put_cpu_light() migrate_enable()
+#endif
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
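
A typical call site looks like the blk-mq use earlier in this series: pin
the task to a CPU without disabling preemption, so a sleeping lock may be
taken inside. Sketch with hypothetical per-CPU data:

	struct foo { spinlock_t lock; int count; };	/* illustrative */
	static DEFINE_PER_CPU(struct foo, foos);
	struct foo *f;
	int cpu;

	cpu = get_cpu_light();		/* RT: migrate_disable(), still preemptible */
	f = &per_cpu(foos, cpu);
	spin_lock(&f->lock);		/* may sleep on RT; fine while pinned */
	f->count++;
	spin_unlock(&f->lock);
	put_cpu_light();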

View File

@ -1,108 +0,0 @@
From a1b3b9eafb916f839a09dcde745518a5ad6703db Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 4 Mar 2014 12:28:32 -0500
Subject: [PATCH] cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
We hit another bug that was caused by switching cpu_chill() from
msleep() to hrtimer_nanosleep().
This time it is a livelock. The problem is that hrtimer_nanosleep()
calls schedule() with state == TASK_INTERRUPTIBLE. But this means
that if a signal is pending, the scheduler won't schedule, and will
simply change the current task state back to TASK_RUNNING. This
nullifies the whole point of cpu_chill() in the first place. That is,
if a task spinning on a try_lock() has preempted the owner of the
lock and has a signal pending, it will never give up the CPU to let
the owner of the lock run.
I made a static function __hrtimer_nanosleep() that takes a fifth
parameter "state", which determines the task state of that the
nanosleep() will be in. The normal hrtimer_nanosleep() will act the
same, but cpu_chill() will call __hrtimer_nanosleep() directly with
the TASK_UNINTERRUPTIBLE state.
cpu_chill() only cares that the first sleep happens, and does not care
about the state of the restart schedule (in hrtimer_nanosleep_restart).
Cc: stable-rt@vger.kernel.org
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/hrtimer.c | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1770,12 +1770,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
+ unsigned long state)
{
hrtimer_init_sleeper(t, current);
do {
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(state);
hrtimer_start_expires(&t->timer, mode);
if (!hrtimer_active(&t->timer))
t->task = NULL;
@@ -1819,7 +1820,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+ /* cpu_chill() does not care about restart state. */
+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
goto out;
rmtp = restart->nanosleep.rmtp;
@@ -1836,8 +1838,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- const enum hrtimer_mode mode, const clockid_t clockid)
+static long
+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid,
+ unsigned long state)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
@@ -1850,7 +1854,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
- if (do_nanosleep(&t, mode))
+ if (do_nanosleep(&t, mode, state))
goto out;
/* Absolute timers do not update the rmtp value and restart: */
@@ -1877,6 +1881,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid)
+{
+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
+}
+
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
@@ -1903,7 +1913,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
+ TASK_UNINTERRUPTIBLE);
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;
}
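
The livelock is easiest to see in the try-lock retry loops cpu_chill() is
used for (e.g. the blk-ioc loop earlier in this series). Sketch of the
pattern, with illustrative lock names:

retry:
	spin_lock(&outer);
	if (!spin_trylock(&inner)) {
		spin_unlock(&outer);
		cpu_chill();	/* must really sleep: with TASK_INTERRUPTIBLE
				 * and a signal pending it returned at once,
				 * so the preempted owner of 'inner' never ran */
		goto retry;
	}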

View File

@ -1,54 +0,0 @@
From linux-rt-users-owner@vger.kernel.org Thu Nov 7 03:07:12 2013
From: Tiejun Chen <tiejun.chen@windriver.com>
Subject: [v1][PATCH] cpu_down: move migrate_enable() back
Date: Thu, 7 Nov 2013 10:06:07 +0800
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
use migrate_enable()/migrate_disable() to replace that combination
of preempt_enable() and preempt_disable(), but in the
!CONFIG_PREEMPT_RT_FULL case migrate_enable()/migrate_disable()
are still equal to preempt_enable()/preempt_disable(). So the
cpu_hotplug_begin()/cpu_unplug_begin(cpu) that follows would reach
schedule() and trigger schedule_debug() like this:
_cpu_down()
|
+ migrate_disable() = preempt_disable()
|
+ cpu_hotplug_begin() or cpu_unplug_begin()
|
+ schedule()
|
+ __schedule()
|
+ preempt_disable();
|
+ __schedule_bug() is true!
So we should move migrate_enable() back, as in the original scheme.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -601,6 +601,7 @@ static int __ref _cpu_down(unsigned int
err = -EBUSY;
goto restore_cpus;
}
+ migrate_enable();
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
@@ -673,7 +674,6 @@ static int __ref _cpu_down(unsigned int
out_release:
cpu_unplug_done(cpu);
out_cancel:
- migrate_enable();
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);

View File

@ -1,35 +0,0 @@
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
We can't deal with the cpumask allocations which happen in atomic
context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/Kconfig | 2 +-
lib/Kconfig | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -811,7 +811,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
- select CPUMASK_OFFSTACK
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -357,6 +357,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids

View File

@ -1,243 +0,0 @@
From 0fcf777e2f217e61564bd30a2c39cb49d0e0b8c3 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: [PATCH] crypto: Reduce preempt disabled regions, more algos
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200()
| kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
and his backtrace showed some crypto functions which looked fine.
The problem is the following sequence:
glue_xts_crypt_128bit()
{
blkcipher_walk_virt(); /* normal migrate_disable() */
glue_fpu_begin(); /* get atomic */
while (nbytes) {
__glue_xts_crypt_128bit();
blkcipher_walk_done(); /* with nbytes = 0, migrate_enable()
* while we are atomic */
};
glue_fpu_end() /* no longer atomic */
}
and this is why the counter gets out of sync and the warning is printed.
The other problem is that we are non-preemptible between
glue_fpu_begin() and glue_fpu_end(), so the latency grows. To fix this,
I shorten the FPU-off region and ensure blkcipher_walk_done() is called
with preemption enabled. This might hurt performance because we now
enable/disable the FPU state more often, but we gain lower latency and
the bug is gone.
Cc: stable-rt@vger.kernel.org
Reported-by: Don Estabrook <don.estabrook@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/crypto/cast5_avx_glue.c | 21 +++++++++------------
arch/x86/crypto/glue_helper.c | 31 +++++++++++++++----------------
2 files changed, 24 insertions(+), 28 deletions(-)
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fp
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
@@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_de
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
@@ -104,10 +104,9 @@ static int ecb_crypt(struct blkcipher_de
} while (nbytes >= bsize);
done:
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}
@@ -231,7 +230,7 @@ static unsigned int __cbc_decrypt(struct
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -240,12 +239,11 @@ static int cbc_decrypt(struct blkcipher_
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
nbytes = __cbc_decrypt(desc, &walk);
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}
@@ -315,7 +313,7 @@ static unsigned int __ctr_crypt(struct b
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -324,13 +322,12 @@ static int ctr_crypt(struct blkcipher_de
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
nbytes = __ctr_crypt(desc, &walk);
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- cast5_fpu_end(fpu_enabled);
-
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
- bool fpu_enabled = false;
+ bool fpu_enabled;
int err;
err = blkcipher_walk_virt(desc, walk);
@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const
}
done:
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
return err;
}
@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct
while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
@@ -278,7 +278,7 @@ int glue_ctr_crypt_128bit(const struct c
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -287,13 +287,12 @@ int glue_ctr_crypt_128bit(const struct c
while ((nbytes = walk.nbytes) >= bsize) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
-
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
@@ -348,7 +347,7 @@ int glue_xts_crypt_128bit(const struct c
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -361,21 +360,21 @@ int glue_xts_crypt_128bit(const struct c
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled,
+ desc, false,
nbytes < bsize ? bsize : nbytes);
-
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
+ glue_fpu_end(fpu_enabled);
while (nbytes) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ desc, false, nbytes);
nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
}
-
- glue_fpu_end(fpu_enabled);
-
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
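All of the hunks above apply one transformation: instead of opening an FPU section once (which disables preemption) and holding it across the entire blkcipher walk, begin/end is now paired around each chunk, so the non-preemptible window stays short and blkcipher_walk_done() runs preemptibly. A schematic of the two shapes, with hypothetical helper names standing in for the glue_fpu_*/walk machinery:
/*
 * Illustrative sketch only; fpu_begin(), fpu_end(), have_chunks(),
 * process_chunk() and finish_chunk() are hypothetical stand-ins.
 */
bool fpu;

/* Before: begin once, end after the loop. */
fpu = fpu_begin();
while (have_chunks()) {
        process_chunk();        /* SIMD work, preemption disabled */
        finish_chunk();         /* blkcipher_walk_done(): may sleep! */
}
fpu_end(fpu);

/* After: pair begin/end around each chunk. */
while (have_chunks()) {
        fpu = fpu_begin();      /* preemption disabled only here */
        process_chunk();
        fpu_end(fpu);
        finish_chunk();         /* now preemptible, sleeping is fine */
}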

View File

@@ -1,24 +0,0 @@
Subject: debugobjects-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -308,7 +308,10 @@ static void
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();
db = get_bucket((unsigned long) addr);

View File

@@ -1,88 +0,0 @@
From 27bcad87397de27b92b8651630771e032cf87116 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 11 Apr 2014 20:12:43 +0200
Subject: [PATCH] disable lazy preempt on x86-64
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/Kconfig | 2 +-
arch/x86/kernel/entry_64.S | 28 ++++++++--------------------
2 files changed, 9 insertions(+), 21 deletions(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86_64
### Arch settings
config X86
def_bool y
- select HAVE_PREEMPT_LAZY
+ select HAVE_PREEMPT_LAZY if X86_32
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -658,8 +658,8 @@ GLOBAL(system_call_after_swapgs)
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz sysret_signal
+ bt $TIF_NEED_RESCHED,%edx
+ jnc sysret_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -771,8 +771,8 @@ GLOBAL(int_with_check)
/* First do a reschedule test. */
/* edx: work, edi: workmask */
int_careful:
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz int_very_careful
+ bt $TIF_NEED_RESCHED,%edx
+ jnc int_very_careful
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -1071,8 +1071,8 @@ ENTRY(native_iret)
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz retint_signal
+ bt $TIF_NEED_RESCHED,%edx
+ jnc retint_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -1104,19 +1104,7 @@ ENTRY(native_iret)
/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
cmpl $0,PER_CPU_VAR(__preempt_count)
- jz check_int_off
-
- # atleast preempt count == 0 ?
- cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count)
- jnz retint_restore_args
-
- cmpl $0, TI_preempt_lazy_count(%rcx)
- jnz retint_restore_args
-
- bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
- jnc retint_restore_args
-
-check_int_off:
+ jnz retint_restore_args
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
jnc retint_restore_args
call preempt_schedule_irq
@@ -1552,7 +1540,7 @@ ENTRY(paranoid_exit)
movq %rsp,%rdi /* &pt_regs */
call sync_regs
movq %rax,%rsp /* switch stack for scheduling */
- testl $_TIF_NEED_RESCHED_MASK,%ebx
+ testl $_TIF_NEED_RESCHED,%ebx
jnz paranoid_schedule
movl %ebx,%edx /* arg3: thread flags */
TRACE_IRQS_ON

View File

@@ -1,35 +0,0 @@
Subject: dm: Make rt aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 14 Nov 2011 23:06:09 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Use the BUG_ON_NONRT variant for the irqs_disabled() checks. RT has
interrupts legitimately enabled here as we can't deadlock against the
irq thread due to the "sleeping spinlocks" conversion.
Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/md/dm.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1716,14 +1716,14 @@ static void dm_request_fn(struct request
if (map_request(ti, clone, md))
goto requeued;
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);
}
goto out;
requeued:
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);
delay_and_out:
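BUG_ON_NONRT() comes from elsewhere in the RT series; roughly, it compiles to BUG_ON() on mainline kernels and to a no-op when PREEMPT_RT_BASE is enabled, so the assertion documents the non-RT invariant without firing on RT. A sketch of the assumed shape (not a verbatim copy of the series):
/* Sketch; see the RT series' bug.h additions for the real thing. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define BUG_ON_RT(c)           BUG_ON(c)
# define BUG_ON_NONRT(c)        do { } while (0)
#else
# define BUG_ON_RT(c)           do { } while (0)
# define BUG_ON_NONRT(c)        BUG_ON(c)
#endif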

View File

@@ -1,26 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
Subject: drivers/net: Use disable_irq_nosync() in 8139too
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/realtek/8139too.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2215,7 +2215,7 @@ static void rtl8139_poll_controller(stru
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}

View File

@@ -1,127 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 20 Jun 2009 11:36:54 +0200
Subject: drivers/net: fix livelock issues
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro
optimization. The reason is that the softirq thread is rescheduling
itself on that return value. Depending on priorities it starts to
monopolize the CPU and livelock on UP systems.
Remove it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
drivers/net/ethernet/neterion/s2io.c | 7 +------
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
drivers/net/rionet.c | 6 +-----
7 files changed, 9 insertions(+), 31 deletions(-)
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2217,11 +2217,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
}
tpd_req = atl1c_cal_tpd_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
- if (netif_msg_pktdata(adapter))
- dev_info(&adapter->pdev->dev, "tx locked\n");
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&adapter->tx_lock, flags);
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,8 +1883,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
- return NETDEV_TX_LOCKED;
+ spin_lock_irqsave(&adapter->tx_lock, flags);
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1663,8 +1663,7 @@ static int t1_sge_tx(struct sk_buff *skb
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
+ spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- if (do_spin_lock)
- spin_lock_irqsave(&fifo->tx_lock, flags);
- else {
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&fifo->tx_lock, flags);
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2141,10 +2141,8 @@ static int pch_gbe_xmit_frame(struct sk_
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc
unsigned long flags;
ENTER;
- local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
- BDX_DRV_NAME, ndev->name);
- return NETDEV_TX_LOCKED;
- }
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b
unsigned long flags;
int add_num = 1;
- local_irq_save(flags);
- if (!spin_trylock(&rnet->tx_lock)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&rnet->tx_lock, flags);
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
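The livelock mechanism is worth spelling out: NETDEV_TX_LOCKED asks the core to retry the transmit immediately, and since the softirq runs as a thread on RT, a sufficiently high-priority transmitter can bounce between trylock failure and instant requeue without ever letting the lock holder run. Schematically (driver names hypothetical):
/* Removed anti-pattern: fail fast and have the core retry at once. */
if (!spin_trylock_irqsave(&priv->tx_lock, flags))
        return NETDEV_TX_LOCKED;        /* immediate requeue -> livelock */

/* Replacement: just take the lock. On RT it is a sleeping lock, so
 * the current holder gets to run and make progress. */
spin_lock_irqsave(&priv->tx_lock, flags);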

View File

@@ -1,56 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Apr 2010 20:20:57 +0200
Subject: drivers: net: gianfar: Make RT aware
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
adjust_link() disables interrupts before taking the queue
locks. On RT those locks are converted to "sleeping" locks and
therefore the local_irq_save/restore pairs must be converted to
local_irq_save/restore_nort.
Reported-by: Xianghua Xiao <xiaoxianghua@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Xianghua Xiao <xiaoxianghua@gmail.com>
---
drivers/net/ethernet/freescale/gianfar.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1716,7 +1716,7 @@ void stop_gfar(struct net_device *dev)
/* Lock it down */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -1724,7 +1724,7 @@ void stop_gfar(struct net_device *dev)
unlock_rx_qs(priv);
unlock_tx_qs(priv);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
/* Free the IRQs */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -3105,7 +3105,7 @@ static void adjust_link(struct net_devic
struct phy_device *phydev = priv->phydev;
int new_state = 0;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
lock_tx_qs(priv);
if (phydev->link) {
@@ -3179,7 +3179,7 @@ static void adjust_link(struct net_devic
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
unlock_tx_qs(priv);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
/* Update the hash table based on the current list of multicast
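The local_irq_save_nort()/local_irq_restore_nort() pair is defined elsewhere in the RT series: on mainline it is plain local_irq_save()/restore(), while on RT it merely saves the flags, since the queue locks here become sleeping locks and must not be taken with interrupts hard-disabled. A sketch of the assumed definitions:
/* Sketch, not a verbatim copy of the RT series. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)     local_save_flags(flags)
# define local_irq_restore_nort(flags)  (void)(flags)
#else
# define local_irq_save_nort(flags)     local_irq_save(flags)
# define local_irq_restore_nort(flags)  local_irq_restore(flags)
#endif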

View File

@@ -1,49 +0,0 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 3 Jul 2009 08:30:00 -0500
Subject: drivers/net: vortex fix locking issues
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Argh, cut and paste wasn't enough...
Use this patch instead. It needs an irq disable. But, believe it or not,
on SMP this is actually better. If the irq is shared (as it is in Mark's
case), we don't stop the irq of other devices from being handled on
another CPU (unfortunately for Mark, he pinned all interrupts to one CPU).
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/net/ethernet/3com/3c59x.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
#endif
@@ -1917,12 +1917,12 @@ static void vortex_tx_timeout(struct net
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (vp->full_bus_master_tx)
boomerang_interrupt(dev->irq, dev);
else
vortex_interrupt(dev->irq, dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
}

View File

@@ -1,33 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
Subject: drivers: random: Reduce preempt disabled region
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
No need to keep preemption disabled across the whole function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/char/random.c | 3 ---
1 file changed, 3 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -772,8 +772,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -814,7 +812,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,

View File

@@ -1,55 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:01 -0500
Subject: [PATCH] serial: 8250: Clean up the locking for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
In -RT, spin_lock_irqsave() does not spin but sleeps if the lock is
taken. Before that, local_irq_save() is invoked, which disables
interrupts even on -RT. Therefore local_irq_save() + spin_lock() does
not work.
In the ->sysrq and oops_in_progress cases it is safe to trylock the
lock, i.e. this is what we do now anyway, except for ->sysrq where we
assume that the lock is already taken.
The spin_lock_irqsave() grabs the lock and disables the interrupts on
vanilla (the same behavior) and on -RT it won't disable interrupts.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: add a patch description]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/tty/serial/8250/8250_core.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2882,14 +2882,10 @@ serial8250_console_write(struct console
touch_nmi_watchdog();
- local_irq_save(flags);
- if (port->sysrq) {
- /* serial8250_handle_irq() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
/*
* First save the IER then disable the interrupts
@@ -2921,8 +2917,7 @@ serial8250_console_write(struct console
serial8250_modem_status(up);
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int __init serial8250_console_setup(struct console *co, char *options)
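The same transformation recurs in the OMAP and PL011 console patches below, so the rule is worth stating once as a sketch (illustrative, not tree code):
/* Broken on -RT: hard-disable IRQs, then take a sleeping lock. */
local_irq_save(flags);                  /* disables IRQs even on -RT */
spin_lock(&port->lock);                 /* rtmutex on -RT: may sleep */
/* ... console output ... */
spin_unlock(&port->lock);
local_irq_restore(flags);

/* RT-safe: let the lock primitive decide. Vanilla takes the lock
 * with IRQs disabled; -RT just takes the sleeping lock. */
spin_lock_irqsave(&port->lock, flags);
/* ... console output ... */
spin_unlock_irqrestore(&port->lock, flags);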

View File

@@ -1,39 +0,0 @@
Subject: drivers-tty-fix-omap-lock-crap.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/omap-serial.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1266,13 +1266,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(up->dev);
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
else
- spin_lock(&up->port.lock);
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the IER then disable the interrupts
@@ -1301,8 +1298,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init

View File

@@ -1,45 +0,0 @@
Subject: drivers-tty-pl011-irq-disable-madness.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/amba-pl011.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1919,13 +1919,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
- local_irq_save(flags);
+ /*
+ * local_irq_save(flags);
+ *
+ * This local_irq_save() is nonsense. If we come in via sysrq
+ * handling then interrupts are already disabled. Aside of
+ * that the port.sysrq check is racy on SMP regardless.
+ */
if (uap->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
else
- spin_lock(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);
/*
* First save the CR then disable the interrupts
@@ -1947,8 +1953,7 @@ pl011_console_write(struct console *co,
writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
- spin_unlock(&uap->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
clk_disable(uap->clk);
}

View File

@@ -1,60 +0,0 @@
From d841118ac80c5bfb18f47984bc40687eed08b714 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 25 Apr 2013 18:12:52 +0200
Subject: [PATCH] drm/i915: drop trace_i915_gem_ring_dispatch on rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
This tracepoint is responsible for:
|[<814cc358>] __schedule_bug+0x4d/0x59
|[<814d24cc>] __schedule+0x88c/0x930
|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50
|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50
|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250
|[<814d27d9>] schedule+0x29/0x70
|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278
|[<814d3786>] rt_spin_lock+0x26/0x30
|[<a00dced9>] gen6_gt_force_wake_get+0x29/0x60 [i915]
|[<a00e183f>] gen6_ring_get_irq+0x5f/0x100 [i915]
|[<a00b2a33>] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915]
|[<a00ac1b3>] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915]
|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60
|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180
|[<8101d063>] ? native_sched_clock+0x13/0x80
|[<a00acf29>] i915_gem_execbuffer2+0x99/0x280 [i915]
|[<a00114a3>] drm_ioctl+0x4c3/0x570 [drm]
|[<8101d0d9>] ? sched_clock+0x9/0x10
|[<a00ace90>] ? i915_gem_execbuffer+0x480/0x480 [i915]
|[<810f1c18>] ? rb_commit+0x68/0xa0
|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0
|[<81197467>] do_vfs_ioctl+0x97/0x540
|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130
|[<811979a1>] sys_ioctl+0x91/0xb0
|[<814db931>] tracesys+0xe1/0xe6
Chris Wilson does not like moving i915_trace_irq_get() out of the macro:
|No. This enables the IRQ, as well as making a number of
|very expensively serialised read, unconditionally.
so it is gone now on RT.
Cc: stable-rt@vger.kernel.org
Reported-by: Joakim Hernberg <jbh@alchemy.lu>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++
1 file changed, 2 insertions(+)
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1243,7 +1243,9 @@ i915_gem_do_execbuffer(struct drm_device
goto err;
}
+#ifndef CONFIG_PREEMPT_RT_BASE
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+#endif
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

View File

@@ -1,45 +0,0 @@
Subject: early-printk-consolidate.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 23 Jul 2011 11:04:08 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/sparc/kernel/setup_32.c | 1 +
arch/sparc/kernel/setup_64.c | 8 +++++++-
2 files changed, 8 insertions(+), 1 deletion(-)
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
+ early_console = &prom_early_console;
register_console(&prom_early_console);
printk("ARCH: ");
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -555,6 +555,12 @@ static void __init init_sparc64_elf_hwca
pause_patch();
}
+static inline void register_prom_console(void)
+{
+ early_console = &prom_early_console;
+ register_console(&prom_early_console);
+}
+
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
@@ -566,7 +572,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EARLYFB
if (btext_find_display())
#endif
- register_console(&prom_early_console);
+ register_prom_console();
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");

View File

@@ -1,27 +0,0 @@
Subject: epoll.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 16:35:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/eventpoll.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
- put_cpu();
+ put_cpu_light();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
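get_cpu_light()/put_cpu_light() are RT-series primitives: on mainline they collapse to get_cpu()/put_cpu(), which disable preemption, while on RT they only disable migration, so the section stays preemptible but pinned to its CPU. The assumed definition, roughly:
/* Sketch of the RT series' smp.h addition. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()        ({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()        migrate_enable()
#else
# define get_cpu_light()        get_cpu()
# define put_cpu_light()        put_cpu()
#endif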

View File

@@ -1,23 +0,0 @@
Subject: filemap-fix-up.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 18:56:24 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org
---
mm/filemap.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1974,7 +1974,7 @@ size_t iov_iter_copy_from_user_atomic(st
char *kaddr;
size_t copied;
- BUG_ON(!in_atomic());
+ BUG_ON(!pagefault_disabled());
kaddr = kmap_atomic(page);
if (likely(i->nr_segs == 1)) {
int left;

View File

@@ -1,113 +0,0 @@
From: Steven Rostedt <rostedt@goodmis.org>
Subject: x86: Do not disable preemption in int3 on 32bit
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Preemption must be disabled before enabling interrupts in do_trap
on x86_64 because the stack in use for int3 and debug is a per CPU
stack set by the IST. But 32bit does not have an IST and the stack
still belongs to the current task and there is no problem in scheduling
out the task.
Keep preemption enabled on X86_32 when enabling interrupts for
do_trap().
The name of the function is changed from preempt_conditional_sti/cli()
to conditional_sti/cli_ist(), to annotate that this function is used
when the stack is on the IST.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -86,9 +86,21 @@ static inline void conditional_sti(struc
local_irq_enable();
}
-static inline void preempt_conditional_sti(struct pt_regs *regs)
+static inline void conditional_sti_ist(struct pt_regs *regs)
{
+#ifdef CONFIG_X86_64
+ /*
+ * X86_64 uses a per CPU stack on the IST for certain traps
+ * like int3. The task can not be preempted when using one
+ * of these stacks, thus preemption must be disabled, otherwise
+ * the stack can be corrupted if the task is scheduled out,
+ * and another task comes in and uses this stack.
+ *
+ * On x86_32 the task keeps its own stack and it is OK if the
+ * task schedules out.
+ */
preempt_count_inc();
+#endif
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -99,11 +111,13 @@ static inline void conditional_cli(struc
local_irq_disable();
}
-static inline void preempt_conditional_cli(struct pt_regs *regs)
+static inline void conditional_cli_ist(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
+#ifdef CONFIG_X86_64
preempt_count_dec();
+#endif
}
static int __kprobes
@@ -232,9 +246,9 @@ dotraplinkage void do_stack_segment(stru
prev_state = exception_enter();
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
- preempt_conditional_sti(regs);
+ conditional_sti_ist(regs);
do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
}
exception_exit(prev_state);
}
@@ -343,9 +357,9 @@ dotraplinkage void __kprobes notrace do_
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
- preempt_conditional_sti(regs);
+ conditional_sti_ist(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();
exit:
exception_exit(prev_state);
@@ -451,12 +465,12 @@ dotraplinkage void __kprobes do_debug(st
debug_stack_usage_inc();
/* It's safe to allow irq's after DR6 has been saved */
- preempt_conditional_sti(regs);
+ conditional_sti_ist(regs);
if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();
goto exit;
}
@@ -476,7 +490,7 @@ dotraplinkage void __kprobes do_debug(st
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
- preempt_conditional_cli(regs);
+ conditional_cli_ist(regs);
debug_stack_usage_dec();
exit:

View File

@@ -1,55 +0,0 @@
From 53a9508f5983092928b0e6e12f400b686e1f04b1 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 28 Oct 2013 11:50:06 +0100
Subject: [PATCH] a few open coded completions
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/net/wireless/orinoco/orinoco_usb.c | 2 +-
drivers/usb/gadget/f_fs.c | 2 +-
drivers/usb/gadget/inode.c | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -713,7 +713,7 @@ static void ezusb_req_ctx_wait(struct ez
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
- wait_event_interruptible(ctx->done.wait,
+ swait_event_interruptible(ctx->done.wait,
ctx->done.done);
}
break;
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1120,7 +1120,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- waitqueue_active(&ffs->ep0req_completion.wait));
+ swaitqueue_active(&ffs->ep0req_completion.wait));
kfree(ffs->dev_name);
kfree(ffs);
}
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -340,7 +340,7 @@ ep_io (struct ep_data *epdata, void *buf
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
- value = wait_event_interruptible (done.wait, done.done);
+ value = swait_event_interruptible (done.wait, done.done);
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
@@ -349,7 +349,7 @@ ep_io (struct ep_data *epdata, void *buf
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
- wait_event (done.wait, done.done);
+ swait_event (done.wait, done.done);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
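These call sites assume the RT tree's "simple waitqueue" rework of struct completion, where the embedded wait head is a raw-spinlock based swait head rather than a full waitqueue, so open-coded waiters must use the matching swait_* API. The assumed layout, sketched:
/* Sketch; field and type names per the RT wait-simple rework. */
struct completion {
        unsigned int      done;
        struct swait_head wait;   /* simple, raw-spinlock based waitqueue */
};

/* Hence swait_event_interruptible(x.wait, x.done) replaces
 * wait_event_interruptible(x.wait, x.done) at these sites. */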

View File

@@ -1,21 +0,0 @@
Subject: fs-block-rt-support.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jun 2011 17:05:09 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
block/blk-core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON_NONRT(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);

View File

@@ -1,86 +0,0 @@
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
fs/autofs4/autofs_i.h | 1 +
fs/autofs4/expire.c | 2 +-
fs/dcache.c | 5 +++--
fs/namespace.c | 3 ++-
4 files changed, 7 insertions(+), 4 deletions(-)
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -157,7 +157,7 @@ static struct dentry *get_next_positive_
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
spin_unlock(&p->d_lock);
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -517,7 +518,7 @@ dentry_kill(struct dentry *dentry, int u
relock:
if (unlock_on_failure) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
}
return dentry; /* try again with same dentry */
}
@@ -2379,7 +2380,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h> /* acct_auto_close_mnt */
@@ -345,7 +346,7 @@ int __mnt_want_write(struct vfsmount *m)
smp_mb();
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
preempt_enable();
- cpu_relax();
+ cpu_chill();
preempt_disable();
}
/*
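cpu_chill() is supplied elsewhere in the RT series. The point is that cpu_relax() spins, which can loop forever if the lock holder is a preempted lower-priority task, whereas cpu_chill() actually sleeps briefly so the holder can run. The assumed shape:
/* Sketch of the RT-series helper this patch relies on. */
#ifdef CONFIG_PREEMPT_RT_FULL
extern void cpu_chill(void);    /* sleeps for roughly a millisecond */
#else
# define cpu_chill()    cpu_relax()
#endif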

View File

@@ -1,30 +0,0 @@
From: Mike Galbraith <mgalbraith@suse.de>
Date: Wed, 11 Jul 2012 22:05:20 +0000
Subject: fs, jbd: pull your plug when waiting for space
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
With an -rt kernel, and a heavy sync IO load, tasks can jam
up on journal locks without unplugging, which can lead to
terminal IO starvation. Unplug and schedule when waiting for space.
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Theodore Tso <tytso@mit.edu>
Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/jbd/checkpoint.c | 2 ++
1 file changed, 2 insertions(+)
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou
if (journal->j_flags & JFS_ABORT)
return;
spin_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
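io_schedule() helps because it flushes the task's block plug before sleeping, so requests this task has queued are actually issued instead of sitting in the plug while it waits on the checkpoint mutex. An abbreviated sketch of io_schedule() as it looked in kernels of this vintage (see kernel/sched/core.c):
/* Abbreviated sketch, not verbatim kernel source. */
void __sched io_schedule(void)
{
        struct rq *rq = raw_rq();

        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        blk_flush_plug(current);        /* issue this task's plugged I/O */
        current->in_iowait = 1;
        schedule();
        current->in_iowait = 0;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
}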

View File

@@ -1,101 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
bit_spin_locks break under RT.
Based on a previous patch from Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/buffer_head.h | 10 ++++++++++
include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
2 files changed, 34 insertions(+)
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -77,6 +77,11 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
spinlock_t b_uptodate_lock;
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
#endif
};
@@ -108,6 +113,11 @@ static inline void buffer_head_init_lock
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
#endif
}
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
}
#endif

View File

@@ -1,33 +0,0 @@
From c28e07715162bb1e1567a935b45772ca85a5267c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 17 Feb 2014 17:30:03 +0100
Subject: [PATCH] fs: jbd2: pull your plug when waiting for space
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Two cp processes running in parallel managed to stall the ext4 fs. It seems that
journal code is either waiting for locks or sleeping waiting for
something to happen. This seems similar to what Mike observed on ext3,
here is his description:
|With an -rt kernel, and a heavy sync IO load, tasks can jam
|up on journal locks without unplugging, which can lead to
|terminal IO starvation. Unplug and schedule when waiting
|for space.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
fs/jbd2/checkpoint.c | 2 ++
1 file changed, 2 insertions(+)
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -125,6 +125,8 @@ void __jbd2_log_wait_for_space(journal_t
if (journal->j_flags & JBD2_ABORT)
return;
write_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*

View File

@@ -1,31 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 19 Jul 2009 08:44:27 -0500
Subject: fs: namespace preemption fix
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
On RT we cannot loop with preemption disabled here as
mnt_make_readonly() might have been preempted. We can safely enable
preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/namespace.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -343,8 +343,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+ preempt_enable();
cpu_relax();
+ preempt_disable();
+ }
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until

View File

@@ -1,60 +0,0 @@
From: Mike Galbraith <efault@gmx.de>
Date: Fri, 3 Jul 2009 08:44:12 -0500
Subject: fs: ntfs: disable interrupt only on !RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
> * Nick Piggin <nickpiggin@yahoo.com.au> wrote:
>
> > > [10138.175796] [<c0105de3>] show_trace+0x12/0x14
> > > [10138.180291] [<c0105dfb>] dump_stack+0x16/0x18
> > > [10138.184769] [<c011609f>] native_smp_call_function_mask+0x138/0x13d
> > > [10138.191117] [<c0117606>] smp_call_function+0x1e/0x24
> > > [10138.196210] [<c012f85c>] on_each_cpu+0x25/0x50
> > > [10138.200807] [<c0115c74>] flush_tlb_all+0x1e/0x20
> > > [10138.205553] [<c016caaf>] kmap_high+0x1b6/0x417
> > > [10138.210118] [<c011ec88>] kmap+0x4d/0x4f
> > > [10138.214102] [<c026a9d8>] ntfs_end_buffer_async_read+0x228/0x2f9
> > > [10138.220163] [<c01a0e9e>] end_bio_bh_io_sync+0x26/0x3f
> > > [10138.225352] [<c01a2b09>] bio_endio+0x42/0x6d
> > > [10138.229769] [<c02c2a08>] __end_that_request_first+0x115/0x4ac
> > > [10138.235682] [<c02c2da7>] end_that_request_chunk+0x8/0xa
> > > [10138.241052] [<c0365943>] ide_end_request+0x55/0x10a
> > > [10138.246058] [<c036dae3>] ide_dma_intr+0x6f/0xac
> > > [10138.250727] [<c0366d83>] ide_intr+0x93/0x1e0
> > > [10138.255125] [<c015afb4>] handle_IRQ_event+0x5c/0xc9
> >
> > Looks like ntfs is kmap()ing from interrupt context. Should be using
> > kmap_atomic instead, I think.
>
> it's not atomic interrupt context but irq thread context - and -rt
> remaps kmap_atomic() to kmap() internally.
Hm. Looking at the change to mm/bounce.c, perhaps I should do this
instead?
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/ntfs/aops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);

View File

@@ -1,162 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: buffer_head: Replace bh_uptodate_lock for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/buffer.c | 21 +++++++--------------
fs/ntfs/aops.c | 10 +++-------
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 44 insertions(+), 21 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -322,8 +322,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -336,8 +335,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors and they are all
@@ -349,9 +347,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/*
@@ -385,8 +381,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -398,15 +393,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
end_page_writeback(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3336,6 +3328,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
unlock_page(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/**
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -75,8 +75,42 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
+#endif
};
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+ return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+ local_irq_restore(flags);
+#else
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
+#endif
+}
+
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.

View File

@@ -1,82 +0,0 @@
Subject: ftrace-migrate-disable-tracing.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/ftrace_event.h | 2 ++
kernel/trace/trace.c | 11 +++++++----
kernel/trace/trace_events.c | 1 +
kernel/trace/trace_output.c | 5 +++++
4 files changed, 15 insertions(+), 4 deletions(-)
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -57,6 +57,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
+ unsigned short migrate_disable;
+ unsigned short padding;
};
#define FTRACE_MAX_EVENT \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -462,7 +462,7 @@ int __trace_puts(unsigned long ip, const
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
irq_flags, preempt_count());
if (!event)
return 0;
@@ -1552,6 +1552,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
+ entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -2462,9 +2464,10 @@ static void print_lat_help_header(struct
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
- seq_puts(m, "# |||| / delay \n");
- seq_puts(m, "# cmd pid ||||| time | caller \n");
- seq_puts(m, "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
+ seq_puts(m, "# ||||| / delay \n");
+ seq_puts(m, "# cmd pid |||||| time | caller \n");
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -160,6 +160,7 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
+ __common_field(unsigned short, migrate_disable);
return ret;
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -650,6 +650,11 @@ int trace_print_lat_fmt(struct trace_seq
else
ret = trace_seq_putc(s, '.');
+ if (entry->migrate_disable)
+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+ ret = trace_seq_putc(s, '.');
+
return ret;
}

View File

@@ -1,113 +0,0 @@
From: Steven Rostedt <rostedt@goodmis.org>
Subject: futex: Fix bug on when a requeued RT task times out
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Requeue with timeout causes a bug with PREEMPT_RT_FULL.
The bug comes from a timed out condition.
TASK 1                                  TASK 2
------                                  ------
futex_wait_requeue_pi()
    futex_wait_queue_me()
    <timed out>

                                        double_lock_hb();

    raw_spin_lock(pi_lock);
    if (current->pi_blocked_on) {
    } else {
        current->pi_blocked_on = PI_WAKE_INPROGRESS;
        raw_spin_unlock(pi_lock);
        spin_lock(hb->lock); <-- blocked!

                                        plist_for_each_entry_safe(this) {
                                            rt_mutex_start_proxy_lock();
                                            task_blocks_on_rt_mutex();
                                            BUG_ON(task->pi_blocked_on)!!!!
The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the
problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to
grab the hb->lock, which it fails to do. As the hb->lock is a mutex,
it will block and set the "pi_blocked_on" to the hb->lock.
When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGRESS fails
because TASK 1's pi_blocked_on is no longer set to that, but is instead
set to the hb->lock.
The fix:
When calling rt_mutex_start_proxy_lock() a check is made to see
if the proxy task's pi_blocked_on is set. If so, exit early.
Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
the proxy task that it is being requeued, and will handle things
appropriately.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/locking/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
kernel/locking/rtmutex_common.h | 1 +
2 files changed, 32 insertions(+), 1 deletion(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -71,7 +71,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
- return waiter && waiter != PI_WAKEUP_INPROGRESS;
+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+ waiter != PI_REQUEUE_INPROGRESS;
}
/*
@@ -1110,6 +1111,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * In PREEMPT_RT there's an added race.
+ * If the task, that we are about to requeue, times out,
+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
+ * to skip this task. But right after the task sets
+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
+ * lock that it blocks on. We *must not* place this task
+ * on this proxy lock in that case.
+ *
+ * To prevent this race, we first take the task's pi_lock
+ * and check if it has updated its pi_blocked_on. If it has,
+ * we assume that it woke up and we return -EAGAIN.
+ * Otherwise, we set the task's pi_blocked_on to
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -105,6 +105,7 @@ static inline struct task_struct *rt_mut
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,

View File

@@ -1,38 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: disable irqpoll on -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Creates long latencies for no value
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/irq/spurious.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -346,6 +346,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -358,6 +362,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");

View File

@@ -1,146 +0,0 @@
From 76666dbbdd40e963e7df84c123fc9aea4a2bcc69 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: [PATCH] genirq: do not invoke the affinity callback via a workqueue
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Joe Korty reported that __irq_set_affinity_locked() schedules a
workqueue while holding a raw lock, which results in a might_sleep()
warning.
This patch moves the invocation into process context so that we only
wake up a process while holding the lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/interrupt.h | 1
kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 77 insertions(+), 3 deletions(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -224,6 +224,7 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
+ struct list_head list;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data
return ret;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
+static struct task_struct *set_affinity_helper;
+static LIST_HEAD(affinity_list);
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
+
+static int set_affinity_thread(void *unused)
+{
+ while (1) {
+ struct irq_affinity_notify *notify;
+ int empty;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ raw_spin_lock_irq(&affinity_list_lock);
+ empty = list_empty(&affinity_list);
+ raw_spin_unlock_irq(&affinity_list_lock);
+
+ if (empty)
+ schedule();
+ if (kthread_should_stop())
+ break;
+ set_current_state(TASK_RUNNING);
+try_next:
+ notify = NULL;
+
+ raw_spin_lock_irq(&affinity_list_lock);
+ if (!list_empty(&affinity_list)) {
+ notify = list_first_entry(&affinity_list,
+ struct irq_affinity_notify, list);
+ list_del_init(&notify->list);
+ }
+ raw_spin_unlock_irq(&affinity_list_lock);
+
+ if (!notify)
+ continue;
+ _irq_affinity_notify(notify);
+ goto try_next;
+ }
+ return 0;
+}
+
+static void init_helper_thread(void)
+{
+ if (set_affinity_helper)
+ return;
+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
+ "affinity-cb");
+ WARN_ON(IS_ERR(set_affinity_helper));
+}
+#else
+
+static inline void init_helper_thread(void) { }
+
+#endif
+
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ raw_spin_lock(&affinity_list_lock);
+ if (list_empty(&desc->affinity_notify->list))
+ list_add_tail(&affinity_list,
+ &desc->affinity_notify->list);
+ raw_spin_unlock(&affinity_list_lock);
+ wake_up_process(set_affinity_helper);
+#else
schedule_work(&desc->affinity_notify->work);
+#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
{
- struct irq_affinity_notify *notify =
- container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
@@ -248,6 +312,13 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ _irq_affinity_notify(notify);
+}
+
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int i
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
+ INIT_LIST_HEAD(&notify->list);
+ init_helper_thread();
}
raw_spin_lock_irqsave(&desc->lock, flags);
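
The pattern this patch introduces — producers only link a node under a raw lock and wake a dedicated thread, which then runs the sleeping-lock callback in process context — can be sketched as a minimal userspace pthreads analogue (all names here are illustrative, not kernel API; the mutex stands in for the raw spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct item {
	struct item *next;
	int irq;			/* payload: which "IRQ" changed */
};

static struct item *pending;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t list_cond = PTHREAD_COND_INITIALIZER;

static void *affinity_worker(void *unused)
{
	(void)unused;
	for (;;) {
		struct item *it;

		pthread_mutex_lock(&list_lock);
		while (!pending)
			pthread_cond_wait(&list_cond, &list_lock);
		it = pending;
		pending = it->next;
		pthread_mutex_unlock(&list_lock);

		/* the "affinity callback" runs in plain thread context */
		printf("notify for irq %d\n", it->irq);
		free(it);
	}
	return NULL;
}

static void queue_notify(int irq)
{
	struct item *it = malloc(sizeof(*it));

	it->irq = irq;
	pthread_mutex_lock(&list_lock);	/* producer: link and wake only */
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&list_lock);
	pthread_cond_signal(&list_cond);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, affinity_worker, NULL);
	queue_notify(17);
	queue_notify(42);
	sleep(1);			/* let the worker drain the list */
	return 0;
}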

View File

@@ -1,46 +0,0 @@
Subject: genirq-force-threading.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 03 Apr 2011 11:57:29 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/interrupt.h | 6 +++++-
kernel/irq/manage.c | 2 ++
2 files changed, 7 insertions(+), 1 deletion(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -318,9 +318,13 @@ static inline int disable_irq_wake(unsig
#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
+# else
+# define force_irqthreads (true)
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads (false)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,6 +22,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
+# endif
#endif
/**

View File

@@ -1,21 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 18 Mar 2011 10:22:04 +0100
Subject: genirq: Disable DEBUG_SHIRQ for rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/Kconfig.debug | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -606,7 +606,7 @@ endmenu # "Memory Debugging"
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.

View File

@@ -1,205 +0,0 @@
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
get_online_cpus() is a heavyweight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
a CPU from going down while a task is in a migrate-disabled section.
Implement a per-CPU lockless mechanism, which serializes only in the
real unplug case on a global mutex. That serialization affects only
tasks on the CPU which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 4 +
kernel/cpu.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 124 insertions(+), 2 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -182,6 +182,8 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -196,6 +198,8 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -63,6 +63,101 @@ static struct {
.refcount = 0,
};
+struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+ struct completion synced;
+};
+
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ *
+ * Lightweight version of get_online_cpus() to prevent cpu from being
+ * unplugged when code runs in a migration disabled region.
+ *
+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
+ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
+
+retry:
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ hp->unplug == current) {
+ hp->refcount++;
+ return;
+ }
+ preempt_enable();
+ mutex_lock(&cpu_hotplug.lock);
+ mutex_unlock(&cpu_hotplug.lock);
+ preempt_disable();
+ goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ *
+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
+ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
+
+ WARN_ON(hp->refcount <= 0);
+
+ /* This is safe. sync_unplug_thread is pinned to this cpu */
+ if (!--hp->refcount && hp->unplug && hp->unplug != current)
+ wake_up_process(hp->unplug);
+}
+
+/*
+ * FIXME: Is this really correct under all circumstances ?
+ */
+static int sync_unplug_thread(void *data)
+{
+ struct hotplug_pcp *hp = data;
+
+ preempt_disable();
+ hp->unplug = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ preempt_enable();
+ complete(&hp->synced);
+ return 0;
+}
+
+/*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+ */
+static int cpu_unplug_begin(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
+ wake_up_process(tsk);
+ wait_for_completion(&hp->synced);
+ return 0;
+}
+
+static void cpu_unplug_done(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ hp->unplug = NULL;
+}
+
void get_online_cpus(void)
{
might_sleep();
@@ -282,13 +377,14 @@ static int __ref take_cpu_down(void *_pa
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
- int err, nr_calls = 0;
+ int mycpu, err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
+ cpumask_var_t cpumask;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -296,7 +392,27 @@ static int __ref _cpu_down(unsigned int
if (!cpu_online(cpu))
return -EINVAL;
+ /* Move the downtaker off the unplug cpu */
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+ preempt_disable();
+ mycpu = smp_processor_id();
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
+ preempt_enable();
+ return -EBUSY;
+ }
+ preempt_enable();
+
cpu_hotplug_begin();
+ err = cpu_unplug_begin(cpu);
+ if (err) {
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -333,7 +449,7 @@ static int __ref _cpu_down(unsigned int
/* CPU didn't die: tell everyone. Can't complain. */
smpboot_unpark_threads(cpu);
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
- goto out_release;
+ goto out_cancel;
}
BUG_ON(cpu_online(cpu));
@@ -356,6 +472,8 @@ static int __ref _cpu_down(unsigned int
check_for_tasks(cpu);
out_release:
+ cpu_unplug_done(cpu);
+out_cancel:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
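
The pin/unpin fast path above reduces to a refcount check plus increment, with the global mutex touched only when an unplug is actually pending. A rough userspace rendition of that logic, assuming a single CPU slot (names invented for illustration):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sync_done = PTHREAD_COND_INITIALIZER;
static bool unplug_pending;
static int refcount;

void pin_cpu(void)
{
	for (;;) {
		pthread_mutex_lock(&state_lock);
		if (!unplug_pending || refcount) {	/* fast path */
			refcount++;
			pthread_mutex_unlock(&state_lock);
			return;
		}
		pthread_mutex_unlock(&state_lock);
		/* slow path: block until the unplugger drops the mutex */
		pthread_mutex_lock(&hotplug_lock);
		pthread_mutex_unlock(&hotplug_lock);
	}
}

void unpin_cpu(void)
{
	pthread_mutex_lock(&state_lock);
	/* kernel version: wake_up_process(hp->unplug) at refcount 0 */
	if (--refcount == 0 && unplug_pending)
		pthread_cond_signal(&sync_done);
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	pin_cpu();
	/* ...migrate-disabled region... */
	unpin_cpu();
	return 0;
}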

View File

@@ -1,25 +0,0 @@
Subject: hotplug: sync_unplug: No "\n" in task name
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Sun, 16 Oct 2011 18:56:43 +0800
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Otherwise the output will look a little odd.
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -142,7 +142,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(tsk))
return (PTR_ERR(tsk));
kthread_bind(tsk, cpu);

View File

@@ -1,37 +0,0 @@
Subject: hotplug-use-migrate-disable.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 19:35:29 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -400,14 +400,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
- preempt_disable();
+ migrate_disable();
mycpu = smp_processor_id();
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
- preempt_enable();
+ migrate_enable();
return -EBUSY;
}
- preempt_enable();
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
@@ -476,6 +475,7 @@ static int __ref _cpu_down(unsigned int
out_release:
cpu_unplug_done(cpu);
out_cancel:
+ migrate_enable();
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);

View File

@@ -1,119 +0,0 @@
From 180cdb93d796bf52c919f5e3df30af83aa6d46ca Mon Sep 17 00:00:00 2001
From: Yang Shi <yang.shi@windriver.com>
Date: Mon, 16 Sep 2013 14:09:19 -0700
Subject: [PATCH] hrtimer: Move schedule_work call to helper thread
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
When running the LTP leapsec_timer test, the following call trace is caught:
BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1
Preemption disabled at:[<ffffffff810857f3>] cpu_startup_entry+0x133/0x310
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2
Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010
ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58
ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0
ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200
Call Trace:
<IRQ> [<ffffffff8169918d>] dump_stack+0x19/0x1b
[<ffffffff8106db31>] __might_sleep+0xf1/0x170
[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50
[<ffffffff81059da1>] queue_work_on+0x61/0x100
[<ffffffff81065aa1>] clock_was_set_delayed+0x21/0x30
[<ffffffff810883be>] do_timer+0x40e/0x660
[<ffffffff8108f487>] tick_do_update_jiffies64+0xf7/0x140
[<ffffffff8108fe42>] tick_check_idle+0x92/0xc0
[<ffffffff81044327>] irq_enter+0x57/0x70
[<ffffffff816a040e>] smp_apic_timer_interrupt+0x3e/0x9b
[<ffffffff8169f80a>] apic_timer_interrupt+0x6a/0x70
<EOI> [<ffffffff8155ea1c>] ? cpuidle_enter_state+0x4c/0xc0
[<ffffffff8155eb68>] cpuidle_idle_call+0xd8/0x2d0
[<ffffffff8100b59e>] arch_cpu_idle+0xe/0x30
[<ffffffff8108585e>] cpu_startup_entry+0x19e/0x310
[<ffffffff8168efa2>] start_secondary+0x1ad/0x1b0
clock_was_set_delayed() is called in the hard IRQ handler (timer interrupt), which
calls schedule_work().
Under PREEMPT_RT_FULL, schedule_work() takes spinlocks which can sleep, so it's
not safe to call schedule_work() in interrupt context.
Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
(rt,ntp: Move call to schedule_delayed_work() to helper thread)
from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
makes a similar change.
Add a helper thread which does the call to schedule_work(), and wake that
thread up instead of calling schedule_work() directly.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Yang Shi <yang.shi@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -48,6 +48,7 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
+#include <linux/kthread.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
@@ -742,6 +743,44 @@ static void clock_was_set_work(struct wo
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * RT can not call schedule_work from real interrupt context.
+ * Need to make a thread to do the real work.
+ */
+static struct task_struct *clock_set_delay_thread;
+static bool do_clock_set_delay;
+
+static int run_clock_set_delay(void *ignore)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (do_clock_set_delay) {
+ do_clock_set_delay = false;
+ schedule_work(&hrtimer_work);
+ }
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+void clock_was_set_delayed(void)
+{
+ do_clock_set_delay = true;
+ /* Make visible before waking up process */
+ smp_wmb();
+ wake_up_process(clock_set_delay_thread);
+}
+
+static __init int create_clock_set_delay_thread(void)
+{
+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
+ BUG_ON(!clock_set_delay_thread);
+ return 0;
+}
+early_initcall(create_clock_set_delay_thread);
+#else /* PREEMPT_RT_FULL */
/*
* Called from timekeeping and resume code to reprogramm the hrtimer
* interrupt device on all cpus.
@@ -750,6 +789,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
+#endif
#else
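
The same flag-plus-wakeup shape can be exercised in userspace, where a signal handler plays the hard IRQ: only async-signal-safe operations happen in the handler, and the sleeping-lock work (schedule_work() in the patch) is done by the woken helper. Illustrative sketch, not kernel code:

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sem_t wake;
static volatile sig_atomic_t do_delay;

static void alarm_handler(int sig)
{
	(void)sig;
	do_delay = 1;			/* cheap and async-signal-safe */
	sem_post(&wake);		/* wake_up_process() equivalent */
}

static void *helper(void *unused)
{
	(void)unused;
	for (;;) {
		sem_wait(&wake);
		if (do_delay) {
			do_delay = 0;
			/* here the real code calls schedule_work() */
			printf("deferred work runs in thread context\n");
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&wake, 0, 0);
	pthread_create(&t, NULL, helper, NULL);
	signal(SIGALRM, alarm_handler);
	alarm(1);			/* stands in for the timer IRQ */
	sleep(2);
	return 0;
}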

View File

@@ -1,453 +0,0 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: fixup hrtimer callback changes for preempt-rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
In preempt-rt we cannot call callbacks which take sleeping locks
from timer interrupt context.
Bring back the softirq split for now, until we have fixed the signal
delivery problem for real.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
include/linux/hrtimer.h | 3
kernel/hrtimer.c | 216 ++++++++++++++++++++++++++++++++++++++++-------
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
6 files changed, 195 insertions(+), 28 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,6 +111,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
@@ -150,6 +152,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
+ struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -609,8 +609,7 @@ static int hrtimer_reprogram(struct hrti
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
- * reprogramming is handled either by the softirq, which called the
- * callback or at the end of the hrtimer_interrupt.
+ * reprogramming is handled at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
@@ -645,6 +644,9 @@ static int hrtimer_reprogram(struct hrti
return res;
}
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
+static int hrtimer_rt_defer(struct hrtimer *timer);
+
/*
* Initialize the high resolution related parts of cpu_base
*/
@@ -661,9 +663,18 @@ static inline void hrtimer_init_hres(str
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
- return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base)))
+ return 0;
+ if (!wakeup)
+ return -ETIME;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ if (!hrtimer_rt_defer(timer))
+ return -ETIME;
+#endif
+ return 1;
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -748,12 +759,18 @@ static inline int hrtimer_switch_to_hres
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ return 0;
+}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -889,9 +906,9 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
+ if (base && base->cpu_base && !timer->irqsafe)
wait_event(base->cpu_base->wait,
- !(timer->state & HRTIMER_STATE_CALLBACK));
+ !(timer->state & HRTIMER_STATE_CALLBACK));
}
#else
@@ -941,6 +958,11 @@ static void __remove_hrtimer(struct hrti
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
+ if (unlikely(!list_empty(&timer->cb_entry))) {
+ list_del_init(&timer->cb_entry);
+ goto out;
+ }
+
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -1048,9 +1070,19 @@ int __hrtimer_start_range_ns(struct hrti
*
* XXX send_remote_softirq() ?
*/
- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
- && hrtimer_enqueue_reprogram(timer, new_base)) {
- if (wakeup) {
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+ if (ret < 0) {
+ /*
+ * In case we failed to reprogram the timer (mostly
+ * because out current timer is already elapsed),
+ * remove it again and report a failure. This avoids
+ * stale base->first entries.
+ */
+ debug_deactivate(timer);
+ __remove_hrtimer(timer, new_base,
+ timer->state & HRTIMER_STATE_CALLBACK, 0);
+ } else if (ret > 0) {
/*
* We need to drop cpu_base->lock to avoid a
* lock ordering issue vs. rq->lock.
@@ -1058,9 +1090,7 @@ int __hrtimer_start_range_ns(struct hrti
raw_spin_unlock(&new_base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
local_irq_restore(flags);
- return ret;
- } else {
- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ return 0;
}
}
@@ -1229,6 +1259,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
+ INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1312,10 +1343,128 @@ static void __run_hrtimer(struct hrtimer
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ /*
+ * Note, we clear the callback flag before we requeue the
+ * timer otherwise we trigger the callback_running() check
+ * in hrtimer_reprogram().
+ */
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+
+ if (restart != HRTIMER_NORESTART) {
+ BUG_ON(hrtimer_active(timer));
+ /*
+ * Enqueue the timer, if it's the leftmost timer then
+ * we need to reprogram it.
+ */
+ if (!enqueue_hrtimer(timer, base))
+ return;
+
+#ifndef CONFIG_HIGH_RES_TIMERS
+ }
+#else
+ if (base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+
+ } else if (hrtimer_active(timer)) {
+ /*
+ * If the timer was rearmed on another CPU, reprogram
+ * the event device.
+ */
+ if (&timer->node == base->active.next &&
+ base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+ }
+ return;
+
+requeue:
+ /*
+ * Timer is expired. Thus move it from tree to pending list
+ * again.
+ */
+ __remove_hrtimer(timer, base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &base->expired);
+#endif
+}
+
+/*
+ * The changes in mainline which removed the callback modes from
+ * hrtimer are not yet working with -rt. The non wakeup_process()
+ * based callbacks which involve sleeping locks need to be treated
+ * seperately.
+ */
+static void hrtimer_rt_run_pending(void)
+{
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
+ struct hrtimer *timer;
+ int index, restart;
+
+ local_irq_disable();
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
+
+ raw_spin_lock(&cpu_base->lock);
+
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+ base = &cpu_base->clock_base[index];
+
+ while (!list_empty(&base->expired)) {
+ timer = list_first_entry(&base->expired,
+ struct hrtimer, cb_entry);
+
+ /*
+ * Same as the above __run_hrtimer function
+ * just we run with interrupts enabled.
+ */
+ debug_hrtimer_deactivate(timer);
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+ restart = fn(timer);
+ raw_spin_lock_irq(&cpu_base->lock);
+
+ hrtimer_rt_reprogram(restart, timer, base);
+ }
+ }
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
+}
+
+static int hrtimer_rt_defer(struct hrtimer *timer)
+{
+ if (timer->irqsafe)
+ return 0;
+
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
+ return 1;
+}
+
+#else
+
+static inline void hrtimer_rt_run_pending(void)
+{
+ hrtimer_peek_ahead_timers();
+}
+
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1324,7 +1473,7 @@ void hrtimer_interrupt(struct clock_even
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
- int i, retries = 0;
+ int i, retries = 0, raise = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1393,7 +1542,10 @@ void hrtimer_interrupt(struct clock_even
break;
}
- __run_hrtimer(timer, &basenow);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &basenow);
+ else
+ raise = 1;
}
}
@@ -1408,6 +1560,10 @@ void hrtimer_interrupt(struct clock_even
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
+
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
return;
}
@@ -1487,18 +1643,18 @@ void hrtimer_peek_ahead_timers(void)
__hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
-
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- hrtimer_peek_ahead_timers();
-}
-
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
+
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
+
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
@@ -1531,7 +1687,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- int index, gettime = 1;
+ int index, gettime = 1, raise = 0;
if (hrtimer_hres_active())
return;
@@ -1556,12 +1712,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer, &base->softirq_time);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &base->softirq_time);
+ else
+ raise = 1;
}
raw_spin_unlock(&cpu_base->lock);
}
- wake_up_timer_waiters(cpu_base);
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
@@ -1583,6 +1743,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
+ sl->timer.irqsafe = 1;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1719,6 +1880,7 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
}
hrtimer_init_hres(cpu_base);
@@ -1837,9 +1999,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
}
/**
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -489,6 +489,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -43,6 +43,7 @@ void init_rt_bandwidth(struct rt_bandwid
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.irqsafe = 1;
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1111,6 +1111,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -357,6 +357,7 @@ static void watchdog_enable(unsigned int
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
+ hrtimer->irqsafe = 1;
/* Enable the perf event */
watchdog_nmi_enable(cpu);
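
Boiled down, the split this patch reintroduces is a two-way dispatch at expiry time: irqsafe timers run from the hard interrupt as before, everything else is parked on a per-base expired list and run later with interrupts enabled. A compact C sketch of just that dispatch (types and names invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sketch_timer {
	struct sketch_timer *next;
	bool irqsafe;
	void (*function)(struct sketch_timer *);
};

static struct sketch_timer *expired;	/* per clock base in the patch */

static void expire_from_irq(struct sketch_timer *t)
{
	if (t->irqsafe) {
		t->function(t);		/* wakeup-style callbacks only */
	} else {
		t->next = expired;	/* defer: may take sleeping locks */
		expired = t;
		/* real code: raise_softirq_irqoff(HRTIMER_SOFTIRQ) */
	}
}

static void run_expired_preemptible(void)
{
	while (expired) {
		struct sketch_timer *t = expired;

		expired = t->next;
		t->function(t);		/* runs with interrupts enabled */
	}
}

static void wake_cb(struct sketch_timer *t)
{
	(void)t;
	puts("callback");
}

int main(void)
{
	struct sketch_timer t = { .irqsafe = false, .function = wake_cb };

	expire_from_irq(&t);		/* deferred, nothing printed yet */
	run_expired_preemptible();	/* prints "callback" */
	return 0;
}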

View File

@@ -1,38 +0,0 @@
Subject: hrtimer: Raise softirq if hrtimer irq stalled
From: Watanabe <shunsuke.watanabe@tel.com>
Date: Sun, 28 Oct 2012 11:13:44 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
When the hrtimer stall detection hits, the softirq is not raised.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
kernel/hrtimer.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1560,11 +1560,7 @@ void hrtimer_interrupt(struct clock_even
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
-
- if (raise)
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
- return;
+ goto out;
}
/*
@@ -1608,6 +1604,9 @@ void hrtimer_interrupt(struct clock_even
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
+out:
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*

View File

@@ -1,196 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: hrtimers: prepare full preemption
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Make cancellation of a running callback in softirq context safe
against preemption.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/hrtimer.h | 10 ++++++++++
kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
kernel/itimer.c | 1 +
kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
4 files changed, 76 insertions(+), 1 deletion(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -193,6 +193,9 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
@@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
+#endif
+
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -872,6 +872,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer: timer to wait for
+ *
+ * The function waits in case the timers callback function is
+ * currently executed on the waitqueue of the timer base. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
+ wait_event(base->cpu_base->wait,
+ !(timer->state & HRTIMER_STATE_CALLBACK));
+}
+
+#else
+# define wake_up_timer_waiters(b) do { } while (0)
+#endif
+
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -1124,7 +1150,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
- cpu_relax();
+ hrtimer_wait_for_timer(timer);
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1534,6 +1560,8 @@ void hrtimer_run_queues(void)
}
raw_spin_unlock(&cpu_base->lock);
}
+
+ wake_up_timer_waiters(cpu_base);
}
/*
@@ -1694,6 +1722,9 @@ static void init_hrtimers_cpu(int cpu)
}
hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ init_waitqueue_head(&cpu_base->wait);
+#endif
}
#ifdef CONFIG_HOTPLUG_CPU
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
goto again;
}
expires = timeval_to_ktime(value->it_value);
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
return overrun;
}
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (kc->timer_set == common_timer_set)
+ hrtimer_wait_for_timer(&timr->it.real.timer);
+ else
+ /* FIXME: Whacky hack for posix-cpu-timers */
+ schedule_timeout(1);
+#endif
+}
+
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
@@ -895,6 +909,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
if (!timr)
return -EINVAL;
+ rcu_read_lock();
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -903,9 +918,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
+ timer_wait_for_callback(kc, timr);
rtn = NULL; // We already got the old time...
+ rcu_read_unlock();
goto retry;
}
+ rcu_read_unlock();
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -943,10 +961,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
if (!timer)
return -EINVAL;
+ rcu_read_lock();
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
+ rcu_read_unlock();
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -972,8 +995,18 @@ static void itimer_delete(struct k_itime
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
+ /* On RT we can race with a deletion */
+ if (!timer->it_signal) {
+ unlock_timer(timer, flags);
+ return;
+ }
+
if (timer_delete_hook(timer) == TIMER_RETRY) {
+ rcu_read_lock();
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
list_del(&timer->list);
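
The hrtimer_wait_for_timer() idea — sleep on a per-base waitqueue until the CALLBACK bit clears, instead of spinning in cpu_relax() — maps directly onto a condition variable. Minimal userspace sketch, names invented:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
static bool callback_running;

void callback_finished(void)		/* softirq side */
{
	pthread_mutex_lock(&base_lock);
	callback_running = false;
	pthread_cond_broadcast(&wait_q);	/* wake_up_timer_waiters() */
	pthread_mutex_unlock(&base_lock);
}

void wait_for_callback(void)		/* cancel side */
{
	pthread_mutex_lock(&base_lock);
	while (callback_running)		/* wait_event(...) */
		pthread_cond_wait(&wait_q, &base_lock);
	pthread_mutex_unlock(&base_lock);
}

int main(void)
{
	wait_for_callback();		/* returns at once: nothing running */
	return 0;
}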

View File

@@ -1,27 +0,0 @@
From c19bf3baaa55918486b868ab17aae0c0c220e51f Mon Sep 17 00:00:00 2001
From: Mike Galbraith <bitbucket@online.de>
Date: Fri, 30 Aug 2013 07:57:25 +0200
Subject: [PATCH] hwlat-detector: Don't ignore threshold module parameter
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
If the user specified a threshold at module load time, use it.
Cc: stable-rt@vger.kernel.org
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -414,7 +414,7 @@ static int init_stats(void)
goto out;
__reset_stats();
- data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */

View File

@@ -1,128 +0,0 @@
From 7a036d4dfcf3f2d3247ff7f739284f4b5056bdcb Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:25 -0400
Subject: [PATCH 1/3] hwlat-detector: Update hwlat_detector to add outer loop
detection
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The hwlat_detector reads two timestamps in a row, then reports any
gap between those calls. The problem is, it misses everything between
the second reading of the time stamp and the first reading of the time stamp
in the next loop. That's where most of the time is spent, which means
chances are it will miss all hardware latencies. This
defeats the purpose.
By also testing the first time stamp against the previous loop's second
time stamp (the outer loop), we are more likely to find a latency.
Setting the threshold to 1, here's what the report now looks like:
1347415723.0232202770 0 2
1347415725.0234202822 0 2
1347415727.0236202875 0 2
1347415729.0238202928 0 2
1347415731.0240202980 0 2
1347415734.0243203061 0 2
1347415736.0245203113 0 2
1347415738.0247203166 2 0
1347415740.0249203219 0 3
1347415742.0251203272 0 3
1347415743.0252203299 0 3
1347415745.0254203351 0 2
1347415747.0256203404 0 2
1347415749.0258203457 0 2
1347415751.0260203510 0 2
1347415754.0263203589 0 2
1347415756.0265203642 0 2
1347415758.0267203695 0 2
1347415760.0269203748 0 2
1347415762.0271203801 0 2
1347415764.0273203853 2 0
There's some hardware latency that takes 2 microseconds to run.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------
1 file changed, 26 insertions(+), 6 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -143,6 +143,7 @@ static void detector_exit(void);
struct sample {
u64 seqnum; /* unique sequence */
u64 duration; /* ktime delta */
+ u64 outer_duration; /* ktime delta (outer loop) */
struct timespec timestamp; /* wall time */
unsigned long lost;
};
@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(
*/
static int get_sample(void *unused)
{
- ktime_t start, t1, t2;
+ ktime_t start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
+ u64 outer_sample = 0;
int ret = 1;
+ last_t2.tv64 = 0;
start = ktime_get(); /* start timestamp */
do {
@@ -231,7 +234,22 @@ static int get_sample(void *unused)
t1 = ktime_get(); /* we'll look for a discontinuity */
t2 = ktime_get();
+ if (last_t2.tv64) {
+ /* Check the delta from outer loop (t2 to next t1) */
+ diff = ktime_to_us(ktime_sub(t1, last_t2));
+ /* This shouldn't happen */
+ if (diff < 0) {
+ pr_err(BANNER "time running backwards\n");
+ goto out;
+ }
+ if (diff > outer_sample)
+ outer_sample = diff;
+ }
+ last_t2 = t2;
+
total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+
+ /* This checks the inner loop (t1 to t2) */
diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
/* This shouldn't happen */
@@ -246,12 +264,13 @@ static int get_sample(void *unused)
} while (total <= data.sample_width);
/* If we exceed the threshold value, we have found a hardware latency */
- if (sample > data.threshold) {
+ if (sample > data.threshold || outer_sample > data.threshold) {
struct sample s;
data.count++;
s.seqnum = data.count;
s.duration = sample;
+ s.outer_duration = outer_sample;
s.timestamp = CURRENT_TIME;
__buffer_add_sample(&s);
@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct
}
}
- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
- sample->timestamp.tv_sec,
- sample->timestamp.tv_nsec,
- sample->duration);
+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
+ sample->timestamp.tv_sec,
+ sample->timestamp.tv_nsec,
+ sample->duration,
+ sample->outer_duration);
/* handling partial reads is more trouble than it's worth */
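
The inner/outer scheme is easy to reproduce in userspace; here is a runnable approximation with clock_gettime() standing in for the TSC read (outside the kernel, preemption rather than SMIs will dominate the gaps):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	int64_t start = now_us(), last_t2 = 0;
	int64_t inner_max = 0, outer_max = 0;

	do {
		int64_t t1 = now_us();
		int64_t t2 = now_us();

		if (last_t2 && t1 - last_t2 > outer_max)
			outer_max = t1 - last_t2;	/* outer-loop gap */
		if (t2 - t1 > inner_max)
			inner_max = t2 - t1;		/* inner gap */
		last_t2 = t2;
	} while (now_us() - start < 500000);		/* 500 ms window */

	printf("max inner %lld us, max outer %lld us\n",
	       (long long)inner_max, (long long)outer_max);
	return 0;
}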

View File

@@ -1,185 +0,0 @@
From 42b3963c5d3dcdb54226fc6bbb6b5fbcf3f2ddee Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:27 -0400
Subject: [PATCH 3/3] hwlat-detector: Use thread instead of stop machine
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
There's no reason to use stop_machine() to search for hardware latency.
Simply disabling interrupts while running the loop is enough to
catch anything that comes in while interrupts are supposed to be
off, which is exactly what stop_machine() was providing.
Instead of using stop_machine(), just have the thread disable interrupts
while it checks for hardware latency.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 60 ++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 34 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -41,7 +41,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ring_buffer.h>
-#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/kthread.h>
@@ -107,7 +106,6 @@ struct data; /* Global state */
/* Sampling functions */
static int __buffer_add_sample(struct sample *sample);
static struct sample *buffer_get_sample(struct sample *sample);
-static int get_sample(void *unused);
/* Threading and state */
static int kthread_fn(void *unused);
@@ -149,7 +147,7 @@ struct sample {
unsigned long lost;
};
-/* keep the global state somewhere. Mostly used under stop_machine. */
+/* keep the global state somewhere. */
static struct data {
struct mutex lock; /* protect changes */
@@ -172,7 +170,7 @@ static struct data {
* @sample: The new latency sample value
*
* This receives a new latency sample and records it in a global ring buffer.
- * No additional locking is used in this case - suited for stop_machine use.
+ * No additional locking is used in this case.
*/
static int __buffer_add_sample(struct sample *sample)
{
@@ -229,18 +227,18 @@ static struct sample *buffer_get_sample(
#endif
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
- * @unused: This is not used but is a part of the stop_machine API
*
* Used to repeatedly capture the CPU TSC (or similar), looking for potential
- * hardware-induced latency. Called under stop_machine, with data.lock held.
+ * hardware-induced latency. Called with interrupts disabled and with
+ * data.lock held.
*/
-static int get_sample(void *unused)
+static int get_sample(void)
{
time_type start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
u64 outer_sample = 0;
- int ret = 1;
+ int ret = -1;
init_time(last_t2, 0);
start = time_get(); /* start timestamp */
@@ -279,10 +277,14 @@ static int get_sample(void *unused)
} while (total <= data.sample_width);
+ ret = 0;
+
/* If we exceed the threshold value, we have found a hardware latency */
if (sample > data.threshold || outer_sample > data.threshold) {
struct sample s;
+ ret = 1;
+
data.count++;
s.seqnum = data.count;
s.duration = sample;
@@ -295,7 +297,6 @@ static int get_sample(void *unused)
data.max_sample = sample;
}
- ret = 0;
out:
return ret;
}
@@ -305,32 +306,30 @@ static int get_sample(void *unused)
* @unused: A required part of the kthread API.
*
* Used to periodically sample the CPU TSC via a call to get_sample. We
- * use stop_machine, whith does (intentionally) introduce latency since we
+ * disable interrupts, which does (intentionally) introduce latency since we
* need to ensure nothing else might be running (and thus pre-empting).
* Obviously this should never be used in production environments.
*
- * stop_machine will schedule us typically only on CPU0 which is fine for
- * almost every real-world hardware latency situation - but we might later
- * generalize this if we find there are any actualy systems with alternate
- * SMI delivery or other non CPU0 hardware latencies.
+ * Currently this runs on which ever CPU it was scheduled on, but most
+ * real-worald hardware latency situations occur across several CPUs,
+ * but we might later generalize this if we find there are any actualy
+ * systems with alternate SMI delivery or other hardware latencies.
*/
static int kthread_fn(void *unused)
{
- int err = 0;
- u64 interval = 0;
+ int ret;
+ u64 interval;
while (!kthread_should_stop()) {
mutex_lock(&data.lock);
- err = stop_machine(get_sample, unused, 0);
- if (err) {
- /* Houston, we have a problem */
- mutex_unlock(&data.lock);
- goto err_out;
- }
+ local_irq_disable();
+ ret = get_sample();
+ local_irq_enable();
- wake_up(&data.wq); /* wake up reader(s) */
+ if (ret > 0)
+ wake_up(&data.wq); /* wake up reader(s) */
interval = data.sample_window - data.sample_width;
do_div(interval, USEC_PER_MSEC); /* modifies interval value */
@@ -338,15 +337,10 @@ static int kthread_fn(void *unused)
mutex_unlock(&data.lock);
if (msleep_interruptible(interval))
- goto out;
+ break;
}
- goto out;
-err_out:
- pr_err(BANNER "could not call stop_machine, disabling\n");
- enabled = 0;
-out:
- return err;
+ return 0;
}
/**
@@ -442,8 +436,7 @@ static int init_stats(void)
* This function provides a generic read implementation for the global state
* "data" structure debugfs filesystem entries. It would be nice to use
* simple_attr_read directly, but we need to make sure that the data.lock
- * spinlock is held during the actual read (even though we likely won't ever
- * actually race here as the updater runs under a stop_machine context).
+ * is held during the actual read.
*/
static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos, const u64 *entry)
@@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct f
* This function provides a generic write implementation for the global state
* "data" structure debugfs filesystem entries. It would be nice to use
* simple_attr_write directly, but we need to make sure that the data.lock
- * spinlock is held during the actual write (even though we likely won't ever
- * actually race here as the updater runs under a stop_machine context).
+ * is held during the actual write.
*/
static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos, u64 *entry)

View File

@@ -1,94 +0,0 @@
From 4aaca90c0255caee9a55371afaecb32365123762 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:26 -0400
Subject: [PATCH 2/3] hwlat-detector: Use trace_clock_local if available
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
As ktime_get() calls into the timing code, which does a read_seq(), it
may be affected by other CPUs that touch that lock. To remove this
dependency, use trace_clock_local(), which is already exported
for module use. If CONFIG_TRACING is enabled, use that as the clock;
otherwise use ktime_get().
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -51,6 +51,7 @@
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/trace_clock.h>
#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(
return sample;
}
+#ifndef CONFIG_TRACING
+#define time_type ktime_t
+#define time_get() ktime_get()
+#define time_to_us(x) ktime_to_us(x)
+#define time_sub(a, b) ktime_sub(a, b)
+#define init_time(a, b) (a).tv64 = b
+#define time_u64(a) ((a).tv64)
+#else
+#define time_type u64
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+#define init_time(a, b) (a = b)
+#define time_u64(a) a
+#endif
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
* @unused: This is not used but is a part of the stop_machine API
@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(
*/
static int get_sample(void *unused)
{
- ktime_t start, t1, t2, last_t2;
+ time_type start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
u64 outer_sample = 0;
int ret = 1;
- last_t2.tv64 = 0;
- start = ktime_get(); /* start timestamp */
+ init_time(last_t2, 0);
+ start = time_get(); /* start timestamp */
do {
- t1 = ktime_get(); /* we'll look for a discontinuity */
- t2 = ktime_get();
+ t1 = time_get(); /* we'll look for a discontinuity */
+ t2 = time_get();
- if (last_t2.tv64) {
+ if (time_u64(last_t2)) {
/* Check the delta from outer loop (t2 to next t1) */
- diff = ktime_to_us(ktime_sub(t1, last_t2));
+ diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
if (diff < 0) {
pr_err(BANNER "time running backwards\n");
@@ -247,10 +263,10 @@ static int get_sample(void *unused)
}
last_t2 = t2;
- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+ total = time_to_us(time_sub(t2, start)); /* sample width */
/* This checks the inner loop (t1 to t2) */
- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
/* This shouldn't happen */
if (diff < 0) {

File diff suppressed because it is too large

View File

@@ -1,35 +0,0 @@
From 5145351047b216cca13aaca99f939a9a594c6c4d Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 11:35:49 +0100
Subject: [PATCH 2/3] i2c/omap: drop the lock hard irq context
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The lock is taken while reading two registers. On RT the lock is
taken in hard IRQ context, where it might sleep, and again in the threaded IRQ.
The threaded IRQ runs in oneshot mode, so the hard IRQ handler does not run again
until the thread completes, so there is no reason to grab the lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/i2c/busses/i2c-omap.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -879,15 +879,12 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
- spin_lock(&dev->lock);
- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
- spin_unlock(&dev->lock);
-
return ret;
}
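
The reasoning relies on how oneshot threaded interrupts serialize: with IRQF_ONESHOT the line stays masked from the hard handler until the thread function finishes, so the two can never run concurrently for the same device. A kernel-style fragment of that setup (illustrative, not the actual i2c-omap code):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* read status only; no lock needed, the thread cannot be running */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread(int irq, void *dev_id)
{
	/* heavy lifting in sleepable context; IRQ is unmasked afterwards */
	return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    IRQF_ONESHOT, "demo", dev);
}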

View File

@@ -1,23 +0,0 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: gpu/i915: don't open code these things
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
The open-coded part is gone since 1f83fee0 ("drm/i915: clear up wedged transitions");
the owner check is still there.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_gem.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4975,7 +4975,7 @@ static bool mutex_is_locked_by(struct mu
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */

View File

@@ -1,170 +0,0 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
Subject: ide: Do not disable interrupts for PREEMPT-RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.14/patches-3.14.0-rt1.tar.xz
Use the local_irq_*_nort variants.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/ide/alim15x3.c | 4 ++--
drivers/ide/hpt366.c | 4 ++--
drivers/ide/ide-io-std.c | 8 ++++----
drivers/ide/ide-io.c | 2 +-
drivers/ide/ide-iops.c | 4 ++--
drivers/ide/ide-probe.c | 4 ++--
drivers/ide/ide-taskfile.c | 6 +++---
7 files changed, 16 insertions(+), 16 deletions(-)
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (m5229_revision < 0xC2) {
/*
@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p
}
pci_dev_put(north);
pci_dev_put(isa_dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return 0;
}
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h
dma_old = inb(base + 2);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h
if (dma_new != dma_old)
outb(dma_new, base + 2);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
if (((len + 1) & 3) < 2)
return;
@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive,
outsl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
if (((len + 1) & 3) < 2)
return;
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
/* local CPU only, as if we were handling an interrupt */
- local_irq_disable();
+ local_irq_disable_nort();
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
if ((stat & ATA_BUSY) == 0)
break;
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
*rstat = stat;
return -EBUSY;
}
}
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
/*
* Allow status to settle, then read it again.
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri
int bswap = 1;
/* local CPU only; some systems need this */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
/* read 512 bytes of id info */
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
page_is_high = PageHighMem(page);
if (page_is_high)
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page) + offset;
@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
kunmap_atomic(buf);
if (page_is_high)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
len -= nr_bytes;
}
@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable();
+ local_irq_disable_nort();
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
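
For context, the _nort helpers come from the -rt series itself. From memory (treat these exact definitions as an assumption, not verified against this tree), they map to the ordinary IRQ operations on non-RT kernels and degrade to flag-saving no-ops on PREEMPT_RT_FULL, which is what keeps interrupts enabled here:

/* Approximate -rt definitions (assumption, recalled from the series) */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()	barrier()
# define local_irq_enable_nort()	barrier()
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_enable_nort()	local_irq_enable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif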

Some files were not shown because too many files have changed in this diff