From: Alex Williamson <alex.williamson@redhat.com>
Date: Wed, 3 Apr 2019 12:36:21 -0600
Subject: vfio/type1: Limit DMA mappings per container
Origin: https://git.kernel.org/linus/492855939bdb59c6f947b0b5b44af9ad82b7e38c
Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-3882

Memory backed DMA mappings are accounted against a user's locked
memory limit, including multiple mappings of the same memory.  This
accounting bounds the number of such mappings that a user can create.
However, DMA mappings that are not backed by memory, such as DMA
mappings of device MMIO via mmaps, do not make use of page pinning
and therefore do not count against the user's locked memory limit.
These mappings still consume memory, but the memory is not well
associated to the process for the purpose of oom killing a task.

To add bounding on this use case, we introduce a limit to the total
number of concurrent DMA mappings that a user is allowed to create.
This limit is exposed as a tunable module option where the default
value of 64K is expected to be well in excess of any reasonable use
case (a large virtual machine configuration would typically only make
use of tens of concurrent mappings).

This fixes CVE-2019-3882.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		goto out_unlock;
 	}
 
+	if (!iommu->dma_avail) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
-- 
2.11.0