author     Linus Torvalds <torvalds@linux-foundation.org>  2023-04-06 11:27:21 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-04-06 11:27:21 -0700
commit     105b64c83872c39d86c1e3dea9ee4185c62114dc (patch)
tree       2df0af26f84b9cabfa30d89f4d79c9fc43dcd6d5
parent     ae52f797904ef0e98951e43d15a274fa8e80bbe5 (diff)
parent     13a0d1ae7ee6b438f5537711a8c60cba00554943 (diff)
Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd
Pull iommufd fixes from Jason Gunthorpe:

 - An invalid VA range can be put in a pages and eventually trigger
   WARN_ON, reject it early

 - Use of the wrong start index value when doing the complex batch carry
   scheme

 - Wrong store ordering resulting in corrupting data used in a later
   calculation that corrupted the batch structure during carry

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
  iommufd: Do not corrupt the pfn list when doing batch carry
  iommufd: Fix unpinning of pages when an access is present
  iommufd: Check for uptr overflow
-rw-r--r--  drivers/iommu/iommufd/pages.c  16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index f8d92c9bb65b60..3c47846cc5efe8 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -294,9 +294,9 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
batch->npfns[batch->end - 1] < keep_pfns);
batch->total_pfns = keep_pfns;
- batch->npfns[0] = keep_pfns;
batch->pfns[0] = batch->pfns[batch->end - 1] +
(batch->npfns[batch->end - 1] - keep_pfns);
+ batch->npfns[0] = keep_pfns;
batch->end = 0;
}
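Why the order of those two stores matters: when batch->end == 1, npfns[batch->end - 1] and npfns[0] are the same slot, so writing keep_pfns into it before the read makes the carried pfns[0] come out unchanged. A minimal userspace sketch of the carry step follows; the struct, sizes, and values below are invented for illustration and are not the kernel's real pfn_batch.

/* Toy model of the carry step; field names mirror struct pfn_batch,
 * but the array sizes and values here are invented for illustration. */
#include <stdio.h>

struct toy_batch {
	unsigned long pfns[4];  /* start pfn of each contiguous run */
	unsigned int npfns[4];  /* length of each run */
	unsigned int end;       /* number of runs in use */
};

/* Store order before the fix: npfns[0] is overwritten first. */
static unsigned long carry_old(struct toy_batch *b, unsigned int keep_pfns)
{
	b->npfns[0] = keep_pfns;
	b->pfns[0] = b->pfns[b->end - 1] +
		     (b->npfns[b->end - 1] - keep_pfns);
	return b->pfns[0];
}

/* Store order after the fix: read the last run before touching slot 0. */
static unsigned long carry_new(struct toy_batch *b, unsigned int keep_pfns)
{
	b->pfns[0] = b->pfns[b->end - 1] +
		     (b->npfns[b->end - 1] - keep_pfns);
	b->npfns[0] = keep_pfns;
	return b->pfns[0];
}

int main(void)
{
	/* One run of 8 pfns starting at pfn 100; carry the last 2. */
	struct toy_batch a = { .pfns = { 100 }, .npfns = { 8 }, .end = 1 };
	struct toy_batch b = a;

	/* With end == 1, npfns[end - 1] aliases npfns[0], so the old
	 * order reads back keep_pfns and the carry fails to advance. */
	printf("old order: pfns[0] = %lu\n", carry_old(&a, 2)); /* 100, wrong */
	printf("new order: pfns[0] = %lu\n", carry_new(&b, 2)); /* 106, right */
	return 0;
}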
@@ -1142,6 +1142,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
bool writable)
{
struct iopt_pages *pages;
+ unsigned long end;
/*
* The iommu API uses size_t as the length, and protect the DIV_ROUND_UP
@@ -1150,6 +1151,9 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
if (length > SIZE_MAX - PAGE_SIZE || length == 0)
return ERR_PTR(-EINVAL);
+ if (check_add_overflow((unsigned long)uptr, length, &end))
+ return ERR_PTR(-EOVERFLOW);
+
pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
if (!pages)
return ERR_PTR(-ENOMEM);
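The new check uses check_add_overflow() from <linux/overflow.h>, which wraps the compiler's __builtin_add_overflow(). A minimal userspace sketch of the same validation follows; the helper name uptr_range_ok() is made up for this example and is not a kernel function.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of the kernel check: reject a user range whose end
 * address would wrap. The kernel's check_add_overflow() is built on the
 * same GCC/Clang __builtin_add_overflow() used here. */
static int uptr_range_ok(const void *uptr, unsigned long length)
{
	unsigned long end;

	if (__builtin_add_overflow((unsigned long)(uintptr_t)uptr, length, &end))
		return -EOVERFLOW;
	return 0;
}

int main(void)
{
	void *near_top = (void *)(uintptr_t)-4096; /* last page of the address space */

	printf("small range:    %d\n", uptr_range_ok((void *)0x1000, 4096)); /* 0 */
	printf("wrapping range: %d\n", uptr_range_ok(near_top, 8192));       /* -EOVERFLOW */
	return 0;
}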
@@ -1203,13 +1207,21 @@ iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
unsigned long start =
max(start_index, *unmapped_end_index);
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ batch->total_pfns)
+ WARN_ON(*unmapped_end_index -
+ batch->total_pfns !=
+ start_index);
batch_from_domain(batch, domain, area, start,
last_index);
- batch_last_index = start + batch->total_pfns - 1;
+ batch_last_index = start_index + batch->total_pfns - 1;
} else {
batch_last_index = last_index;
}
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(batch_last_index > real_last_index);
+
/*
* unmaps must always 'cut' at a place where the pfns are not
* contiguous to pair with the maps that always install
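The batch_last_index change above is easiest to see with concrete numbers. In the sketch below (all values are invented; this is not the kernel's actual data flow), the batch already carries the pfns for indexes start_index..*unmapped_end_index - 1 and batch_from_domain() appends from start = max(start_index, *unmapped_end_index), so counting from start double-counts the carried portion and runs past real_last_index, which is what the added WARN_ON catches.

#include <assert.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Invented example: unpin indexes 10..19 of an area. */
	unsigned long start_index = 10;
	unsigned long real_last_index = 19;

	/* The batch already carries 4 pfns covering indexes 10..13. */
	unsigned long unmapped_end_index = 14;
	unsigned long carried = unmapped_end_index - start_index;

	/* batch_from_domain() then appends pfns for indexes 14..19. */
	unsigned long start = max(start_index, unmapped_end_index);
	unsigned long appended = real_last_index - start + 1;
	unsigned long total_pfns = carried + appended;          /* 10 */

	unsigned long old_last = start + total_pfns - 1;        /* 14 + 10 - 1 = 23 */
	unsigned long new_last = start_index + total_pfns - 1;  /* 10 + 10 - 1 = 19 */

	printf("old formula: %lu (past real_last_index %lu)\n",
	       old_last, real_last_index);
	printf("new formula: %lu\n", new_last);

	/* Rough equivalents of the added CONFIG_IOMMUFD_TEST checks. */
	assert(unmapped_end_index - carried == start_index);
	assert(new_last <= real_last_index);
	assert(old_last > real_last_index); /* the overrun the fix removes */
	return 0;
}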