Introduce a new parameter, batch_desc, to the QI-based IOTLB/dev-IOTLB
invalidation operations to support batching of invalidation descriptors.
The batch_desc is a pointer to a descriptor entry in a batched command
buffer. If batch_desc is NULL, batch submission is not being used and
descriptors will be submitted individually.
Also fix an issue reported by checkpatch about "unsigned mask":
"Prefer 'unsigned int' to bare use of 'unsigned'"
Signed-off-by: Tina Zhang <tina.zhang@xxxxxxxxx>
---
drivers/iommu/intel/cache.c | 33 +++++++++++-------
drivers/iommu/intel/dmar.c | 67 ++++++++++++++++++++-----------------
drivers/iommu/intel/iommu.c | 27 +++++++++------
drivers/iommu/intel/iommu.h | 21 ++++++++----
drivers/iommu/intel/pasid.c | 20 ++++++-----
5 files changed, 100 insertions(+), 68 deletions(-)
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index e8418cdd8331..dcf5e0e6af17 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -278,7 +278,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
case CACHE_TAG_NESTING_IOTLB:
if (domain->use_first_level) {
qi_flush_piotlb(iommu, tag->domain_id,
- tag->pasid, addr, pages, ih);
+ tag->pasid, addr, pages, ih, NULL);
} else {
/*
* Fallback to domain selective flush if no
@@ -287,11 +287,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
if (!cap_pgsel_inv(iommu->cap) ||
mask > cap_max_amask_val(iommu->cap))
iommu->flush.flush_iotlb(iommu, tag->domain_id,
- 0, 0, DMA_TLB_DSI_FLUSH);
+ 0, 0, DMA_TLB_DSI_FLUSH,
+ NULL);
else
iommu->flush.flush_iotlb(iommu, tag->domain_id,
addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
+ DMA_TLB_PSI_FLUSH,
+ NULL);