void-packages/srcpkgs/linux4.11/patches/iommu-amd-flush-IOTLB-for-s...

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5..1edeebec 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2227,15 +2227,26 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
 static void __queue_flush(struct flush_queue *queue)
 {
-	struct protection_domain *domain;
-	unsigned long flags;
 	int idx;
 
-	/* First flush TLB of all known domains */
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-	list_for_each_entry(domain, &amd_iommu_pd_list, list)
-		domain_flush_tlb(domain);
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+	/* First flush TLB of all domains which were added to flush queue */
+	for (idx = 0; idx < queue->next; ++idx) {
+		struct flush_queue_entry *entry;
+
+		entry = queue->entries + idx;
+
+		/*
+		 * There might be cases where multiple IOVA entries for the
+		 * same domain are queued in the flush queue. To avoid
+		 * flushing the same domain again, we check whether the
+		 * flag is set or not. This improves the efficiency of
+		 * flush operation.
+		 */
+		if (!entry->dma_dom->domain.already_flushed) {
+			entry->dma_dom->domain.already_flushed = true;
+			domain_flush_tlb(&entry->dma_dom->domain);
+		}
+	}
 
 	/* Wait until flushes have completed */
 	domain_flush_complete(NULL);
@@ -2289,6 +2300,8 @@ static void queue_add(struct dma_ops_domain *dma_dom,
 	pages = __roundup_pow_of_two(pages);
 	address >>= PAGE_SHIFT;
 
+	dma_dom->domain.already_flushed = false;
+
 	queue = get_cpu_ptr(&flush_queue);
 	spin_lock_irqsave(&queue->lock, flags);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 4de8f41..4f5519d 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -454,6 +454,8 @@ struct protection_domain {
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+	bool already_flushed;	/* flag to avoid flushing the same domain again
+				   in a single invocation of __queue_flush() */
 };
 
 /*
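
For reference, the change replaces the old behaviour of __queue_flush(), which walked every protection domain on amd_iommu_pd_list under amd_iommu_pd_lock and flushed each one, with a pass over only the domains that actually have entries in the flush queue, flushing each such domain at most once. The standalone C program below is a minimal sketch of that deduplication pattern, not the kernel code itself: all toy_* names are hypothetical stand-ins, while the real implementation works with struct protection_domain, struct flush_queue, queue_add() and domain_flush_tlb() as shown in the diff.

/*
 * Userspace model of "flush each queued domain at most once per drain".
 * Hypothetical names throughout; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_QUEUE_SIZE 8

struct toy_domain {
	int id;
	bool already_flushed;	/* mirrors protection_domain.already_flushed */
};

struct toy_entry {
	struct toy_domain *dom;	/* domain owning the queued IOVA range */
	unsigned long iova;
	unsigned long pages;
};

struct toy_queue {
	struct toy_entry entries[TOY_QUEUE_SIZE];
	int next;		/* number of queued entries */
};

/* Stand-in for the expensive per-domain IOTLB flush. */
static void toy_domain_flush_tlb(struct toy_domain *dom)
{
	printf("flushing IOTLB of domain %d\n", dom->id);
}

/* Mirrors queue_add(): remember the range and clear the flush flag. */
static void toy_queue_add(struct toy_queue *q, struct toy_domain *dom,
			  unsigned long iova, unsigned long pages)
{
	if (q->next == TOY_QUEUE_SIZE)
		return;	/* the real code drains the queue here instead */

	dom->already_flushed = false;
	q->entries[q->next++] = (struct toy_entry){ dom, iova, pages };
}

/* Mirrors __queue_flush(): flush each queued domain at most once. */
static void toy_queue_flush(struct toy_queue *q)
{
	for (int idx = 0; idx < q->next; ++idx) {
		struct toy_domain *dom = q->entries[idx].dom;

		if (!dom->already_flushed) {
			dom->already_flushed = true;
			toy_domain_flush_tlb(dom);
		}
	}
	q->next = 0;
}

int main(void)
{
	struct toy_domain d0 = { .id = 0 }, d1 = { .id = 1 };
	struct toy_queue q = { .next = 0 };

	/* Three queued ranges, but only two distinct domains ... */
	toy_queue_add(&q, &d0, 0x1000, 1);
	toy_queue_add(&q, &d0, 0x2000, 4);
	toy_queue_add(&q, &d1, 0x8000, 2);

	/* ... so draining the queue prints exactly two flush lines. */
	toy_queue_flush(&q);
	return 0;
}

A single bool per domain is sufficient in this scheme because queue_add() clears it every time a range is queued and __queue_flush() only consumes it while draining the queue, so a domain with several queued ranges still triggers exactly one IOTLB flush per drain.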