drivers/iommu/iova.c (+32 −0) — reconstructed unified diff (web-viewer chrome removed; hunk markers inferred from the +32/−0 stat, so every changed line is an addition):

@@ -33,6 +33,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
+static void fq_flush_timeout(unsigned long data);
 
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,

@@ -62,7 +63,11 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 	if (!iovad->fq)
 		return;
 
+	if (timer_pending(&iovad->fq_timer))
+		del_timer(&iovad->fq_timer);
+
 	fq_destroy_all_entries(iovad);
+
 	free_percpu(iovad->fq);
 
 	iovad->fq = NULL;

@@ -95,6 +100,9 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		spin_lock_init(&fq->lock);
 	}
 
+	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+	atomic_set(&iovad->fq_timer_on, 0);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(init_iova_flush_queue);

@@ -539,6 +547,25 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
 	}
 }
 
+static void fq_flush_timeout(unsigned long data)
+{
+	struct iova_domain *iovad = (struct iova_domain *)data;
+	int cpu;
+
+	atomic_set(&iovad->fq_timer_on, 0);
+	iova_domain_flush(iovad);
+
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+		struct iova_fq *fq;
+
+		fq = per_cpu_ptr(iovad->fq, cpu);
+		spin_lock_irqsave(&fq->lock, flags);
+		fq_ring_free(iovad, fq);
+		spin_unlock_irqrestore(&fq->lock, flags);
+	}
+}
+
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
 		unsigned long data)

@@ -569,6 +596,11 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
 
+	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+		mod_timer(&iovad->fq_timer,
+			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+
 	put_cpu_ptr(iovad->fq);
 }
 EXPORT_SYMBOL_GPL(queue_iova);
include/linux/iova.h (+8 −0) — reconstructed unified diff (web-viewer chrome removed; hunk markers inferred from the +8/−0 stat):

@@ -48,6 +48,9 @@ typedef void (* iova_entry_dtor)(unsigned long data);
 /* Number of entries per Flush Queue */
 #define IOVA_FQ_SIZE	256
 
+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
+#define IOVA_FQ_TIMEOUT	10
+
 /* Flush Queue entry for defered flushing */
 struct iova_fq_entry {
 	unsigned long iova_pfn;

@@ -86,6 +89,11 @@ struct iova_domain {
 	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
 						   have been finished */
 
+	struct timer_list fq_timer;		/* Timer to regularily empty the
+						   flush-queues */
+	atomic_t fq_timer_on;			/* 1 when timer is active, 0
+						   when not */
 };
 
 static inline unsigned long iova_size(struct iova *iova)
drivers/iommu/iova.c (+32 −0) — duplicate paste of the same hunks as above; reconstructed unified diff (web-viewer chrome removed; hunk markers inferred from the +32/−0 stat):

@@ -33,6 +33,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
+static void fq_flush_timeout(unsigned long data);
 
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,

@@ -62,7 +63,11 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 	if (!iovad->fq)
 		return;
 
+	if (timer_pending(&iovad->fq_timer))
+		del_timer(&iovad->fq_timer);
+
 	fq_destroy_all_entries(iovad);
+
 	free_percpu(iovad->fq);
 
 	iovad->fq = NULL;

@@ -95,6 +100,9 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		spin_lock_init(&fq->lock);
 	}
 
+	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+	atomic_set(&iovad->fq_timer_on, 0);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(init_iova_flush_queue);

@@ -539,6 +547,25 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
 	}
 }
 
+static void fq_flush_timeout(unsigned long data)
+{
+	struct iova_domain *iovad = (struct iova_domain *)data;
+	int cpu;
+
+	atomic_set(&iovad->fq_timer_on, 0);
+	iova_domain_flush(iovad);
+
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+		struct iova_fq *fq;
+
+		fq = per_cpu_ptr(iovad->fq, cpu);
+		spin_lock_irqsave(&fq->lock, flags);
+		fq_ring_free(iovad, fq);
+		spin_unlock_irqrestore(&fq->lock, flags);
+	}
+}
+
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
 		unsigned long data)

@@ -569,6 +596,11 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
 
+	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+		mod_timer(&iovad->fq_timer,
+			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+
 	put_cpu_ptr(iovad->fq);
 }
 EXPORT_SYMBOL_GPL(queue_iova);
include/linux/iova.h (+8 −0) — duplicate paste of the same hunks as above; reconstructed unified diff (web-viewer chrome removed; hunk markers inferred from the +8/−0 stat):

@@ -48,6 +48,9 @@ typedef void (* iova_entry_dtor)(unsigned long data);
 /* Number of entries per Flush Queue */
 #define IOVA_FQ_SIZE	256
 
+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
+#define IOVA_FQ_TIMEOUT	10
+
 /* Flush Queue entry for defered flushing */
 struct iova_fq_entry {
 	unsigned long iova_pfn;

@@ -86,6 +89,11 @@ struct iova_domain {
 	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
 						   have been finished */
 
+	struct timer_list fq_timer;		/* Timer to regularily empty the
+						   flush-queues */
+	atomic_t fq_timer_on;			/* 1 when timer is active, 0
+						   when not */
 };
 
 static inline unsigned long iova_size(struct iova *iova)