@@ -42,11 +42,6 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
-enum iommu_dma_cookie_type {
-	IOMMU_DMA_IOVA_COOKIE,
-	IOMMU_DMA_MSI_COOKIE,
-};
-
 enum iommu_dma_queue_type {
 	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
 	IOMMU_DMA_OPTS_SINGLE_QUEUE,
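
Note: the removed enum's role is taken over by a discriminator on the domain itself; the hunks below test domain->cookie_type against IOMMU_COOKIE_NONE, IOMMU_COOKIE_DMA_IOVA and IOMMU_COOKIE_DMA_MSI. Those constants are not defined in this file, so presumably an earlier patch in the series adds something of roughly this shape to the shared iommu header (a sketch inferred from the usage below, not the verbatim header change):

/* Sketch only: inferred from the cookie_type comparisons in this patch. */
enum iommu_cookie_type {
	IOMMU_COOKIE_NONE,
	IOMMU_COOKIE_DMA_IOVA,
	IOMMU_COOKIE_DMA_MSI,
};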
@@ -59,35 +54,31 @@ struct iommu_dma_options {
 };
 
 struct iommu_dma_cookie {
-	enum iommu_dma_cookie_type	type;
+	struct iova_domain iovad;
+	struct list_head msi_page_list;
+	/* Flush queue */
 	union {
-		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
-		struct {
-			struct iova_domain	iovad;
-			/* Flush queue */
-			union {
-				struct iova_fq	*single_fq;
-				struct iova_fq	__percpu *percpu_fq;
-			};
-			/* Number of TLB flushes that have been started */
-			atomic64_t		fq_flush_start_cnt;
-			/* Number of TLB flushes that have been finished */
-			atomic64_t		fq_flush_finish_cnt;
-			/* Timer to regularily empty the flush queues */
-			struct timer_list	fq_timer;
-			/* 1 when timer is active, 0 when not */
-			atomic_t		fq_timer_on;
-		};
-		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
-		dma_addr_t		msi_iova;
+		struct iova_fq *single_fq;
+		struct iova_fq __percpu *percpu_fq;
 	};
-	struct list_head		msi_page_list;
-
+	/* Number of TLB flushes that have been started */
+	atomic64_t fq_flush_start_cnt;
+	/* Number of TLB flushes that have been finished */
+	atomic64_t fq_flush_finish_cnt;
+	/* Timer to regularily empty the flush queues */
+	struct timer_list fq_timer;
+	/* 1 when timer is active, 0 when not */
+	atomic_t fq_timer_on;
 	/* Domain for flush queue callback; NULL if flush queue not in use */
-	struct iommu_domain		*fq_domain;
+	struct iommu_domain *fq_domain;
 	/* Options for dma-iommu use */
-	struct iommu_dma_options	options;
-	struct mutex			mutex;
+	struct iommu_dma_options options;
+	struct mutex mutex;
+};
+
+struct iommu_dma_msi_cookie {
+	dma_addr_t msi_iova;
+	struct list_head msi_page_list;
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
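
The restructuring here swaps a self-tagged union for two independent structs: struct iommu_dma_cookie keeps only the full IOVA allocator (with the flush-queue state flattened out of the nested anonymous struct), while the trivial MSI allocator moves into the new struct iommu_dma_msi_cookie. A standalone model of the before/after shape, with illustrative types (assuming, as the later hunks suggest, that the two cookie pointers occupy a union in struct iommu_domain):

/* Standalone model, not kernel code: the tag moves from inside the cookie
 * to the owning object, which then points at exactly one of two structs
 * that no longer carry each other's fields. */
enum cookie_type { COOKIE_NONE, COOKIE_IOVA, COOKIE_MSI };

struct iova_cookie { unsigned long granule; /* full allocator state... */ };
struct msi_cookie { unsigned long long next; /* linear allocator cursor */ };

struct owner {
	enum cookie_type cookie_type;	/* discriminator, formerly cookie->type */
	union {				/* at most one cookie exists at a time */
		struct iova_cookie *iova_cookie;
		struct msi_cookie *msi_cookie;
	};
};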
@@ -369,40 +360,26 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 	return 0;
 }
 
-static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
-{
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
-		return cookie->iovad.granule;
-	return PAGE_SIZE;
-}
-
-static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
-{
-	struct iommu_dma_cookie *cookie;
-
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-	if (cookie) {
-		INIT_LIST_HEAD(&cookie->msi_page_list);
-		cookie->type = type;
-	}
-	return cookie;
-}
-
 /**
  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
  * @domain: IOMMU domain to prepare for DMA-API usage
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-	if (domain->iova_cookie)
+	struct iommu_dma_cookie *cookie;
+
+	if (domain->cookie_type != IOMMU_COOKIE_NONE)
 		return -EEXIST;
 
-	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
-	if (!domain->iova_cookie)
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
 		return -ENOMEM;
 
-	mutex_init(&domain->iova_cookie->mutex);
+	mutex_init(&cookie->mutex);
+	INIT_LIST_HEAD(&cookie->msi_page_list);
 	iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
+	domain->cookie_type = IOMMU_COOKIE_DMA_IOVA;
+	domain->iova_cookie = cookie;
 	return 0;
 }
 
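With cookie_alloc() gone, iommu_get_dma_cookie() open-codes the allocation and publishes the result in two places: the type tag and the pointer. The -EEXIST guard correspondingly moves from a pointer test to a cookie_type test, which stays meaningful even if iova_cookie and msi_cookie overlay each other in the domain (that overlay is an assumption here; the code below is a standalone, hypothetical model, not the kernel's):

#include <assert.h>

struct dom {
	int cookie_type;	/* 0 = none */
	union { void *iova_cookie; void *msi_cookie; };
};

static int attach_iova_cookie(struct dom *d, void *cookie)
{
	if (d->cookie_type != 0)	/* a bare pointer test could not tell
					 * which union member is live */
		return -1;		/* stands in for -EEXIST */
	d->cookie_type = 1;
	d->iova_cookie = cookie;
	return 0;
}

int main(void)
{
	struct dom d = {0};
	int payload;

	assert(attach_iova_cookie(&d, &payload) == 0);
	assert(attach_iova_cookie(&d, &payload) == -1);	/* second attach refused */
	return 0;
}
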
@@ -420,29 +397,30 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
  */
 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
-	struct iommu_dma_cookie *cookie;
+	struct iommu_dma_msi_cookie *cookie;
 
 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
 		return -EINVAL;
 
-	if (domain->iova_cookie)
+	if (domain->cookie_type != IOMMU_COOKIE_NONE)
 		return -EEXIST;
 
-	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
 	if (!cookie)
 		return -ENOMEM;
 
 	cookie->msi_iova = base;
-	domain->iova_cookie = cookie;
+	INIT_LIST_HEAD(&cookie->msi_page_list);
 	iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
+	domain->cookie_type = IOMMU_COOKIE_DMA_MSI;
+	domain->msi_cookie = cookie;
 	return 0;
 }
 EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
- *          iommu_get_msi_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  */
 void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
@@ -454,20 +432,27 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 		return;
 #endif
 
-	if (!cookie)
-		return;
-
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
+	if (cookie->iovad.granule) {
 		iommu_dma_free_fq(cookie);
 		put_iova_domain(&cookie->iovad);
 	}
+	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
+		kfree(msi);
+	kfree(cookie);
+}
 
-	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
-		list_del(&msi->list);
+/**
+ * iommu_put_msi_cookie - Release a domain's MSI mapping resources
+ * @domain: IOMMU domain previously prepared by iommu_get_msi_cookie()
+ */
+void iommu_put_msi_cookie(struct iommu_domain *domain)
+{
+	struct iommu_dma_msi_cookie *cookie = domain->msi_cookie;
+	struct iommu_dma_msi_page *msi, *tmp;
+
+	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
 		kfree(msi);
-	}
 	kfree(cookie);
-	domain->iova_cookie = NULL;
 }
 
 /**
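
Teardown splits along the same line: iommu_put_dma_cookie() keeps the IOVA side, and the new iommu_put_msi_cookie() handles MSI cookies. Both drop the old list_del() before each kfree(): unlinking is pointless when the whole list dies with the cookie, but the _safe iterator is still required because each pass frees the node it stands on. A standalone model of that pattern (plain C, not the kernel's list.h):

#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
	struct node *n = head, *tmp;

	while (n) {
		tmp = n->next;	/* sample the successor before freeing, the
				 * job list_for_each_entry_safe() does */
		free(n);	/* no unlink needed; the list dies wholesale */
		n = tmp;
	}
}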
@@ -687,7 +672,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 	struct iova_domain *iovad;
 	int ret;
 
-	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+	if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA)
 		return -EINVAL;
 
 	iovad = &cookie->iovad;
@@ -777,9 +762,9 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long shift, iova_len, iova;
 
-	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
-		cookie->msi_iova += size;
-		return cookie->msi_iova - size;
+	if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) {
+		domain->msi_cookie->msi_iova += size;
+		return domain->msi_cookie->msi_iova - size;
 	}
 
 	shift = iova_shift(iovad);
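
The MSI branch is the same trivial linear ("bump") allocator as before, just reached through domain->msi_cookie now: advance the cursor by size and return its previous value. In isolation (standalone model, illustrative names):

#include <stddef.h>

typedef unsigned long long dma_addr_t;

struct msi_cookie { dma_addr_t msi_iova; };

static dma_addr_t msi_alloc(struct msi_cookie *c, size_t size)
{
	c->msi_iova += size;		/* move the cursor forward... */
	return c->msi_iova - size;	/* ...and hand back its old position */
}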
@@ -816,16 +801,16 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	return (dma_addr_t)iova << shift;
 }
 
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
-		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
+static void iommu_dma_free_iova(struct iommu_domain *domain, dma_addr_t iova,
+		size_t size, struct iommu_iotlb_gather *gather)
 {
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = &domain->iova_cookie->iovad;
 
 	/* The MSI case is only ever cleaning up its most recent allocation */
-	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
-		cookie->msi_iova -= size;
+	if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI)
+		domain->msi_cookie->msi_iova -= size;
 	else if (gather && gather->queued)
-		queue_iova(cookie, iova_pfn(iovad, iova),
+		queue_iova(domain->iova_cookie, iova_pfn(iovad, iova),
 			   size >> iova_shift(iovad),
 			   &gather->freelist);
 	else
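
iommu_dma_free_iova() now takes the domain so it can reach whichever cookie is live via domain->cookie_type. As the comment says, the MSI case only ever undoes its most recent allocation, which is the one "free" a bump allocator supports; the counterpart to the msi_alloc() sketch above (standalone model):

#include <stddef.h>

typedef unsigned long long dma_addr_t;

struct msi_cookie { dma_addr_t msi_iova; };

static void msi_free_last(struct msi_cookie *c, size_t size)
{
	c->msi_iova -= size;	/* LIFO-only: valid solely for the most
				 * recent allocation, as the kernel comment
				 * states */
}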
@@ -853,7 +838,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 
 	if (!iotlb_gather.queued)
 		iommu_iotlb_sync(domain, &iotlb_gather);
-	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
+	iommu_dma_free_iova(domain, dma_addr, size, &iotlb_gather);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -881,7 +866,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
-		iommu_dma_free_iova(cookie, iova, size, NULL);
+		iommu_dma_free_iova(domain, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
@@ -1018,7 +1003,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 out_free_sg:
 	sg_free_table(sgt);
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	iommu_dma_free_iova(domain, iova, size, NULL);
 out_free_pages:
 	__iommu_dma_free_pages(pages, count);
 	return NULL;
@@ -1495,7 +1480,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	return __finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+	iommu_dma_free_iova(domain, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 out:
@@ -1773,17 +1758,47 @@ void iommu_setup_dma_ops(struct device *dev)
 	dev->dma_iommu = false;
 }
 
+static bool has_msi_cookie(const struct iommu_domain *domain)
+{
+	return domain && (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA ||
+			  domain->cookie_type == IOMMU_COOKIE_DMA_MSI);
+}
+
+static size_t cookie_msi_granule(const struct iommu_domain *domain)
+{
+	switch (domain->cookie_type) {
+	case IOMMU_COOKIE_DMA_IOVA:
+		return domain->iova_cookie->iovad.granule;
+	case IOMMU_COOKIE_DMA_MSI:
+		return PAGE_SIZE;
+	default:
+		unreachable();
+	};
+}
+
+static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
+{
+	switch (domain->cookie_type) {
+	case IOMMU_COOKIE_DMA_IOVA:
+		return &domain->iova_cookie->msi_page_list;
+	case IOMMU_COOKIE_DMA_MSI:
+		return &domain->msi_cookie->msi_page_list;
+	default:
+		unreachable();
+	};
+}
+
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		phys_addr_t msi_addr, struct iommu_domain *domain)
 {
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct list_head *msi_page_list = cookie_msi_pages(domain);
 	struct iommu_dma_msi_page *msi_page;
 	dma_addr_t iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
-	size_t size = cookie_msi_granule(cookie);
+	size_t size = cookie_msi_granule(domain);
 
 	msi_addr &= ~(phys_addr_t)(size - 1);
-	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+	list_for_each_entry(msi_page, msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
 
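The helpers added here centralise the per-type dispatch: has_msi_cookie() filters callers, and cookie_msi_granule()/cookie_msi_pages() switch on the tag, with default: unreachable() asserting that filtered callers can never present any other type. (The stray semicolon after each switch's closing brace is in the patch as posted; it is a harmless empty statement.) A standalone model of the pattern, using the compiler builtin in place of the kernel's unreachable():

#include <stddef.h>

enum cookie_type { COOKIE_NONE, COOKIE_IOVA, COOKIE_MSI };

struct owner {
	enum cookie_type cookie_type;
	size_t iova_granule;	/* stands in for iovad.granule */
	size_t page_size;	/* stands in for PAGE_SIZE */
};

static size_t msi_granule(const struct owner *o)
{
	/* Callers must have passed a has_msi_cookie()-style check first,
	 * so the default arm really is unreachable. */
	switch (o->cookie_type) {
	case COOKIE_IOVA:
		return o->iova_granule;
	case COOKIE_MSI:
		return o->page_size;
	default:
		__builtin_unreachable();	/* models unreachable() */
	}
}
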
@@ -1801,11 +1816,11 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	INIT_LIST_HEAD(&msi_page->list);
 	msi_page->phys = msi_addr;
 	msi_page->iova = iova;
-	list_add(&msi_page->list, &cookie->msi_page_list);
+	list_add(&msi_page->list, msi_page_list);
 	return msi_page;
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	iommu_dma_free_iova(domain, iova, size, NULL);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
@@ -1817,7 +1832,7 @@ static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
 	struct device *dev = msi_desc_to_dev(desc);
 	const struct iommu_dma_msi_page *msi_page;
 
-	if (!domain->iova_cookie) {
+	if (!has_msi_cookie(domain)) {
 		msi_desc_set_iommu_msi_iova(desc, 0, 0);
 		return 0;
 	}
@@ -1827,9 +1842,8 @@ static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
 	if (!msi_page)
 		return -ENOMEM;
 
-	msi_desc_set_iommu_msi_iova(
-		desc, msi_page->iova,
-		ilog2(cookie_msi_granule(domain->iova_cookie)));
+	msi_desc_set_iommu_msi_iova(desc, msi_page->iova,
+				    ilog2(cookie_msi_granule(domain)));
 	return 0;
 }