@@ -71,7 +71,6 @@ static unsigned int srp_sg_tablesize;
 static unsigned int cmd_sg_entries;
 static unsigned int indirect_sg_entries;
 static bool allow_ext_sg;
-static bool prefer_fr = true;
 static bool register_always = true;
 static bool never_register;
 static int topspin_workarounds = 1;
@@ -95,10 +94,6 @@ module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
                  "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
 
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-                 "Whether to use fast registration if both FMR and fast registration are supported");
-
 module_param(register_always, bool, 0444);
 MODULE_PARM_DESC(register_always,
                  "Use memory registration even for contiguous memory regions");
@@ -388,24 +383,6 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
                 srp_new_ib_cm_id(ch);
 }
 
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
-        struct srp_device *dev = target->srp_host->srp_dev;
-        struct ib_fmr_pool_param fmr_param;
-
-        memset(&fmr_param, 0, sizeof(fmr_param));
-        fmr_param.pool_size         = target->mr_pool_size;
-        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
-        fmr_param.cache             = 1;
-        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
-        fmr_param.page_shift        = ilog2(dev->mr_page_size);
-        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
-                                       IB_ACCESS_REMOTE_WRITE |
-                                       IB_ACCESS_REMOTE_READ);
-
-        return ib_create_fmr_pool(dev->pd, &fmr_param);
-}
-
 /**
  * srp_destroy_fr_pool() - free the resources owned by a pool
  * @pool: Fast registration pool to be destroyed.
@@ -556,7 +533,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
         struct ib_qp_init_attr *init_attr;
         struct ib_cq *recv_cq, *send_cq;
         struct ib_qp *qp;
-        struct ib_fmr_pool *fmr_pool = NULL;
         struct srp_fr_pool *fr_pool = NULL;
         const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
         int ret;
@@ -619,14 +595,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
                                      "FR pool allocation failed (%d)\n", ret);
                         goto err_qp;
                 }
-        } else if (dev->use_fmr) {
-                fmr_pool = srp_alloc_fmr_pool(target);
-                if (IS_ERR(fmr_pool)) {
-                        ret = PTR_ERR(fmr_pool);
-                        shost_printk(KERN_WARNING, target->scsi_host, PFX
-                                     "FMR pool allocation failed (%d)\n", ret);
-                        goto err_qp;
-                }
         }
 
         if (ch->qp)
@@ -644,10 +612,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
                 if (ch->fr_pool)
                         srp_destroy_fr_pool(ch->fr_pool);
                 ch->fr_pool = fr_pool;
-        } else if (dev->use_fmr) {
-                if (ch->fmr_pool)
-                        ib_destroy_fmr_pool(ch->fmr_pool);
-                ch->fmr_pool = fmr_pool;
         }
 
         kfree(init_attr);
@@ -702,9 +666,6 @@ static void srp_free_ch_ib(struct srp_target_port *target,
         if (dev->use_fast_reg) {
                 if (ch->fr_pool)
                         srp_destroy_fr_pool(ch->fr_pool);
-        } else if (dev->use_fmr) {
-                if (ch->fmr_pool)
-                        ib_destroy_fmr_pool(ch->fmr_pool);
         }
 
         srp_destroy_qp(ch);
@@ -1017,12 +978,8 @@ static void srp_free_req_data(struct srp_target_port *target,
 
         for (i = 0; i < target->req_ring_size; ++i) {
                 req = &ch->req_ring[i];
-                if (dev->use_fast_reg) {
+                if (dev->use_fast_reg)
                         kfree(req->fr_list);
-                } else {
-                        kfree(req->fmr_list);
-                        kfree(req->map_page);
-                }
                 if (req->indirect_dma_addr) {
                         ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                             target->indirect_size,
@@ -1056,16 +1013,8 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
                                         GFP_KERNEL);
                 if (!mr_list)
                         goto out;
-                if (srp_dev->use_fast_reg) {
+                if (srp_dev->use_fast_reg)
                         req->fr_list = mr_list;
-                } else {
-                        req->fmr_list = mr_list;
-                        req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
-                                                      sizeof(void *),
-                                                      GFP_KERNEL);
-                        if (!req->map_page)
-                                goto out;
-                }
                 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                 if (!req->indirect_desc)
                         goto out;
@@ -1272,11 +1221,6 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
                 if (req->nmdesc)
                         srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                         req->nmdesc);
-        } else if (dev->use_fmr) {
-                struct ib_pool_fmr **pfmr;
-
-                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
-                        ib_fmr_pool_unmap(*pfmr);
         }
 
         ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1472,50 +1416,6 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
         state->ndesc++;
 }
 
-static int srp_map_finish_fmr(struct srp_map_state *state,
-                              struct srp_rdma_ch *ch)
-{
-        struct srp_target_port *target = ch->target;
-        struct srp_device *dev = target->srp_host->srp_dev;
-        struct ib_pool_fmr *fmr;
-        u64 io_addr = 0;
-
-        if (state->fmr.next >= state->fmr.end) {
-                shost_printk(KERN_ERR, ch->target->scsi_host,
-                             PFX "Out of MRs (mr_per_cmd = %d)\n",
-                             ch->target->mr_per_cmd);
-                return -ENOMEM;
-        }
-
-        WARN_ON_ONCE(!dev->use_fmr);
-
-        if (state->npages == 0)
-                return 0;
-
-        if (state->npages == 1 && target->global_rkey) {
-                srp_map_desc(state, state->base_dma_addr, state->dma_len,
-                             target->global_rkey);
-                goto reset_state;
-        }
-
-        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
-                                   state->npages, io_addr);
-        if (IS_ERR(fmr))
-                return PTR_ERR(fmr);
-
-        *state->fmr.next++ = fmr;
-        state->nmdesc++;
-
-        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
-                     state->dma_len, fmr->fmr->rkey);
-
-reset_state:
-        state->npages = 0;
-        state->dma_len = 0;
-
-        return 0;
-}
-
 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
 {
         srp_handle_qp_err(cq, wc, "FAST REG");
@@ -1606,74 +1506,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
         return n;
 }
 
-static int srp_map_sg_entry(struct srp_map_state *state,
-                            struct srp_rdma_ch *ch,
-                            struct scatterlist *sg)
-{
-        struct srp_target_port *target = ch->target;
-        struct srp_device *dev = target->srp_host->srp_dev;
-        dma_addr_t dma_addr = sg_dma_address(sg);
-        unsigned int dma_len = sg_dma_len(sg);
-        unsigned int len = 0;
-        int ret;
-
-        WARN_ON_ONCE(!dma_len);
-
-        while (dma_len) {
-                unsigned offset = dma_addr & ~dev->mr_page_mask;
-
-                if (state->npages == dev->max_pages_per_mr ||
-                    (state->npages > 0 && offset != 0)) {
-                        ret = srp_map_finish_fmr(state, ch);
-                        if (ret)
-                                return ret;
-                }
-
-                len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
-                if (!state->npages)
-                        state->base_dma_addr = dma_addr;
-                state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
-                state->dma_len += len;
-                dma_addr += len;
-                dma_len -= len;
-        }
-
-        /*
-         * If the end of the MR is not on a page boundary then we need to
-         * close it out and start a new one -- we can only merge at page
-         * boundaries.
-         */
-        ret = 0;
-        if ((dma_addr & ~dev->mr_page_mask) != 0)
-                ret = srp_map_finish_fmr(state, ch);
-        return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
-                          struct srp_request *req, struct scatterlist *scat,
-                          int count)
-{
-        struct scatterlist *sg;
-        int i, ret;
-
-        state->pages = req->map_page;
-        state->fmr.next = req->fmr_list;
-        state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
-        for_each_sg(scat, sg, count, i) {
-                ret = srp_map_sg_entry(state, ch, sg);
-                if (ret)
-                        return ret;
-        }
-
-        ret = srp_map_finish_fmr(state, ch);
-        if (ret)
-                return ret;
-
-        return 0;
-}
-
 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
                          struct srp_request *req, struct scatterlist *scat,
                          int count)
@@ -1733,7 +1565,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
         struct srp_device *dev = target->srp_host->srp_dev;
         struct srp_map_state state;
         struct srp_direct_buf idb_desc;
-        u64 idb_pages[1];
         struct scatterlist idb_sg[1];
         int ret;
 
@@ -1756,14 +1587,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
                 if (ret < 0)
                         return ret;
                 WARN_ON_ONCE(ret < 1);
-        } else if (dev->use_fmr) {
-                state.pages = idb_pages;
-                state.pages[0] = (req->indirect_dma_addr &
-                                  dev->mr_page_mask);
-                state.npages = 1;
-                ret = srp_map_finish_fmr(&state, ch);
-                if (ret < 0)
-                        return ret;
         } else {
                 return -EINVAL;
         }
@@ -1787,9 +1610,6 @@ static void srp_check_mapping(struct srp_map_state *state,
         if (dev->use_fast_reg)
                 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
                         mr_len += (*pfr)->mr->length;
-        else if (dev->use_fmr)
-                for (i = 0; i < state->nmdesc; i++)
-                        mr_len += be32_to_cpu(req->indirect_desc[i].len);
         if (desc_len != scsi_bufflen(req->scmnd) ||
             mr_len > scsi_bufflen(req->scmnd))
                 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1904,8 +1724,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
         state.desc = req->indirect_desc;
         if (dev->use_fast_reg)
                 ret = srp_map_sg_fr(&state, ch, req, scat, count);
-        else if (dev->use_fmr)
-                ret = srp_map_sg_fmr(&state, ch, req, scat, count);
         else
                 ret = srp_map_sg_dma(&state, ch, req, scat, count);
         req->nmdesc = state.nmdesc;
@@ -3874,26 +3692,26 @@ static ssize_t srp_create_target(struct device *dev,
                 goto out;
         }
 
-        if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+        if (!srp_dev->has_fr && !target->allow_ext_sg &&
             target->cmd_sg_cnt < target->sg_tablesize) {
                 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
                 target->sg_tablesize = target->cmd_sg_cnt;
         }
 
-        if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+        if (srp_dev->use_fast_reg) {
                 bool gaps_reg = (ibdev->attrs.device_cap_flags &
                                  IB_DEVICE_SG_GAPS_REG);
 
                 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
                         (ilog2(srp_dev->mr_page_size) - 9);
                 if (!gaps_reg) {
                         /*
-                         * FR and FMR can only map one HCA page per entry. If
-                         * the start address is not aligned on a HCA page
-                         * boundary two entries will be used for the head and
-                         * the tail although these two entries combined
-                         * contain at most one HCA page of data. Hence the "+
-                         * 1" in the calculation below.
+                         * FR can only map one HCA page per entry. If the start
+                         * address is not aligned on a HCA page boundary two
+                         * entries will be used for the head and the tail
+                         * although these two entries combined contain at most
+                         * one HCA page of data. Hence the "+ 1" in the
+                         * calculation below.
                          *
                          * The indirect data buffer descriptor is contiguous
                          * so the memory for that buffer will only be
@@ -4174,23 +3992,15 @@ static int srp_add_one(struct ib_device *device)
         srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
                                           max_pages_per_mr);
 
-        srp_dev->has_fmr = (device->ops.alloc_fmr &&
-                            device->ops.dealloc_fmr &&
-                            device->ops.map_phys_fmr &&
-                            device->ops.unmap_fmr);
         srp_dev->has_fr = (attr->device_cap_flags &
                            IB_DEVICE_MEM_MGT_EXTENSIONS);
-        if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
-                dev_warn(&device->dev, "neither FMR nor FR is supported\n");
-        } else if (!never_register &&
-                   attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
-                srp_dev->use_fast_reg = (srp_dev->has_fr &&
-                                         (!srp_dev->has_fmr || prefer_fr));
-                srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
-        }
+        if (!never_register && !srp_dev->has_fr)
+                dev_warn(&device->dev, "FR is not supported\n");
+        else if (!never_register &&
+                 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+                srp_dev->use_fast_reg = srp_dev->has_fr;
 
-        if (never_register || !register_always ||
-            (!srp_dev->has_fmr && !srp_dev->has_fr))
+        if (never_register || !register_always || !srp_dev->has_fr)
                 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
 
         if (srp_dev->use_fast_reg) {