@@ -1539,13 +1539,19 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-			  struct convert_context *ctx, bool atomic)
+			  struct convert_context *ctx, bool atomic, bool reset_pending)
 {
 	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
 	int r;
 
-	atomic_set(&ctx->cc_pending, 1);
+	/*
+	 * if reset_pending is set we are dealing with the bio for the first time,
+	 * else we're continuing to work on the previous bio, so don't mess with
+	 * the cc_pending counter
+	 */
+	if (reset_pending)
+		atomic_set(&ctx->cc_pending, 1);
 
 	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
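The reset_pending flag exists because crypt_convert() can now be entered twice for the same bio: once from the submission path and again from a workqueue continuation. cc_pending holds the caller's own reference plus one per in-flight crypto request, so it must be seeded to 1 only on the first pass. A minimal sketch of this seed-once counting pattern; struct my_ctx, submit_one() and finish_io() are illustrative stand-ins, not dm-crypt APIs:

#include <linux/atomic.h>
#include <linux/errno.h>

/* Hypothetical per-bio context; only the counter matters here. */
struct my_ctx {
	atomic_t pending;
	int sectors_left;
};

static int submit_one(struct my_ctx *ctx)	/* stand-in for the crypto call */
{
	ctx->sectors_left--;
	return 0;				/* pretend it completed synchronously */
}

static void finish_io(struct my_ctx *ctx)	/* stand-in for bio completion */
{
}

static void convert_loop(struct my_ctx *ctx, bool reset_pending)
{
	/*
	 * Seed the counter only on the first pass; a workqueue
	 * continuation must not wipe out references still in flight.
	 */
	if (reset_pending)
		atomic_set(&ctx->pending, 1);	/* the caller's own reference */

	while (ctx->sectors_left > 0) {
		atomic_inc(&ctx->pending);	/* one reference per request */
		if (submit_one(ctx) == -EINPROGRESS)
			continue;		/* async completion drops it */
		atomic_dec(&ctx->pending);	/* done synchronously */
	}

	/* Drop the caller's reference; the last one down finishes the I/O. */
	if (atomic_dec_and_test(&ctx->pending))
		finish_io(ctx);
}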
@@ -1568,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
-			wait_for_completion(&ctx->restart);
+			if (in_interrupt()) {
+				if (try_wait_for_completion(&ctx->restart)) {
+					/*
+					 * we don't have to block to wait for completion,
+					 * so proceed
+					 */
+				} else {
+					/*
+					 * we can't wait for completion without blocking
+					 * exit and continue processing in a workqueue
+					 */
+					ctx->r.req = NULL;
+					ctx->cc_sector += sector_step;
+					tag_offset++;
+					return BLK_STS_DEV_RESOURCE;
+				}
+			} else {
+				wait_for_completion(&ctx->restart);
+			}
			reinit_completion(&ctx->restart);
			fallthrough;
		/*
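The key primitive is try_wait_for_completion(), which atomically consumes a completion if one has already been signalled and returns false otherwise, never sleeping; that makes it legal in softirq context where wait_for_completion() is not. Note that the context is advanced (cc_sector, tag_offset) before returning BLK_STS_DEV_RESOURCE: a request the Crypto API accepted into its backlog will still be processed, so the continuation resumes from the next sector. A minimal sketch of the wait-or-punt decision, with a hypothetical wrapper name:

#include <linux/completion.h>
#include <linux/preempt.h>
#include <linux/errno.h>

/*
 * Returns 0 if we may proceed (the completion was consumed), or
 * -EAGAIN if the caller is atomic and must punt to process context.
 */
static int wait_or_punt(struct completion *restart)
{
	if (!in_interrupt()) {
		/* process context: sleeping is allowed */
		wait_for_completion(restart);
		return 0;
	}
	/* softirq/hardirq context: only a non-blocking attempt is legal */
	if (try_wait_for_completion(restart))
		return 0;
	return -EAGAIN;
}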
@@ -1960,6 +1984,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
 	}
 }
 
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+	struct crypt_config *cc = io->cc;
+	struct convert_context *ctx = &io->ctx;
+	int crypt_finished;
+	sector_t sector = io->sector;
+	blk_status_t r;
+
+	wait_for_completion(&ctx->restart);
+	reinit_completion(&ctx->restart);
+
+	r = crypt_convert(cc, &io->ctx, true, false);
+	if (r)
+		io->error = r;
+	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+		/* Wait for completion signaled by kcryptd_async_done() */
+		wait_for_completion(&ctx->restart);
+		crypt_finished = 1;
+	}
+
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
+	}
+
+	crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
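kcryptd_crypt_write_continue() runs in process context, so its opening wait_for_completion(&ctx->restart) may block: per the comment above, restart is signalled from kcryptd_async_done() once the backlogged request has been processed. After reinit_completion() it re-enters crypt_convert() with reset_pending set to false, preserving the cc_pending references accumulated by the first pass, and then repeats the same inline-write/submit logic as the original kcryptd_crypt_write_convert() path below.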
@@ -1988,7 +2043,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, ctx,
-			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+	/*
+	 * Crypto API backlogged the request, because its queue was full
+	 * and we're in softirq context, so continue from a workqueue
+	 * (TODO: is it actually possible to be in softirq in the write path?)
+	 */
+	if (r == BLK_STS_DEV_RESOURCE) {
+		INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+		queue_work(cc->crypt_queue, &io->work);
+		return;
+	}
 	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
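BLK_STS_DEV_RESOURCE serves here as a purely internal signal between crypt_convert() and its callers; the bio is not failed, the remaining sectors are handed off to the crypt workqueue instead. A minimal sketch of this punt-and-resume shape, with hypothetical names (struct my_io, convert() and resume_conversion() are illustrative stand-ins):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct my_io {
	struct work_struct work;
	/* ... per-I/O conversion state ... */
};

static void resume_conversion(struct my_io *io)	/* stand-in for the resumed pass */
{
}

static int convert(struct my_io *io)		/* stand-in for crypt_convert() */
{
	return 0;
}

static void my_continue(struct work_struct *work)
{
	struct my_io *io = container_of(work, struct my_io, work);

	/*
	 * Process context now: safe to sleep on the restart completion
	 * and resume where the softirq path stopped.
	 */
	resume_conversion(io);
}

static void my_convert(struct my_io *io, struct workqueue_struct *wq)
{
	if (convert(io) == -EAGAIN) {		/* backlogged while atomic */
		INIT_WORK(&io->work, my_continue);
		queue_work(wq, &io->work);
		return;				/* my_continue() finishes this I/O */
	}
	/* ... synchronous completion path ... */
}

The read path below follows the same shape, with kcryptd_crypt_read_continue() as the continuation handler.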
@@ -2013,6 +2078,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 	crypt_dec_pending(io);
 }
 
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+	struct crypt_config *cc = io->cc;
+	blk_status_t r;
+
+	wait_for_completion(&io->ctx.restart);
+	reinit_completion(&io->ctx.restart);
+
+	r = crypt_convert(cc, &io->ctx, true, false);
+	if (r)
+		io->error = r;
+
+	if (atomic_dec_and_test(&io->ctx.cc_pending))
+		kcryptd_crypt_read_done(io);
+
+	crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
@@ -2024,7 +2108,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 		    io->sector);
 
 	r = crypt_convert(cc, &io->ctx,
-			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+	/*
+	 * Crypto API backlogged the request, because its queue was full
+	 * and we're in softirq context, so continue from a workqueue
+	 */
+	if (r == BLK_STS_DEV_RESOURCE) {
+		INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+		queue_work(cc->crypt_queue, &io->work);
+		return;
+	}
 	if (r)
 		io->error = r;
 