@@ -342,6 +342,12 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
 		lim.max_hw_discard_sectors = UINT_MAX;
 	else
 		lim.max_hw_discard_sectors = 0;
+	if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH))
+		blk_queue_write_cache(nbd->disk->queue, false, false);
+	else if (nbd->config->flags & NBD_FLAG_SEND_FUA)
+		blk_queue_write_cache(nbd->disk->queue, true, true);
+	else
+		blk_queue_write_cache(nbd->disk->queue, true, false);
 	lim.logical_block_size = blksize;
 	lim.physical_block_size = blksize;
 	error = queue_limits_commit_update(nbd->disk->queue, &lim);
@@ -1286,19 +1292,10 @@ static void nbd_bdev_reset(struct nbd_device *nbd)
 
 static void nbd_parse_flags(struct nbd_device *nbd)
 {
-	struct nbd_config *config = nbd->config;
-	if (config->flags & NBD_FLAG_READ_ONLY)
+	if (nbd->config->flags & NBD_FLAG_READ_ONLY)
 		set_disk_ro(nbd->disk, true);
 	else
 		set_disk_ro(nbd->disk, false);
-	if (config->flags & NBD_FLAG_SEND_FLUSH) {
-		if (config->flags & NBD_FLAG_SEND_FUA)
-			blk_queue_write_cache(nbd->disk->queue, true, true);
-		else
-			blk_queue_write_cache(nbd->disk->queue, true, false);
-	}
-	else
-		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
 
 static void send_disconnects(struct nbd_device *nbd)
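The change moves the write-cache setup from nbd_parse_flags() into __nbd_set_size(), next to the other queue-limit updates. A minimal sketch of the resulting flag-to-cache mapping, assuming the same semantics as the hunks above (nbd_apply_write_cache() is a hypothetical helper used only for illustration, not part of the patch):

/* Hypothetical helper, equivalent to the if / else if / else chain added above. */
static void nbd_apply_write_cache(struct nbd_device *nbd)
{
	/* NBD_FLAG_SEND_FLUSH: the server honours flush, so advertise a writeback cache. */
	bool wc = nbd->config->flags & NBD_FLAG_SEND_FLUSH;
	/* NBD_FLAG_SEND_FUA is only meaningful while the write cache is enabled. */
	bool fua = wc && (nbd->config->flags & NBD_FLAG_SEND_FUA);

	blk_queue_write_cache(nbd->disk->queue, wc, fua);
}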