@@ -43,6 +43,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
+static DEFINE_MUTEX(blk_mq_cpuhp_lock);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -3739,13 +3740,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
         return 0;
 }
 
-static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-        if (!(hctx->flags & BLK_MQ_F_STACKING))
+        lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+        if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+            !hlist_unhashed(&hctx->cpuhp_online)) {
                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
                                                     &hctx->cpuhp_online);
-        cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
-                                            &hctx->cpuhp_dead);
+                INIT_HLIST_NODE(&hctx->cpuhp_online);
+        }
+
+        if (!hlist_unhashed(&hctx->cpuhp_dead)) {
+                cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+                                                    &hctx->cpuhp_dead);
+                INIT_HLIST_NODE(&hctx->cpuhp_dead);
+        }
+}
+
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+        mutex_lock(&blk_mq_cpuhp_lock);
+        __blk_mq_remove_cpuhp(hctx);
+        mutex_unlock(&blk_mq_cpuhp_lock);
+}
+
+static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+        lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+        if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+            hlist_unhashed(&hctx->cpuhp_online))
+                cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+                                &hctx->cpuhp_online);
+
+        if (hlist_unhashed(&hctx->cpuhp_dead))
+                cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+                                &hctx->cpuhp_dead);
+}
+
+static void __blk_mq_remove_cpuhp_list(struct list_head *head)
+{
+        struct blk_mq_hw_ctx *hctx;
+
+        lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+        list_for_each_entry(hctx, head, hctx_list)
+                __blk_mq_remove_cpuhp(hctx);
+}
+
+/*
+ * Unregister cpuhp callbacks from exited hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
+{
+        LIST_HEAD(hctx_list);
+
+        spin_lock(&q->unused_hctx_lock);
+        list_splice_init(&q->unused_hctx_list, &hctx_list);
+        spin_unlock(&q->unused_hctx_lock);
+
+        mutex_lock(&blk_mq_cpuhp_lock);
+        __blk_mq_remove_cpuhp_list(&hctx_list);
+        mutex_unlock(&blk_mq_cpuhp_lock);
+
+        spin_lock(&q->unused_hctx_lock);
+        list_splice(&hctx_list, &q->unused_hctx_list);
+        spin_unlock(&q->unused_hctx_lock);
+}
+
+/*
+ * Register cpuhp callbacks from all hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
+{
+        struct blk_mq_hw_ctx *hctx;
+        unsigned long i;
+
+        mutex_lock(&blk_mq_cpuhp_lock);
+        queue_for_each_hw_ctx(q, hctx, i)
+                __blk_mq_add_cpuhp(hctx);
+        mutex_unlock(&blk_mq_cpuhp_lock);
 }
 
 /*
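The helpers added above use "hlist node is unhashed" as the marker for "cpuhp callback not registered", which is what makes __blk_mq_add_cpuhp() and __blk_mq_remove_cpuhp() idempotent under blk_mq_cpuhp_lock. A minimal sketch of that idiom, not from the patch and using a made-up struct foo, but with the real hlist primitives from <linux/list.h>:

        #include <linux/list.h>

        struct foo {
                /* set up with INIT_HLIST_NODE() when the object is created */
                struct hlist_node cpuhp_node;
        };

        static void foo_register(struct foo *f, struct hlist_head *callbacks)
        {
                if (!hlist_unhashed(&f->cpuhp_node))
                        return;                         /* already registered: no-op */
                hlist_add_head(&f->cpuhp_node, callbacks);
        }

        static void foo_unregister(struct foo *f)
        {
                if (hlist_unhashed(&f->cpuhp_node))
                        return;                         /* never registered or already removed */
                hlist_del_init(&f->cpuhp_node);         /* removal re-arms the unhashed state */
        }

In the patch the add/remove pair is cpuhp_state_add_instance_nocalls()/cpuhp_state_remove_instance_nocalls(); the cpuhp core hashes the node when the instance is added but does not reinitialize it on removal, hence the explicit INIT_HLIST_NODE() after each remove above.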
@@ -3796,8 +3875,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
         if (set->ops->exit_hctx)
                 set->ops->exit_hctx(hctx, hctx_idx);
 
-        blk_mq_remove_cpuhp(hctx);
-
         xa_erase(&q->hctx_table, hctx_idx);
 
         spin_lock(&q->unused_hctx_lock);
@@ -3814,6 +3891,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (i == nr_queue)
                         break;
+                blk_mq_remove_cpuhp(hctx);
                 blk_mq_exit_hctx(q, set, hctx, i);
         }
 }
@@ -3837,11 +3915,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
                 goto exit_flush_rq;
 
-        if (!(hctx->flags & BLK_MQ_F_STACKING))
-                cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
-                                &hctx->cpuhp_online);
-        cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
         return 0;
 
 exit_flush_rq:
@@ -3876,6 +3949,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
         spin_lock_init(&hctx->lock);
         INIT_LIST_HEAD(&hctx->dispatch);
+        INIT_HLIST_NODE(&hctx->cpuhp_dead);
+        INIT_HLIST_NODE(&hctx->cpuhp_online);
         hctx->queue = q;
         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
 
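Initializing cpuhp_dead and cpuhp_online at allocation time, together with the re-initialization in __blk_mq_remove_cpuhp() above, keeps the hlist_unhashed() tests meaningful for the whole lifetime of the hctx. As an aside, not part of the patch: INIT_HLIST_NODE() clears node->pprev, and hlist_unhashed() tests exactly that field, so a freshly initialized node always reads as "not registered":

        struct hlist_node n;

        INIT_HLIST_NODE(&n);            /* n.next = NULL, n.pprev = NULL */
        WARN_ON(!hlist_unhashed(&n));   /* never warns: the node starts out unhashed */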
@@ -4414,6 +4489,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                 xa_for_each_start(&q->hctx_table, j, hctx, j)
                         blk_mq_exit_hctx(q, set, hctx, j);
         mutex_unlock(&q->sysfs_lock);
+
+        /* unregister cpuhp callbacks for exited hctxs */
+        blk_mq_remove_hw_queues_cpuhp(q);
+
+        /* register cpuhp for new initialized hctxs */
+        blk_mq_add_hw_queues_cpuhp(q);
 }
 
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
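One detail worth spelling out: blk_mq_remove_hw_queues_cpuhp() cannot walk unused_hctx_list while holding q->unused_hctx_lock, because that is a spinlock while blk_mq_cpuhp_lock and the cpuhp_state_*_instance_nocalls() calls may sleep. Splicing the entries onto a private list, doing the sleeping work, and splicing them back is the usual shape for that. A generic sketch of the pattern (hypothetical names, not from the patch):

        #include <linux/list.h>
        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        struct item {
                struct list_head link;
        };

        static void do_slow_work(struct item *it)
        {
                /* stand-in for per-item work that may sleep */
        }

        static void process_all(struct list_head *shared, spinlock_t *lock,
                                struct mutex *sleeping_lock)
        {
                LIST_HEAD(local);
                struct item *it;

                /* detach everything while only the spinlock is held ... */
                spin_lock(lock);
                list_splice_init(shared, &local);
                spin_unlock(lock);

                /* ... so the work that may sleep runs without it */
                mutex_lock(sleeping_lock);
                list_for_each_entry(it, &local, link)
                        do_slow_work(it);
                mutex_unlock(sleeping_lock);

                /* put the entries back for later reuse */
                spin_lock(lock);
                list_splice(&local, shared);
                spin_unlock(lock);
        }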