@@ -122,7 +122,10 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
+	int sub_class;
+
 	mutex_lock(&q->mq_freeze_lock);
+	sub_class = q->mq_freeze_depth;
 	if (++q->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
@@ -131,6 +134,12 @@ void blk_freeze_queue_start(struct request_queue *q)
 	} else {
 		mutex_unlock(&q->mq_freeze_lock);
 	}
+	/*
+	 * model as down_write_trylock() so that two concurrent queue
+	 * freezes can be allowed
+	 */
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_acquire(&q->q_usage_counter_map, sub_class, 1, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
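A note on the annotation above: rwsem_acquire() takes no lock; it only records a write-mode acquisition of q_usage_counter_map for lockdep. The snapshot of mq_freeze_depth taken before the increment is passed as the lockdep subclass, so nested freezes land in distinct subclasses, and the trylock argument (1) matters because a trylock can never block, which keeps lockdep from reporting a recursive freeze as a self-deadlock. Below is a minimal sketch of the same pattern, assuming the map is initialized where the queue is allocated (the init site is not part of this diff, and all demo_* names are made up):

#include <linux/kernel.h>
#include <linux/lockdep.h>

static struct lockdep_map demo_freeze_map;	/* stand-in for q->q_usage_counter_map */

static void demo_map_init(void)
{
	static struct lock_class_key key;

	/* Registered once, e.g. where the request_queue is allocated. */
	lockdep_init_map(&demo_freeze_map, "q->q_usage_counter", &key, 0);
}

static void demo_freeze(int depth)
{
	/*
	 * (map, subclass, trylock, ip): trylock = 1 means "this acquire
	 * cannot block", so lockdep tolerates a second, nested freeze
	 * instead of flagging it as a self-deadlock.
	 */
	rwsem_acquire(&demo_freeze_map, depth, 1, _RET_IP_);
}

static void demo_unfreeze(void)
{
	rwsem_release(&demo_freeze_map, _RET_IP_);
}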
@@ -188,6 +197,9 @@ void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 		wake_up_all(&q->mq_freeze_wq);
 	}
 	mutex_unlock(&q->mq_freeze_lock);
+
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_release(&q->q_usage_counter_map, _RET_IP_);
 }
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
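With the release above, an ordinary freeze/unfreeze pair is now balanced on q_usage_counter_map, so lockdep can, for example, catch a sleeping dependency taken while the queue is frozen. A hedged usage sketch (demo_quiesce_and_update is a made-up caller, not from this patch):

static void demo_quiesce_and_update(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* rwsem_acquire() via blk_freeze_queue_start() */

	/* ... modify queue state while no I/O is in flight ... */

	blk_mq_unfreeze_queue(q);	/* rwsem_release() via __blk_mq_unfreeze_queue() */
}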
@@ -4241,6 +4253,9 @@ void blk_mq_destroy_queue(struct request_queue *q)
 	blk_queue_start_drain(q);
 	blk_mq_freeze_queue_wait(q);
 
+	/* counterpart of the acquire in blk_queue_start_drain() */
+	if (blk_queue_freeze_lockdep(q))
+		rwsem_release(&q->q_usage_counter_map, _RET_IP_);
 	blk_sync_queue(q);
 	blk_mq_cancel_work_sync(q);
 	blk_mq_exit_queue(q);
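The asymmetry here is intentional: blk_queue_start_drain() freezes the queue via blk_freeze_queue_start(), which now acquires q_usage_counter_map, and a queue being destroyed is never unfrozen, so the matching release has to sit on the teardown path instead of in an unfreeze. A hedged sketch of the drain side (the function lives in block/blk-core.c and is not part of this hunk):

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * Freezing the queue acquires q_usage_counter_map (see
	 * blk_freeze_queue_start() above); blk_mq_destroy_queue() is
	 * the matching release because no unfreeze will follow.
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}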