@@ -349,7 +349,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);

 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +357,17 @@ EXPORT_SYMBOL(blk_put_queue);
  * If not, only ELVPRIV requests are drained.  The caller is responsible
  * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	int i;

+	lockdep_assert_held(q->queue_lock);
+
 	while (true) {
 		bool drain = false;

-		spin_lock_irq(q->queue_lock);
-
 		/*
 		 * The caller might be trying to drain @q before its
 		 * elevator is initialized.
@@ -401,11 +403,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			}
 		}

-		spin_unlock_irq(q->queue_lock);
-
 		if (!drain)
 			break;
+
+		spin_unlock_irq(q->queue_lock);
+
 		msleep(10);
+
+		spin_lock_irq(q->queue_lock);
 	}

 	/*
@@ -416,13 +421,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	if (q->request_fn) {
 		struct request_list *rl;

-		spin_lock_irq(q->queue_lock);
-
 		blk_queue_for_each_rl(rl, q)
 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 				wake_up_all(&rl->wait[i]);
-
-		spin_unlock_irq(q->queue_lock);
 	}
 }
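
After this hunk, __blk_drain_queue() is entered with q->queue_lock held (enforced by the new lockdep_assert_held()) and only drops the lock around the msleep(10) back-off between polls. Below is a minimal userspace sketch of that drop-lock/sleep/reacquire polling pattern, not kernel code: the struct, its fields, and drain_queue() are hypothetical stand-ins, a pthread mutex replaces the queue spinlock, and usleep() replaces msleep(). Build with `cc -pthread sketch.c`.

/*
 * Userspace sketch of the locking pattern: the caller enters with the
 * lock held; the loop drops it only for the sleep between polls.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	int pending;		/* stands in for the in-flight request counts */
};

/* Caller must hold q->lock, mirroring lockdep_assert_held(q->queue_lock). */
static void drain_queue(struct queue *q)
{
	while (true) {
		bool drain = q->pending > 0;	/* lock held: state is stable here */

		if (!drain)
			break;

		/*
		 * Drop the lock only for the sleep so other threads can make
		 * progress (complete requests), then retake it and re-poll.
		 */
		pthread_mutex_unlock(&q->lock);
		usleep(10000);			/* roughly msleep(10) */
		pthread_mutex_lock(&q->lock);
	}
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = 0 };

	/* Usage mirrors the converted call sites: lock, drain, unlock. */
	pthread_mutex_lock(&q.lock);
	drain_queue(&q);
	pthread_mutex_unlock(&q.lock);
	printf("queue drained\n");
	return 0;
}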
@@ -446,7 +447,10 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);

 	if (drain) {
-		blk_drain_queue(q, false);
+		spin_lock_irq(q->queue_lock);
+		__blk_drain_queue(q, false);
+		spin_unlock_irq(q->queue_lock);
+
 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
 		synchronize_rcu();
 	}
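
The synchronize_rcu() that follows the drain is what the comment above is about: once blk_queue_bypass_start() returns, any code that samples the bypass state inside an RCU read-side critical section is guaranteed to see blk_queue_bypass(q) as true. A kernel-style sketch of such a reader follows; it is illustrative only (not standalone-compilable) and the function name is hypothetical, though rcu_read_lock()/rcu_read_unlock() and the blk_queue_bypass() flag test are the real interfaces involved.

/*
 * Illustrative kernel-style sketch: a reader that must observe bypass
 * mode once blk_queue_bypass_start() has returned.  The function name
 * is hypothetical.
 */
static bool example_reader_sees_bypass(struct request_queue *q)
{
	bool bypass;

	rcu_read_lock();
	/*
	 * Readers that began before the bypass flag was set have finished
	 * by the time synchronize_rcu() returns, so from here on the flag
	 * test cannot race with the drain above.
	 */
	bypass = blk_queue_bypass(q);
	rcu_read_unlock();

	return bypass;
}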
@@ -504,7 +508,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);

 	/* drain all requests queued before DYING marking */
-	blk_drain_queue(q, true);
+	spin_lock_irq(lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(lock);

 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
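
Both converted call sites now follow the same shape: take the queue lock (blk_cleanup_queue() goes through its local `lock` pointer to q->queue_lock), call __blk_drain_queue(), and release the lock, which satisfies the lockdep_assert_held() added above. A kernel-style sketch of that calling convention, with a hypothetical wrapper name:

/*
 * Sketch of the calling convention this patch introduces; the wrapper
 * name is hypothetical and the snippet is not meant to be compiled
 * standalone.
 */
static void example_drain_caller(struct request_queue *q, bool drain_all)
{
	spin_lock_irq(q->queue_lock);	/* __blk_drain_queue() expects the lock held */
	__blk_drain_queue(q, drain_all);
	spin_unlock_irq(q->queue_lock);
}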