
[BLOCK] Get rid of request_queue_t typedef

Some of the code has been gradually transitioned to using the proper
struct request_queue, but there's lots left. So do a full sweep of
the kernel and get rid of this typedef and replace its uses with
the proper type.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 165125e1e4 (master)
Jens Axboe, 15 years ago
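
The conversion is purely mechanical: every prototype, parameter and local variable that used the typedef is spelled out as struct request_queue, with no change in behaviour. A minimal before/after sketch of the pattern (hypothetical mydev_* driver code, not taken from this commit):

#include <linux/blkdev.h>

/* before: the queue type hidden behind the typedef */
static void mydev_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL)
		mydev_issue(rq);	/* hypothetical hardware submit */
}

/* after: the structure is named explicitly, nothing else changes */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL)
		mydev_issue(rq);
}

The same substitution is applied to each of the files listed below.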
 Documentation/block/barrier.txt | 6
 Documentation/block/biodoc.txt | 10
 Documentation/block/request.txt | 2
 Documentation/iostats.txt | 2
 arch/arm/plat-omap/mailbox.c | 8
 arch/um/drivers/ubd_kern.c | 4
 block/as-iosched.c | 26
 block/blktrace.c | 10
 block/bsg.c | 12
 block/cfq-iosched.c | 39
 block/deadline-iosched.c | 18
 block/elevator.c | 75
 block/ll_rw_blk.c | 215
 block/noop-iosched.c | 14
 block/scsi_ioctl.c | 24
 drivers/acorn/block/fd1772.c | 4
 drivers/acorn/block/mfmhd.c | 2
 drivers/ata/libata-scsi.c | 2
 drivers/block/amiflop.c | 2
 drivers/block/aoe/aoe.h | 2
 drivers/block/aoe/aoeblk.c | 2
 drivers/block/ataflop.c | 2
 drivers/block/cciss.c | 10
 drivers/block/cpqarray.c | 6
 drivers/block/floppy.c | 4
 drivers/block/lguest_blk.c | 2
 drivers/block/loop.c | 4
 drivers/block/nbd.c | 4
 drivers/block/paride/pcd.c | 4
 drivers/block/paride/pd.c | 2
 drivers/block/paride/pf.c | 4
 drivers/block/pktcdvd.c | 12
 drivers/block/ps2esdi.c | 4
 drivers/block/ps3disk.c | 8
 drivers/block/rd.c | 2
 drivers/block/sunvdc.c | 2
 drivers/block/swim3.c | 4
 drivers/block/sx8.c | 20
 drivers/block/ub.c | 6
 drivers/block/umem.c | 6
 drivers/block/viodasd.c | 2
 drivers/block/xd.c | 2
 drivers/block/xd.h | 2
 drivers/block/xen-blkfront.c | 4
 drivers/block/xsysace.c | 4
 drivers/block/z2ram.c | 2
 drivers/cdrom/cdrom.c | 2
 drivers/cdrom/viocd.c | 2
 drivers/ide/ide-cd.c | 4
 drivers/ide/ide-disk.c | 4
 drivers/ide/ide-io.c | 2
 drivers/ide/ide-probe.c | 2
 drivers/ide/legacy/hd.c | 2
 drivers/md/dm-table.c | 8
 drivers/md/dm.c | 10
 drivers/md/faulty.c | 2
 drivers/md/linear.c | 14
 drivers/md/md.c | 2
 drivers/md/multipath.c | 12
 drivers/md/raid0.c | 14
 drivers/md/raid1.c | 12
 drivers/md/raid10.c | 14
 drivers/md/raid5.c | 18
 drivers/message/i2o/i2o_block.c | 4
 drivers/mmc/card/queue.c | 8
 drivers/s390/block/dasd.c | 4
 drivers/s390/block/dasd_int.h | 2
 drivers/s390/block/dcssblk.c | 2
 drivers/s390/block/xpram.c | 2
 drivers/s390/char/tape.h | 2
 drivers/s390/char/tape_block.c | 4
 drivers/sbus/char/jsflash.c | 2
 drivers/scsi/scsi_lib.c | 12
 drivers/scsi/sd.c | 4
 drivers/scsi/sr.c | 2
 fs/bio.c | 30
 include/asm-arm/arch-omap/mailbox.h | 2
 include/linux/blkdev.h | 140
 include/linux/blktrace_api.h | 2
 include/linux/elevator.h | 76
 include/linux/ide.h | 4
 include/linux/loop.h | 2
 include/linux/raid/md_k.h | 4
 include/scsi/sd.h | 2
 mm/bounce.c | 4
 85 files changed

Documentation/block/barrier.txt | 6

@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
used to indicate the whole sequence of performing barrier requests
including draining and flushing.
typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
int blk_queue_ordered(request_queue_t *q, unsigned ordered,
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn);
@q : the queue in question
@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
For example, SCSI disk driver's prepare_flush_fn looks like the
following.
static void sd_prepare_flush(request_queue_t *q, struct request *rq)
static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd_type = REQ_TYPE_BLOCK_PC;
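
For reference, the prepare_flush callback above is paired with blk_queue_ordered() when the queue is configured. A hedged sketch of that registration using the post-patch signatures (the mydev_* names, the QUEUE_ORDERED_DRAIN_FLUSH mode and the callback body are illustrative, not quoted from sd.c):

#include <linux/blkdev.h>

static void mydev_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* fill in the device's cache-flush command here */
}

static int mydev_setup_barriers(struct request_queue *q)
{
	/* drain outstanding requests and flush the cache around barriers */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 mydev_prepare_flush);
}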

Documentation/block/biodoc.txt | 10

@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
queueing (typically known as tagged command queueing), ie manage more than
one outstanding command on a queue at any given time.
blk_queue_init_tags(request_queue_t *q, int depth)
blk_queue_init_tags(struct request_queue *q, int depth)
Initialize internal command tagging structures for a maximum
depth of 'depth'.
blk_queue_free_tags((request_queue_t *q)
blk_queue_free_tags((struct request_queue *q)
Teardown tag info associated with the queue. This will be done
automatically by block if blk_queue_cleanup() is called on a queue
@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
The above are initialization and exit management, the main helpers during
normal operations are:
blk_queue_start_tag(request_queue_t *q, struct request *rq)
blk_queue_start_tag(struct request_queue *q, struct request *rq)
Start tagged operation for this request. A free tag number between
0 and 'depth' is assigned to the request (rq->tag holds this number),
@ -762,7 +762,7 @@ normal operations are:
for this queue is already achieved (or if the tag wasn't started for
some other reason), 1 is returned. Otherwise 0 is returned.
blk_queue_end_tag(request_queue_t *q, struct request *rq)
blk_queue_end_tag(struct request_queue *q, struct request *rq)
End tagged operation on this request. 'rq' is removed from the internal
book keeping structures.
@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
the hardware and software block queue and enable the driver to sanely restart
all the outstanding requests. There's a third helper to do that:
blk_queue_invalidate_tags(request_queue_t *q)
blk_queue_invalidate_tags(struct request_queue *q)
Clear the internal block tag queue and re-add all the pending requests
to the request queue. The driver will receive them again on the
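
A hedged sketch of how these tagging helpers fit together in a driver, using the post-patch signatures (hypothetical mydev_* names and tag depth; prototypes follow the text above, and the in-tree helpers may take extra arguments in some kernel versions):

#include <linux/blkdev.h>

static int mydev_init_tags(struct request_queue *q)
{
	/* one-time setup: allow up to 32 outstanding tagged commands */
	return blk_queue_init_tags(q, 32);
}

static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;		/* depth reached, retry on the next run */
		/* rq->tag now identifies this command to the hardware */
		mydev_issue(rq, rq->tag);
	}
}

static void mydev_end_command(struct request_queue *q, struct request *rq)
{
	/* completion path: release the tag before ending the request */
	blk_queue_end_tag(q, rq);
	/* ...normal request completion elided... */
}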

Documentation/block/request.txt | 2

@ -83,6 +83,6 @@ struct bio *bio DBI First bio in request
struct bio *biotail DBI Last bio in request
request_queue_t *q DB Request queue this request belongs to
struct request_queue *q DB Request queue this request belongs to
struct request_list *rl B Request list this request came from

Documentation/iostats.txt | 2

@ -79,7 +79,7 @@ Field 8 -- # of milliseconds spent writing
measured from __make_request() to end_that_request_last()).
Field 9 -- # of I/Os currently in progress
The only field that should go to zero. Incremented as requests are
given to appropriate request_queue_t and decremented as they finish.
given to appropriate struct request_queue and decremented as they finish.
Field 10 -- # of milliseconds spent doing I/Os
This field is increases so long as field 9 is nonzero.
Field 11 -- weighted # of milliseconds spent doing I/Os

arch/arm/plat-omap/mailbox.c | 8

@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
/*
* Mailbox interrupt handler
*/
static void mbox_txq_fn(request_queue_t * q)
static void mbox_txq_fn(struct request_queue * q)
{
}
static void mbox_rxq_fn(request_queue_t * q)
static void mbox_rxq_fn(struct request_queue * q)
{
}
@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
struct request *rq;
mbox_msg_t msg;
request_queue_t *q = mbox->rxq->queue;
struct request_queue *q = mbox->rxq->queue;
disable_mbox_irq(mbox, IRQ_RX);
@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
request_fn_proc * proc,
void (*work) (struct work_struct *))
{
request_queue_t *q;
struct request_queue *q;
struct omap_mbox_queue *mq;
mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);

arch/um/drivers/ubd_kern.c | 4

@ -469,7 +469,7 @@ __uml_help(fakehd,
" Change the ubd device name to \"hd\".\n\n"
);
static void do_ubd_request(request_queue_t * q);
static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
@ -1081,7 +1081,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
}
/* Called with dev->lock held */
static void do_ubd_request(request_queue_t *q)
static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;

block/as-iosched.c | 26

@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
* as_completed_request is to be called when a request has completed and
* returned something to the requesting process, be it an error or data.
*/
static void as_completed_request(request_queue_t *q, struct request *rq)
static void as_completed_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
@ -853,7 +853,8 @@ out:
* reference unless it replaces the request at somepart of the elevator
* (ie. the dispatch queue)
*/
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
static void as_remove_queued_request(struct request_queue *q,
struct request *rq)
{
const int data_dir = rq_is_sync(rq);
struct as_data *ad = q->elevator->elevator_data;
@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* read/write expire, batch expire, etc, and moves it to the dispatch
* queue. Returns 1 if a request was found, 0 otherwise.
*/
static int as_dispatch_request(request_queue_t *q, int force)
static int as_dispatch_request(struct request_queue *q, int force)
{
struct as_data *ad = q->elevator->elevator_data;
const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@ -1139,7 +1140,7 @@ fifo_expired:
/*
* add rq to rbtree and fifo
*/
static void as_add_request(request_queue_t *q, struct request *rq)
static void as_add_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
int data_dir;
@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
static void as_activate_request(request_queue_t *q, struct request *rq)
static void as_activate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
RQ_SET_STATE(rq, AS_RQ_REMOVED);
@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}
static void as_deactivate_request(request_queue_t *q, struct request *rq)
static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
* is not empty - it is used in the block layer to check for plugging and
* merging opportunities
*/
static int as_queue_empty(request_queue_t *q)
static int as_queue_empty(struct request_queue *q)
{
struct as_data *ad = q->elevator->elevator_data;
@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
}
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
static void as_merged_request(request_queue_t *q, struct request *req, int type)
static void as_merged_request(struct request_queue *q, struct request *req,
int type)
{
struct as_data *ad = q->elevator->elevator_data;
@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
}
}
static void as_merged_requests(request_queue_t *q, struct request *req,
static void as_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
spin_unlock_irqrestore(q->queue_lock, flags);
}
static int as_may_queue(request_queue_t *q, int rw)
static int as_may_queue(struct request_queue *q, int rw)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
static void *as_init_queue(request_queue_t *q)
static void *as_init_queue(struct request_queue *q)
{
struct as_data *ad;

block/blktrace.c | 10

@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
kfree(bt);
}
static int blk_trace_remove(request_queue_t *q)
static int blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
@ -401,7 +401,7 @@ err:
return ret;
}
static int blk_trace_startstop(request_queue_t *q, int start)
static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
int ret;
@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
**/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
request_queue_t *q;
struct request_queue *q;
int ret, start = 0;
q = bdev_get_queue(bdev);
@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
* @q: the request queue associated with the device
*
**/
void blk_trace_shutdown(request_queue_t *q)
void blk_trace_shutdown(struct request_queue *q)
{
if (q->blk_trace) {
blk_trace_startstop(q, 0);

block/bsg.c | 12

@ -37,7 +37,7 @@
#define BSG_VERSION "0.4"
struct bsg_device {
request_queue_t *queue;
struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list;
struct list_head done_list;
@ -180,7 +180,7 @@ unlock:
return ret;
}
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@ -214,7 +214,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid
*/
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
@ -250,7 +250,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
request_queue_t *q = bd->queue;
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
@ -345,7 +345,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
rq->sense = bc->sense;
@ -611,7 +611,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
bc = NULL;
ret = 0;
while (nr_commands) {
request_queue_t *q = bd->queue;
struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {

block/cfq-iosched.c | 39

@ -71,7 +71,7 @@ struct cfq_rb_root {
* Per block device queue structure
*/
struct cfq_data {
request_queue_t *queue;
struct request_queue *queue;
/*
* rr list of queues with requests and the count of them
@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
kblockd_schedule_work(&cfqd->unplug_work);
}
static int cfq_queue_empty(request_queue_t *q)
static int cfq_queue_empty(struct request_queue *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
return NULL;
}
static void cfq_activate_request(request_queue_t *q, struct request *rq)
static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
}
}
static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
static int cfq_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
static void cfq_merged_request(request_queue_t *q, struct request *req,
static void cfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
}
static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
/*
@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
cfq_remove_request(next);
}
static int cfq_allow_merge(request_queue_t *q, struct request *rq,
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* Move request from internal lists to the request queue dispatch list.
*/
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
static int cfq_dispatch_requests(request_queue_t *q, int force)
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
struct cfq_data *cfqd = cic->key;
if (cfqd) {
request_queue_t *q = cfqd->queue;
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__cfq_exit_single_io_context(cfqd, cic);
@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
static void cfq_insert_request(request_queue_t *q, struct request *rq)
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
cfq_rq_enqueued(cfqd, cfqq, rq);
}
static void cfq_completed_request(request_queue_t *q, struct request *rq)
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
static int cfq_may_queue(request_queue_t *q, int rw)
static int cfq_may_queue(struct request_queue *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
* Allocate cfq data structures associated with this request.
*/
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
request_queue_t *q = cfqd->queue;
struct request_queue *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
static void cfq_exit_queue(elevator_t *e)
{
struct cfq_data *cfqd = e->elevator_data;
request_queue_t *q = cfqd->queue;
struct request_queue *q = cfqd->queue;
cfq_shutdown_timer_wq(cfqd);
@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
static void *cfq_init_queue(request_queue_t *q)
static void *cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;

block/deadline-iosched.c | 18

@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* remove rq from rbtree and fifo.
*/
static void deadline_remove_request(request_queue_t *q, struct request *rq)
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
}
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
@ -144,8 +144,8 @@ out:
return ret;
}
static void deadline_merged_request(request_queue_t *q, struct request *req,
int type)
static void deadline_merged_request(struct request_queue *q,
struct request *req, int type)
{
struct deadline_data *dd = q->elevator->elevator_data;
@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
}
static void
deadline_merged_requests(request_queue_t *q, struct request *req,
deadline_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
request_queue_t *q = rq->q;
struct request_queue *q = rq->q;
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
static int deadline_dispatch_requests(request_queue_t *q, int force)
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
@ -335,7 +335,7 @@ dispatch_request:
return 1;
}
static int deadline_queue_empty(request_queue_t *q)
static int deadline_queue_empty(struct request_queue *q)
{
struct deadline_data *dd = q->elevator->elevator_data;
@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
static void *deadline_init_queue(request_queue_t *q)
static void *deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;

block/elevator.c | 75

@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
*/
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
request_queue_t *q = rq->q;
struct request_queue *q = rq->q;
elevator_t *e = q->elevator;
if (e->ops->elevator_allow_merge_fn)
@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
static void *elevator_init_queue(struct request_queue *q,
struct elevator_queue *eq)
{
return eq->ops->elevator_init_fn(q);
}
static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
void *data)
{
q->elevator = eq;
@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;
static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
static elevator_t *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
elevator_t *eq;
int i;
@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
int elevator_init(request_queue_t *q, char *name)
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
struct elevator_queue *eq;
@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)
EXPORT_SYMBOL(elevator_exit);
static void elv_activate_rq(request_queue_t *q, struct request *rq)
static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
e->ops->elevator_activate_req_fn(q, rq);
}
static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
hlist_del_init(&rq->hash);
}
static void elv_rqhash_del(request_queue_t *q, struct request *rq)
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
static void elv_rqhash_add(request_queue_t *q, struct request *rq)
static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}
static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
elevator_t *e = q->elevator;
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
* entry. rq is sort insted into the dispatch queue. To be used by
* specific elevators.
*/
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
struct request *__rq;
@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
void elv_merged_request(request_queue_t *q, struct request *rq, int type)
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
q->last_merge = rq;
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
elevator_t *e = q->elevator;
@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
q->last_merge = rq;
}
void elv_requeue_request(request_queue_t *q, struct request *rq)
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
static void elv_drain_elevator(request_queue_t *q)
static void elv_drain_elevator(struct request_queue *q)
{
static int printed;
while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
}
}
void elv_insert(request_queue_t *q, struct request *rq, int where)
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
struct list_head *pos;
unsigned ordseq;
@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
}
}
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
if (q->ordcolor)
@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(request_queue_t *q, struct request *rq, int where,
void elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
unsigned long flags;
@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(elv_add_request);
static inline struct request *__elv_next_request(request_queue_t *q)
static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
}
}
struct request *elv_next_request(request_queue_t *q)
struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
int ret;
@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)
EXPORT_SYMBOL(elv_next_request);
void elv_dequeue_request(request_queue_t *q, struct request *rq)
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_dequeue_request);
int elv_queue_empty(request_queue_t *q)
int elv_queue_empty(struct request_queue *q)
{
elevator_t *e = q->elevator;
@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)
EXPORT_SYMBOL(elv_queue_empty);
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
return NULL;
}
struct request *elv_former_request(request_queue_t *q, struct request *rq)
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
return 0;
}
void elv_put_request(request_queue_t *q, struct request *rq)
void elv_put_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(rq);
}
int elv_may_queue(request_queue_t *q, int rw)
int elv_may_queue(struct request_queue *q, int rw)
{
elevator_t *e = q->elevator;
@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
return ELV_MQUEUE_MAY;
}
void elv_completed_request(request_queue_t *q, struct request *rq)
void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
* need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason.
*/
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
void *data;
@ -1078,7 +1080,8 @@ fail_register:
return 0;
}
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
size_t count)
{
char elevator_name[ELV_NAME_MAX];
size_t len;
@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
return count;
}
ssize_t elv_iosched_show(request_queue_t *q, char *name)
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
elevator_t *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
struct request *elv_rb_former_request(struct request_queue *q,
struct request *rq)
{
struct rb_node *rbprev = rb_prev(&rq->rb_node);
@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_rb_former_request);
struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
struct request *elv_rb_latter_request(struct request_queue *q,
struct request *rq)
{
struct rb_node *rbnext = rb_next(&rq->rb_node);

block/ll_rw_blk.c | 215

@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(request_queue_t *q, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
struct backing_dev_info *ret = NULL;
request_queue_t *q = bdev_get_queue(bdev);
struct request_queue *q = bdev_get_queue(bdev);
if (q)
ret = &q->backing_dev_info;
@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
* cdb from the request data for instance.
*
*/
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
* no merge_bvec_fn is defined for a queue, and only the fixed limits are
* honored.
*/
void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
{
/*
* set defaults
@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
EXPORT_SYMBOL(blk_queue_make_request);
static void rq_init(request_queue_t *q, struct request *rq)
static void rq_init(struct request_queue *q, struct request *rq)
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
* feature should call this function and indicate so.
*
**/
int blk_queue_ordered(request_queue_t *q, unsigned ordered,
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
* to the block layer by defining it through this call.
*
**/
void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
{
q->issue_flush_fn = iff;
}
@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
/*
* Cache flushing for ordered writes handling
*/
inline unsigned blk_ordered_cur_seq(request_queue_t *q)
inline unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
unsigned blk_ordered_req_seq(struct request *rq)
{
request_queue_t *q = rq->q;
struct request_queue *q = rq->q;
BUG_ON(q->ordseq == 0);
@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
return QUEUE_ORDSEQ_DONE;
}
void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
int uptodate;
@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error)
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
static void queue_flush(request_queue_t *q, unsigned which)
static void queue_flush(struct request_queue *q, unsigned which)
{
struct request *rq;
rq_end_io_fn *end_io;
@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
static inline struct request *start_ordered(request_queue_t *q,
static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q,
return rq;
}
int blk_do_ordered(request_queue_t *q, struct request **rqp)
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
request_queue_t *q = bio->bi_private;
struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
request_queue_t *q = rq->q;
struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @page.
**/
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
int dma = 0;
@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Enables a low level driver to set an upper limit on the size of
* received requests.
**/
void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);