Original development tree for the Linux kernel GTP module; the module has long since been merged into mainline.
block: convert to pos and nr_sectors accessors

With recent cleanups, there is no place where a low level driver
directly manipulates request fields. This means that the 'hard'
request fields always equal the !hard fields. Convert all
rq->sectors, nr_sectors and current_nr_sectors references to
accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c.

[ Impact: use pos and nr_sectors accessors ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Mike Miller <mike.miller@hp.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Dario Ballabio <ballabio_dario@emc.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: unsik Kim <donari75@gmail.com>
Cc: Laurent Vivier <Laurent@lvivier.info>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
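The shape of the conversion the commit describes is easiest to see side by side. Below is a minimal sketch, assuming a hypothetical driver request-preparation routine; the function names and bodies are illustrative, not taken from the patch:

	/* Before: driver code read the request fields directly. */
	static void hypothetical_prep_old(struct request *rq)
	{
		sector_t pos = rq->sector;		/* start sector */
		unsigned long nr = rq->nr_sectors;	/* sectors in whole request */
		unsigned int cur = rq->current_nr_sectors; /* sectors in current segment */

		/* ... program the hardware using pos/nr/cur ... */
	}

	/*
	 * After: the same values come from the accessors, which always
	 * reflect the authoritative ('hard') state of the request.
	 */
	static void hypothetical_prep_new(struct request *rq)
	{
		sector_t pos = blk_rq_pos(rq);
		unsigned int nr = blk_rq_sectors(rq);
		unsigned int cur = blk_rq_cur_sectors(rq);

		/* ... program the hardware using pos/nr/cur ... */
	}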
/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
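/*
 * All of the above defaults are runtime-tunable through sysfs once the
 * scheduler is active on a queue (see the sysfs code at the bottom of
 * this file). For example, assuming deadline is selected for sda:
 *
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * lowers the read expiry from 500 ms (HZ / 2) to 100 ms.
 */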
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next request in sort order; read, write or both may be NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;
};
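/*
 * Note on the data structures above: every pending request sits on two
 * queues at once. The per-direction rbtree (sort_list) keeps requests
 * in sector order, so batches can stream sequentially for throughput;
 * the per-direction FIFO (fifo_list) keeps them in arrival order, so a
 * per-request deadline can be enforced for latency. Dispatch normally
 * walks the rbtree and falls back to the FIFO head once a deadline has
 * expired.
 */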
static void deadline_move_request(struct deadline_data *, struct request *);

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	deadline_add_rq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	rq_fifo_clear(rq);
	deadline_del_rq_rb(dd, rq);
}

static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio_end_sector(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}
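/*
 * Illustration of the front-merge test above: a bio covering sectors
 * 90..99 ends exactly where a queued request starting at sector 100
 * begins, so looking up the rbtree by bio_end_sector() finds the
 * request the bio can be glued onto. Back merges are not handled here;
 * the elevator core catches those via its hash of request end
 * positions before calling into the scheduler.
 */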
static void deadline_merged_request(struct request_queue *q,
				    struct request *req, int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void
deadline_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	dd->last_sector = rq_end_sector(rq);

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the chosen request: account for it in the batch and dispatch.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
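/*
 * Worked example of the selection logic above, assuming the default
 * tunables (fifo_batch = 16, writes_starved = 2): given a steady stream
 * of sequential reads plus one queued write, dispatch serves up to 16
 * reads from the rbtree as a single batch, then re-evaluates. Reads win
 * again until they have starved the write writes_starved times; only
 * then does a write batch start, beginning from the write FIFO head if
 * its deadline has already passed.
 */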
static void deadline_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}
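/*
 * Note: simple_strtol() was the customary string parser when this file
 * was written; a modern kernel would use kstrtoint() here instead.
 */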
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
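/*
 * For reference, the first SHOW_FUNCTION() use above expands to roughly:
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *		int __data = dd->fifo_expire[READ];
 *		__data = jiffies_to_msecs(__data);	(__CONV was 1)
 *		return deadline_var_show(__data, (page));
 *	}
 */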
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};
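/*
 * Once registered under the name "deadline", the scheduler can be
 * selected per device at runtime, e.g. (assuming sda):
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 */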
static int __init deadline_init(void)
{
	return elv_register(&iosched_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");