Patch from Nick Piggin

Contributions from Mitchell Blank Jr

This patch replaces the anticipatory io scheduler and smaller batch_expire
patch in mm3.

It bumps dd->anticipate to 1 when we first start anticipating, and increments
it only in the timeout.  Basically, dd->anticipate is the xth ms we have been
anticipating for.  This means we don't need "no auto unplug" stuff.

 block/deadline-iosched.c      |  563 +++++++++++++++++++++++++++++++++---------
 block/deadline-iosched.c.orig |    0
 2 files changed, 450 insertions(+), 113 deletions(-)

diff -puN drivers/block/deadline-iosched.c~anticipatory_io_scheduling drivers/block/deadline-iosched.c
--- 25/drivers/block/deadline-iosched.c~anticipatory_io_scheduling	2003-02-14 18:56:27.000000000 -0800
+++ 25-akpm/drivers/block/deadline-iosched.c	2003-02-14 18:56:27.000000000 -0800
@@ -19,6 +19,24 @@
 #include
 #include
 
+struct ant_stats {
+	int reads;			/* total read requests */
+	int writes;			/* total write requests */
+	int anticipate_starts;
+	int expired_read_batches;
+	int expired_write_batches;
+	int timeouts;
+	int anticipate_hits;
+
+	int ant_delay_hist[100];	/* milliseconds */
+
+	/*
+	 * This is a logarithmic (base 2) histogram
+	 */
+	int lba_forward_offsets[32];
+	int lba_backward_offsets[32];
+} ant_stats;
+
 /*
  * See Documentation/deadline-iosched.txt
  */
@@ -26,7 +44,7 @@
 /*
  * max time before a read is submitted.
  */
-static int read_expire = HZ / 2;
+static int read_expire = HZ / 10;
 
 /*
  * ditto for writes, these limits are not hard, even
@@ -38,15 +56,18 @@ static int write_expire = 5 * HZ;
  * read_batch_expire describes how long we will allow a stream of reads to
  * persist before looking to see whether it is time to switch over to writes.
  */
-static int read_batch_expire = HZ / 20;
+static int read_batch_expire = HZ / 10;
 
 /*
  * write_batch_expire describes how long we will allow a stream of writes to
  * persist before looking to see whether it is time to switch over to reads.
  */
-static int write_batch_expire = HZ / 40;
+static int write_batch_expire = HZ / 20;
 
-static int writes_starved = 2;	/* max times reads can starve a write */
+/*
+ * max time we may wait to anticipate a read
+ */
+static int antic_expire = HZ / 100;
 
 static const int deadline_hash_shift = 10;
 #define DL_HASH_BLOCK(sec)	((sec) >> 3)
@@ -62,6 +83,11 @@ static const int deadline_hash_shift = 1
 	(dd)->hash_valid_count = 1;	\
 } while (0)
 
+#define ANTIC_OFF	0
+#define ANTIC_WAIT	1
+#define ANTIC_TIMEOUT	2
+#define ANTIC_FOUND	3
+
 struct deadline_data {
 	/*
 	 * run time data
@@ -78,17 +104,23 @@ struct deadline_data {
 	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 	unsigned long hash_valid_count;	/* barrier hash count */
-	unsigned int current_batch_expires;
+	unsigned long current_batch_expires;
+	unsigned long current_check_fifo[2];
 	int batch_data_dir;		/* current/last batch READ or WRITE */
-	unsigned int starved;		/* times reads have starved writes */
+
+	int antic_status;
+	unsigned long antic_start;	/* jiffies: when it started */
+	struct timer_list antic_timer;	/* anticipatory scheduling timer */
+	struct work_struct antic_work;	/* anticipatory scheduling work */
+	unsigned long current_id;	/* Identify the expected process */
 
 	/*
 	 * settings that change how the i/o scheduler behaves
 	 */
 	int fifo_expire[2];
-	int writes_starved;
 	int batch_expire[2];
 	int front_merges;
+	int antic_expire;
 };
 
 /*
@@ -103,6 +135,8 @@ struct deadline_rq {
 
 	struct request *request;
 
+	unsigned long request_id;
+
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
@@ -116,7 +150,15 @@ struct deadline_rq {
 	unsigned long expires;
 };
 
-static inline void deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq);
+static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
+
+/*
+ * deadline_update_drq must be called whenever a request (drq) is added to
+ * the sort_list. This function keeps caches up to date, and checks if the
+ * request might be one we are "anticipating"
+ */
+static void
+deadline_update_drq(struct deadline_data *dd, struct deadline_rq *drq);
 
 static kmem_cache_t *drq_pool;
 
@@ -137,7 +179,7 @@ static inline void deadline_del_drq_hash
 	__deadline_del_drq_hash(drq);
 }
 
-static inline void
+static void
 deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	struct request *rq = drq->request;
@@ -230,46 +272,31 @@ __deadline_add_drq_rb(struct deadline_da
 	return 0;
 }
 
+/*
+ * Add the request to the rb tree if it is unique. If there is an alias (an
+ * existing request against the same sector), which can happen when using
+ * direct IO, then move the alias to the dispatch list and then add the
+ * request.
+ */
 static void
 deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 {
-	struct deadline_rq *__alias;
-	const int data_dir = rq_data_dir(drq->request);
-
-	/* keep the next_drq cache up to date */
-	if (0 && dd->next_drq[data_dir] != NULL) {
-		sector_t last = dd->last_sector[data_dir];
-		sector_t next = dd->next_drq[data_dir]->request->sector;
-		sector_t this = drq->request->sector;
-
-		/*
-		 * We can have one of the following 2 situations. If "this"
-		 * lands on a + (or L), it is a better "next" candidate.
-		 *
-		 * ---L+++N---
-		 * +++N---L+++
-		 */
-		if ((next > last && (this >= last && this < next))
-		    || (next < last && (this >= last || this < next)) )
-			/* we have a better "next" drq */
-			dd->next_drq[data_dir] = drq;
-	} else
-		dd->next_drq[data_dir] = drq;
-
-	/* now add to the rb tree */
-	drq->rb_key = rq_rb_key(drq->request);
-
-retry:
-	__alias = __deadline_add_drq_rb(dd, drq);
-	if (!__alias) {
-		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		return;
-	}
+	struct deadline_rq *alias;
+	struct request *rq = drq->request;
+
+	drq->rb_key = rq_rb_key(rq);
 
-	deadline_move_to_dispatch(dd, __alias);
-	goto retry;
+	while ((alias = __deadline_add_drq_rb(dd, drq)))
+		deadline_move_request(dd, alias);
+
+	rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+	deadline_update_drq(dd, drq);
 }
 
+static struct deadline_rq *
+deadline_choose_req(struct deadline_data *dd,
+		struct deadline_rq *drq1, struct deadline_rq *drq2);
+
 static inline void
 deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 {
@@ -277,11 +304,21 @@ deadline_del_drq_rb(struct deadline_data
 
 	if (dd->next_drq[data_dir] == drq) {
 		struct rb_node *rbnext = rb_next(&drq->rb_node);
+		struct rb_node *rbprev = rb_prev(&drq->rb_node);
+		struct deadline_rq *drq_next, *drq_prev;
+
+		if (rbprev)
+			drq_prev = rb_entry_drq(rbprev);
+		else
+			drq_prev = NULL;
 
 		if (rbnext)
-			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+			drq_next = rb_entry_drq(rbnext);
 		else
-			dd->next_drq[data_dir] = deadline_find_first_drq(dd, data_dir);
+			drq_next = deadline_find_first_drq(dd, data_dir);
+
+		dd->next_drq[data_dir] = deadline_choose_req(dd,
+						drq_next, drq_prev);
 	}
 
 	if (ON_RB(&drq->rb_node)) {
@@ -313,7 +350,7 @@ deadline_find_drq_rb(struct deadline_dat
 /*
  * add drq to rbtree and fifo
  */
-static inline void
+static void
 deadline_add_request(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	const int data_dir = rq_data_dir(drq->request);
@@ -463,7 +500,7 @@ deadline_merged_requests(request_queue_t
 /*
  * move request from sort list to dispatch queue.
  */
-static inline void
+static void
 deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	request_queue_t *q = drq->request->q;
@@ -477,6 +514,7 @@ deadline_move_to_dispatch(struct deadlin
 	list_add_tail(&drq->request->queuelist, dd->dispatch);
 }
 
+
 /*
  * move an entry to dispatch queue
  */
@@ -485,14 +523,28 @@ deadline_move_request(struct deadline_da
 {
 	const int data_dir = rq_data_dir(drq->request);
 	struct rb_node *rbnext = rb_next(&drq->rb_node);
+	struct rb_node *rbprev = rb_prev(&drq->rb_node);
+	struct deadline_rq *drq_next, *drq_prev;
 
-	if (rbnext)
-		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+	BUG_ON(!ON_RB(&drq->rb_node));
+
+	if (rbprev)
+		drq_prev = rb_entry_drq(rbprev);
+	else
+		drq_prev = NULL;
+
+	if (rbnext)
+		drq_next = rb_entry_drq(rbnext);
 	else
-		dd->next_drq[data_dir] = deadline_find_first_drq(dd, data_dir);
+		drq_next = deadline_find_first_drq(dd, data_dir);
 
+	dd->next_drq[data_dir] = deadline_choose_req(dd, drq_next, drq_prev);
 	dd->last_sector[data_dir] = drq->request->sector + drq->request->nr_sectors;
 
+	if (data_dir == READ)
+		/* In case we have to anticipate after this */
+		dd->current_id = drq->request_id;
+
 	/*
 	 * take it off the sort and fifo list, move
	 * to dispatch queue
@@ -503,107 +555,355 @@ deadline_move_request(struct deadline_da
 #define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)
 
 /*
- * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
+ * deadline_fifo_expired returns 0 if there are no expired reads on the fifo,
+ * 1 otherwise.
+ */
+static inline int deadline_fifo_expired(struct deadline_data *dd, int ddir)
+{
+	struct deadline_rq *drq;
+
+	if (list_empty(&dd->fifo_list[ddir]))
+		return 0;
+
+	drq = list_entry_fifo(dd->fifo_list[ddir].next);
+
+	return time_after(jiffies, drq->expires);
+}
+
+/*
+ * deadline_check_fifo returns 0 if the fifo list is not due to be checked,
+ * 1 otherwise.
  */
 static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 {
-	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+	return time_after(jiffies, dd->current_check_fifo[ddir]);
+}
 
-	/*
-	 * drq is expired!
-	 */
-	if (time_after(jiffies, drq->expires))
+/*
+ * deadline_batch_expired returns true if the current batch has expired.
+ */
+static inline int deadline_batch_expired(struct deadline_data *dd)
+{
+	return time_after(jiffies, dd->current_batch_expires);
+}
+
+/*
+ * anticipatory scheduling functions follow
+ */
+
+static inline unsigned long request_id(void)
+{
+	return (unsigned long)current->pid;
+}
+
+static int deadline_queue_empty(request_queue_t *q);
+
+/*
+ * deadline_anticipate_work is scheduled by deadline_anticipate_timeout. It
+ * stops anticipation, ie. resumes dispatching requests to a device.
+ */
+static void deadline_anticipate_work(void *data)
+{
+	struct request_queue *q = data;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (!deadline_queue_empty(q))
+		q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * deadline_anticipate_timeout is the timer function set by
+ * deadline_start_anticipate.
+ */
+static void deadline_anticipate_timeout(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+	struct deadline_data *dd = q->elevator.elevator_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	if (dd->antic_status != ANTIC_FOUND)
+		dd->antic_status = ANTIC_TIMEOUT;
+
+	blk_remove_plug(q);
+	schedule_work(&dd->antic_work);
+	ant_stats.timeouts++;
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * deadline_close_req decides if one request is considered "close" to the
+ * previous one issued.
+ */
+static int
+deadline_close_req(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	unsigned long delay = jiffies - dd->antic_start;
+	sector_t last = dd->last_sector[dd->batch_data_dir];
+	sector_t next = drq->request->sector;
+
+	sector_t delta;	/* acceptable close offset (in sectors) */
+
+	if (dd->antic_status == ANTIC_OFF || delay <= 2)
+		delta = 64;
+	else if (delay <= dd->antic_expire / 2)
+		delta = 64 << ((delay - 2)*2);
+	else
+		return 1;
+
+	return (last <= next) && (next <= last + delta);
+}
+
+#define MAXBACK (512 * 1024)
+
+static struct deadline_rq *
+deadline_choose_req(struct deadline_data *dd,
+		struct deadline_rq *drq1, struct deadline_rq *drq2)
+{
+	int data_dir;
+	sector_t last, s1, s2, d1, d2;
+	const sector_t maxback = MAXBACK;
+
+	if (drq1 == NULL)
+		return drq2;
+	if (drq2 == NULL)
+		return drq1;
+
+	data_dir = rq_data_dir(drq1->request);
+	last = dd->last_sector[data_dir];
+	s1 = drq1->request->sector;
+	s2 = drq2->request->sector;
+
+	BUG_ON(data_dir != rq_data_dir(drq2->request));
+
+	if (s1 >= last)
+		d1 = s1 - last;
+	else {
+		/* count large back seeks as a forward seek */
+		if (dd->current_id == drq1->request_id && s1+maxback >= last)
+			d1 = (last - s1)*2;
+		else
+			d1 = (last - s1)*8;
+	}
+
+	if (s2 >= last)
+		d2 = s2 - last;
+	else {
+		if (dd->current_id == drq2->request_id && s2+maxback >= last)
+			d2 = (last - s2)*2;
+		else
+			d2 = (last - s2)*8;
+	}
+
+	if (d1 < d2)
+		return drq1;
+	else if (d2 < d1)
+		return drq2;
+	else {
+		if (s1 >= s2)
+			return drq1;
+		else
+			return drq2;
+	}
+}
+
+/*
+ * deadline_antic_req, has @dd been anticipating this @drq?
+ */
+static int
+deadline_antic_req(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	if (deadline_close_req(dd, drq)
+		|| dd->current_id == drq->request_id)
 		return 1;
 
 	return 0;
 }
 
 /*
- * deadline_check_batch returns 0 if the current batch has not expired,
- * 1 otherwise.
+ * deadline_update_drq must be called whenever a request (drq) is added to
+ * the sort_list. This function keeps caches up to date, and checks if the
+ * request might be one we are "anticipating"
  */
-static inline int deadline_batch_expired(struct deadline_data *dd)
+static void
+deadline_update_drq(struct deadline_data *dd, struct deadline_rq *drq)
 {
-	return time_after(jiffies, dd->current_batch_expires);
+	const int data_dir = rq_data_dir(drq->request);
+	sector_t last = dd->last_sector[data_dir];
+	sector_t this = drq->request->sector;
+	unsigned long delay = jiffies - dd->antic_start;
+
+	drq->request_id = request_id();
+
+	if (data_dir == READ)
+		ant_stats.reads++;
+	else
+		ant_stats.writes++;
+
+	/* keep the next_drq cache up to date */
+	dd->next_drq[data_dir] = deadline_choose_req(dd, drq, dd->next_drq[data_dir]);
+
+	/* have we been anticipating this request? */
+	if (dd->antic_status != ANTIC_OFF && data_dir == READ && deadline_antic_req(dd, drq)) {
+		long lba_offset;
+		int neg;
+		int log2;
+
+		if (delay >= ARRAY_SIZE(ant_stats.ant_delay_hist))
+			delay = ARRAY_SIZE(ant_stats.ant_delay_hist) - 1;
+		ant_stats.ant_delay_hist[delay]++;
+		ant_stats.anticipate_hits++;
+
+		lba_offset = this - last;
+		neg = 0;
+		if (lba_offset < 0) {
+			lba_offset = -lba_offset;
+			neg = 1;
+		}
+		log2 = ffs(lba_offset);
+		BUG_ON(log2 >= 32);
+		if (neg)
+			ant_stats.lba_backward_offsets[log2]++;
+		else
+			ant_stats.lba_forward_offsets[log2]++;
+
+		del_timer(&dd->antic_timer);
+		dd->antic_status = ANTIC_FOUND;
+		blk_remove_plug(drq->request->q);
+		schedule_work(&dd->antic_work);
+	}
 }
 
 /*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, batch expire, etc
+ * deadline_dispatch_request selects the best request according to
+ * read/write expire, batch expire, etc, and moves it to the dispatch
+ * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_request(struct request_queue *q)
 {
+	struct deadline_data *dd = q->elevator.elevator_data;
+	struct deadline_rq *drq;
 	const int reads = !list_empty(&dd->fifo_list[READ]);
 	const int writes = !list_empty(&dd->fifo_list[WRITE]);
-	struct deadline_rq *drq;
-	int data_dir, other_dir;
 
-	/*
-	 * batches are reads XOR writes
-	 */
-	drq = dd->next_drq[dd->batch_data_dir];
+	if (!(reads || writes))
+		return 0;
 
-	if (drq && !deadline_batch_expired(dd))
-		/* we have a "next request" and our batch is still running */
-		goto dispatch_request;
+	if (deadline_batch_expired(dd)) {
+		if (dd->batch_data_dir == READ)
+			ant_stats.expired_read_batches++;
+		else
+			ant_stats.expired_write_batches++;
+	}
+
+	if (!(reads && writes && deadline_batch_expired(dd)) ) {
+		/*
+		 * batch is still running or no reads or no writes
+		 */
+		drq = dd->next_drq[dd->batch_data_dir];
+
+		if (dd->batch_data_dir == READ && dd->antic_expire) {
+			if (deadline_check_fifo(dd, READ)) {
+				if (deadline_fifo_expired(dd, READ))
+					goto dispatch_request;
+
+				dd->current_check_fifo[READ] = jiffies +
+					dd->fifo_expire[READ];
+			}
+
+			if (dd->antic_status != ANTIC_FOUND
+				&& (dd->antic_status == ANTIC_OFF || jiffies < dd->antic_start + dd->antic_expire)
+				&& (!drq || !deadline_antic_req(dd, drq)) ) {
+				unsigned long timeout;
+
+				if (dd->antic_status == ANTIC_OFF) {
+					ant_stats.anticipate_starts++;
+					dd->antic_start = jiffies;
+				}
+				timeout = min(dd->antic_start + dd->antic_expire,
+						dd->current_batch_expires);
+				timeout = min(timeout, dd->current_check_fifo[READ]);
+				mod_timer(&dd->antic_timer, timeout);
+
+				dd->antic_status = ANTIC_WAIT;
+				blk_plug_device(q);
+
+				return 0;
+			}
+
+		}
+
+		if (drq) {
+			/* we have a "next request" */
+			if (reads && !writes)
+				dd->current_batch_expires = jiffies + dd->batch_expire[READ];
+			goto dispatch_request;
+		}
+	}
 
 	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
 	 */
-
-	dd->current_batch_expires = jiffies + dd->batch_expire[WRITE];
 	if (reads) {
 		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
 
-		if (writes && (dd->starved++ >= dd->writes_starved))
+		if (writes && dd->batch_data_dir == READ)
+			/*
+			 * Last batch was a read, switch to writes
+			 */
 			goto dispatch_writes;
 
-		data_dir = dd->batch_data_dir = READ;
-		dd->current_batch_expires = jiffies + dd->batch_expire[READ];
-		other_dir = WRITE;
-
-		goto dispatch_find_request;
+		dd->batch_data_dir = READ;
+		drq = dd->next_drq[dd->batch_data_dir];
+		dd->current_batch_expires = jiffies +
+			dd->batch_expire[dd->batch_data_dir];
+		goto dispatch_request;
 	}
 
 	/*
-	 * there are either no reads or writes have been starved
+	 * there are either no reads or the last batch was a read
 	 */
 
 	if (writes) {
 dispatch_writes:
 		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
 
-		dd->starved = 0;
-
-		data_dir = dd->batch_data_dir = WRITE;
-		other_dir = READ;
-
-		goto dispatch_find_request;
+		dd->batch_data_dir = WRITE;
+		drq = dd->next_drq[dd->batch_data_dir];
+		dd->current_batch_expires = jiffies +
+			dd->batch_expire[dd->batch_data_dir];
+		goto dispatch_request;
 	}
 
+	BUG();
 	return 0;
 
-dispatch_find_request:
+dispatch_request:
 	/*
-	 * we are not running a batch, find best request for selected data_dir
+	 * check fifo if it is due
 	 */
-	if (deadline_check_fifo(dd, data_dir))
-		/* An expired request exists - satisfy it */
-		drq = list_entry_fifo(dd->fifo_list[data_dir].next);
-
-	else
-		/* No expired requests, get the next in sort order */
-		drq = dd->next_drq[data_dir];
+	if (deadline_check_fifo(dd, dd->batch_data_dir)) {
+		/* reset timer to check once per expire interval */
+		dd->current_check_fifo[dd->batch_data_dir] = jiffies +
+			dd->fifo_expire[dd->batch_data_dir];
+
+		if (deadline_fifo_expired(dd, dd->batch_data_dir))
+			/* An expired request exists - satisfy it */
+			drq = list_entry_fifo(
+				dd->fifo_list[dd->batch_data_dir].next);
+	}
 
-dispatch_request:
 	/*
 	 * drq is the selected appropriate request.
 	 */
+	dd->antic_status = ANTIC_OFF;
 	deadline_move_request(dd, drq);
-
 	return 1;
 }
 
@@ -621,7 +921,7 @@ dispatch:
 		return rq;
 	}
 
-	if (deadline_dispatch_requests(dd))
+	if (deadline_dispatch_request(q))
 		goto dispatch;
 
 	return NULL;
@@ -644,6 +944,14 @@ deadline_insert_request(request_queue_t
 			insert_here = dd->dispatch->prev;
 
 		list_add(&rq->queuelist, insert_here);
+
+		if (rq_data_dir(rq) == READ && dd->antic_status != ANTIC_OFF) {
+			del_timer(&dd->antic_timer);
+			dd->antic_status = ANTIC_FOUND;
+			blk_remove_plug(q);
+			schedule_work(&dd->antic_work);
+		}
+
 		return;
 	}
 
@@ -662,13 +970,36 @@ static int deadline_queue_empty(request_
 	struct deadline_data *dd = q->elevator.elevator_data;
 
 	if (!list_empty(&dd->fifo_list[WRITE])
-	    || !list_empty(&dd->fifo_list[READ])
-	    || !list_empty(dd->dispatch))
-		return 0;
+		|| !list_empty(&dd->fifo_list[READ])
+		|| !list_empty(dd->dispatch) )
+			return 0;
 
 	return 1;
 }
 
+/*
+ * deadline_queue_notready tells us whether or not deadline_next_request
+ * will return us a request or NULL.  With the previous work-conserving
+ * scheduler this API was designed around, if a queue had requests in it,
+ * deadline_next_request would return a request, and drivers seem to make
+ * that assumption.
+ */
+static int deadline_queue_notready(request_queue_t *q)
+{
+	struct deadline_data *dd = q->elevator.elevator_data;
+
+	if (!list_empty(dd->dispatch))
+		return 0;
+
+	if (dd->antic_status == ANTIC_WAIT)
+		return 1;
+
+	if (!deadline_dispatch_request(q))
+		return 1;
+
+	return 0;
+}
+
 static struct request *
 deadline_former_request(request_queue_t *q, struct request *rq)
 {
@@ -747,6 +1078,12 @@ static int deadline_init(request_queue_t
 		return -ENOMEM;
 	}
 
+	/* anticipatory scheduling helpers */
+	dd->antic_timer.function = deadline_anticipate_timeout;
+	dd->antic_timer.data = (unsigned long)q;
+	init_timer(&dd->antic_timer);
+	INIT_WORK(&dd->antic_work, deadline_anticipate_work, q);
+
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
 		INIT_LIST_HEAD(&dd->hash[i]);
 
@@ -758,8 +1095,8 @@ static int deadline_init(request_queue_t
 	dd->fifo_expire[READ] = read_expire;
 	dd->fifo_expire[WRITE] = write_expire;
 	dd->hash_valid_count = 1;
-	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
+	dd->antic_expire = antic_expire;
 	dd->batch_expire[READ] = read_batch_expire;
 	dd->batch_expire[WRITE] = write_batch_expire;
 	e->elevator_data = dd;
@@ -823,8 +1160,8 @@ static ssize_t __FUNC(struct deadline_da
 }
 SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ]);
 SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE]);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved);
 SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges);
+SHOW_FUNCTION(deadline_anticexpire_show, dd->antic_expire);
 SHOW_FUNCTION(deadline_read_batchexpire_show, dd->batch_expire[READ]);
 SHOW_FUNCTION(deadline_write_batchexpire_show, dd->batch_expire[WRITE]);
 #undef SHOW_FUNCTION
@@ -841,8 +1178,8 @@ static ssize_t __FUNC(struct deadline_da
 }
 STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX);
 STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1);
+STORE_FUNCTION(deadline_anticexpire_store, &dd->antic_expire, 0, INT_MAX);
 STORE_FUNCTION(deadline_read_batchexpire_store, &dd->batch_expire[READ], 0, INT_MAX);
 STORE_FUNCTION(deadline_write_batchexpire_store,
@@ -859,16 +1196,16 @@ static struct deadline_fs_entry deadline
 	.show = deadline_writeexpire_show,
 	.store = deadline_writeexpire_store,
 };
-static struct deadline_fs_entry deadline_writesstarved_entry = {
-	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_writesstarved_show,
-	.store = deadline_writesstarved_store,
-};
 static struct deadline_fs_entry deadline_frontmerges_entry = {
 	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
 	.show = deadline_frontmerges_show,
 	.store = deadline_frontmerges_store,
 };
+static struct deadline_fs_entry deadline_anticexpire_entry = {
+	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_anticexpire_show,
+	.store = deadline_anticexpire_store,
+};
 static struct deadline_fs_entry deadline_read_batchexpire_entry = {
 	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
 	.show = deadline_read_batchexpire_show,
@@ -883,8 +1220,8 @@ static struct deadline_fs_entry deadline
 
 static struct attribute *default_attrs[] = {
 	&deadline_readexpire_entry.attr,
 	&deadline_writeexpire_entry.attr,
-	&deadline_writesstarved_entry.attr,
 	&deadline_frontmerges_entry.attr,
+	&deadline_anticexpire_entry.attr,
 	&deadline_read_batchexpire_entry.attr,
 	&deadline_write_batchexpire_entry.attr,
 	NULL,
@@ -947,7 +1284,7 @@ elevator_t iosched_deadline = {
 	.elevator_next_req_fn = deadline_next_request,
 	.elevator_add_req_fn = deadline_insert_request,
 	.elevator_remove_req_fn = deadline_remove_request,
-	.elevator_queue_empty_fn = deadline_queue_empty,
+	.elevator_queue_empty_fn = deadline_queue_notready,
 	.elevator_former_req_fn = deadline_former_request,
 	.elevator_latter_req_fn = deadline_latter_request,
 	.elevator_init_fn = deadline_init,
diff -puN drivers/block/deadline-iosched.c.orig~anticipatory_io_scheduling drivers/block/deadline-iosched.c.orig
_
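
For anyone who wants to poke at the seek-distance heuristic outside the kernel, here is a small
standalone userspace sketch that mirrors the arithmetic deadline_choose_req() uses above.  It is
illustrative only and not part of the patch: the struct, helper names and the numbers in main()
are invented for the example; only the forward-distance cost, the MAXBACK cutoff and the 2x/8x
backward-seek weighting follow the code in the diff.

/*
 * Userspace sketch (assumed names, not kernel code) of the cost function
 * behind deadline_choose_req(): forward seeks cost their distance, a
 * backward seek within MAXBACK sectors from the anticipated process costs
 * twice the distance, any other backward seek costs eight times.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define MAXBACK (512 * 1024)	/* sectors of backward seek we will forgive */

struct candidate {
	sector_t sector;	/* starting sector of the request */
	unsigned long id;	/* request_id(): pid of the submitter */
};

/* Effective cost of servicing @c next, head at @last, anticipating @cur_id */
static sector_t seek_cost(const struct candidate *c, sector_t last,
			  unsigned long cur_id)
{
	if (c->sector >= last)
		return c->sector - last;
	if (cur_id == c->id && c->sector + MAXBACK >= last)
		return (last - c->sector) * 2;
	return (last - c->sector) * 8;
}

/* Pick the cheaper of two candidates; ties go to the higher sector. */
static const struct candidate *choose(const struct candidate *a,
				      const struct candidate *b,
				      sector_t last, unsigned long cur_id)
{
	sector_t da = seek_cost(a, last, cur_id);
	sector_t db = seek_cost(b, last, cur_id);

	if (da != db)
		return da < db ? a : b;
	return a->sector >= b->sector ? a : b;
}

int main(void)
{
	struct candidate fwd  = { .sector = 1000 + 4096, .id = 42 };
	struct candidate back = { .sector = 1000 - 256,  .id = 42 };
	const struct candidate *best;

	/* head at sector 1000, anticipating pid 42 */
	best = choose(&fwd, &back, 1000, 42);
	printf("picked sector %llu (backward cost %llu, forward cost %llu)\n",
	       (unsigned long long)best->sector,
	       (unsigned long long)seek_cost(&back, 1000, 42),
	       (unsigned long long)seek_cost(&fwd, 1000, 42));
	return 0;
}

Built with a stock gcc, this reports that the 256-sector backward request from the anticipated
process is preferred over the 4096-sector forward one (cost 512 vs 4096), which is the behaviour
the 2x weighting is meant to produce: a nearby backward request from the process we are
anticipating still beats a distant forward request, while backward seeks from anyone else are
penalised eight-fold.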