From: Oleg Nesterov

Currently page_cache_readahead() treats ra->size == 0 (first read) and
ra->size == -1 (ra_off() was called) separately, but does exactly the same
thing in both cases.

With this patch we may assume that reading starts in the 'ra_off()' state,
so we no longer need to treat the first read as a special case.

Signed-off-by: Oleg Nesterov
Signed-off-by: Andrew Morton
---

 25-akpm/mm/readahead.c |   10 ++++------
 1 files changed, 4 insertions(+), 6 deletions(-)

diff -puN mm/readahead.c~readahead-simplify-ra-size-testing mm/readahead.c
--- 25/mm/readahead.c~readahead-simplify-ra-size-testing	2005-03-02 17:57:50.000000000 -0800
+++ 25-akpm/mm/readahead.c	2005-03-02 17:57:52.000000000 -0800
@@ -55,7 +55,7 @@ static inline void ra_off(struct file_ra
 {
 	ra->start = 0;
 	ra->flags = 0;
-	ra->size = -1;
+	ra->size = 0;
 	ra->ahead_start = 0;
 	ra->ahead_size = 0;
 	return;
@@ -452,7 +452,7 @@ page_cache_readahead(struct address_spac
 	 * perturbing the readahead window expansion logic.
 	 * If size is zero, there is no read ahead window so we need one
 	 */
-	if (offset == ra->prev_page && req_size == 1 && ra->size != 0)
+	if (offset == ra->prev_page && req_size == 1)
 		goto out;
 
 	ra->prev_page = offset;
@@ -471,9 +471,7 @@ page_cache_readahead(struct address_spac
 	 * at start of file, and grow the window fast.  Or detect first
 	 * sequential access
 	 */
-	if ((ra->size == 0 && offset == 0)	/* first io and start of file */
-	    || (ra->size == -1 && sequential)) {
-		/* First sequential */
+	if (sequential && ra->size == 0) {
 		ra->size = get_init_ra_size(newsize, max);
 		ra->start = offset;
 		if (!blockable_page_cache_readahead(mapping, filp, offset,
@@ -499,7 +497,7 @@ page_cache_readahead(struct address_spac
 	 * partial page reads and first access were handled above,
 	 * so this must be the next page otherwise it is random
 	 */
-	if (!sequential || (ra->size == 0)) {
+	if (!sequential) {
 		ra_off(ra);
 		blockable_page_cache_readahead(mapping, filp, offset,
 						newsize, ra, 1);
_
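
For readers not following along in mm/readahead.c, below is a minimal userspace
sketch of the state handling this patch simplifies.  The struct and function
names only mirror the kernel code: the access-pattern test is passed in as a
flag, init_window() is a hypothetical stand-in for get_init_ra_size(), and the
actual page cache I/O is omitted.  It is an illustration of why the size == -1
sentinel becomes unnecessary once ra_off() leaves size == 0, not the real
implementation.

#include <stdio.h>

/* Minimal model of the fields this patch touches in struct file_ra_state. */
struct file_ra_state {
	unsigned long start;		/* first page of the current window */
	unsigned long size;		/* window size; 0 means "no window" */
	unsigned long prev_page;	/* last page requested */
};

/* With the patch applied, turning readahead off leaves size == 0 --
 * the same value a zero-initialized file_ra_state starts with. */
static void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->size = 0;
}

/* Hypothetical stand-in for get_init_ra_size(): just reuse the request size. */
static unsigned long init_window(unsigned long req_size)
{
	return req_size;
}

/*
 * Sketch of the simplified branch structure.  'sequential' stands for the
 * access-pattern test the real function performs; window growth and the
 * ahead window are omitted here.
 */
static void readahead_decide(struct file_ra_state *ra, unsigned long offset,
			     unsigned long req_size, int sequential)
{
	ra->prev_page = offset;

	if (sequential && ra->size == 0) {
		/* Covers both the very first read (size starts at 0) and a
		 * read after ra_off(); no -1 sentinel is needed anymore. */
		ra->size = init_window(req_size);
		ra->start = offset;
		printf("offset %lu: open window [%lu, +%lu)\n",
		       offset, ra->start, ra->size);
		return;
	}

	if (!sequential) {
		/* Random access: drop back to the "no window" state. */
		ra_off(ra);
		printf("offset %lu: random access, readahead off\n", offset);
		return;
	}

	printf("offset %lu: sequential, keep window [%lu, +%lu)\n",
	       offset, ra->start, ra->size);
}

int main(void)
{
	struct file_ra_state ra = { 0 };	/* first read: size == 0 */

	readahead_decide(&ra, 0, 4, 1);		/* first sequential read */
	readahead_decide(&ra, 1, 4, 1);		/* keeps the window */
	readahead_decide(&ra, 100, 4, 0);	/* random: ra_off() */
	readahead_decide(&ra, 101, 4, 1);	/* same path as the first read */
	return 0;
}

Running the sketch shows that the read at offset 101 (after ra_off()) takes
exactly the same branch as the very first read at offset 0, which is the point
of dropping the separate -1 case.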