Commit bb4cc2b

gormanm authored and torvalds committed
mm, vmscan: remove highmem_file_pages
With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant so remove it.

[[email protected]: wrong stat is being accumulated in highmem_dirtyable_memory]
  Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
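For illustration, a minimal userspace sketch (not kernel code; the zone_model struct and every name in it are made up for this example) of why the dedicated counter carries no information the per-zone LRU counters do not already hold:

#include <assert.h>
#include <stdio.h>

/* Toy model: scheme A is a dedicated running counter like the removed
 * highmem_file_pages atomic, bumped on every file-LRU update against a
 * highmem zone; scheme B derives the same total on demand from per-zone
 * LRU counters, as highmem_dirtyable_memory() now does. */

enum { INACTIVE_FILE, ACTIVE_FILE, NR_FILE_LRU };

struct zone_model {
        int is_highmem;
        long lru[NR_FILE_LRU];  /* per-zone file-LRU stats (scheme B) */
};

static long highmem_file_pages; /* dedicated counter (scheme A) */

static void update_lru_size(struct zone_model *z, int lru, long nr_pages)
{
        z->lru[lru] += nr_pages;                /* scheme B bookkeeping */
        if (z->is_highmem)
                highmem_file_pages += nr_pages; /* scheme A bookkeeping */
}

int main(void)
{
        struct zone_model zones[2] = {
                { .is_highmem = 0 },    /* stands in for ZONE_NORMAL */
                { .is_highmem = 1 },    /* stands in for ZONE_HIGHMEM */
        };
        long derived = 0;
        int i;

        update_lru_size(&zones[1], INACTIVE_FILE, 128);
        update_lru_size(&zones[1], ACTIVE_FILE, 64);
        update_lru_size(&zones[1], INACTIVE_FILE, -32);
        update_lru_size(&zones[0], ACTIVE_FILE, 256);   /* not highmem */

        for (i = 0; i < 2; i++)
                if (zones[i].is_highmem)
                        derived += zones[i].lru[INACTIVE_FILE] +
                                   zones[i].lru[ACTIVE_FILE];

        /* scheme B recovers exactly what scheme A tracked */
        assert(derived == highmem_file_pages);
        printf("highmem file pages: %ld\n", derived);
        return 0;
}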
1 parent 71c799f commit bb4cc2b

File tree: 2 files changed (+4, -25 lines)


include/linux/mm_inline.h

Lines changed: 0 additions & 17 deletions
@@ -4,22 +4,6 @@
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
 
-#ifdef CONFIG_HIGHMEM
-extern atomic_t highmem_file_pages;
-
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                        int nr_pages)
-{
-        if (is_highmem_idx(zid) && is_file_lru(lru))
-                atomic_add(nr_pages, &highmem_file_pages);
-}
-#else
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-                                        int nr_pages)
-{
-}
-#endif
-
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -47,7 +31,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
         __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
         __mod_zone_page_state(&pgdat->node_zones[zid],
                                 NR_ZONE_LRU_BASE + lru, nr_pages);
-        acct_highmem_file_pages(zid, lru, nr_pages);
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
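With the hunk applied, and reconstructing the parts the diff context does not show (the function signature and the pgdat local are assumptions based on the surrounding code of this era, not lines from the hunk), __update_lru_size() should reduce to roughly:

static __always_inline void __update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                int nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec); /* assumed */

        /* one node-wide stat plus the per-zone stat that makes the
         * dedicated highmem counter redundant */
        __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
}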

mm/page-writeback.c

Lines changed: 4 additions & 8 deletions
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 
         return nr_pages;
 }
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
         int node;
-        unsigned long x;
+        unsigned long x = 0;
         int i;
-        unsigned long dirtyable = 0;
 
         for_each_node_state(node, N_HIGH_MEMORY) {
                 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
                         nr_pages = zone_page_state(z, NR_FREE_PAGES);
                         /* watch for underflows */
                         nr_pages -= min(nr_pages, high_wmark_pages(z));
-                        dirtyable += nr_pages;
+                        nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+                        nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                        x += nr_pages;
                 }
         }
 
-        x = dirtyable + atomic_read(&highmem_file_pages);
-
         /*
          * Unreclaimable memory (kernel memory or anonymous memory
          * without swap) can bring down the dirtyable pages below
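The per-zone term accumulated in the loop above amounts to the following sketch; zone_dirtyable_pages is a hypothetical name used only here, not a kernel function:

/* hypothetical helper, for illustration only */
static unsigned long zone_dirtyable_pages(unsigned long nr_free,
                                          unsigned long high_wmark,
                                          unsigned long nr_inactive_file,
                                          unsigned long nr_active_file)
{
        /* watch for underflows: free pages can already sit below the
         * high watermark */
        nr_free -= (nr_free < high_wmark) ? nr_free : high_wmark;

        /* pages the zone could devote to dirty file cache: free pages
         * above the watermark plus its file-backed LRU pages */
        return nr_free + nr_inactive_file + nr_active_file;
}

Before this change, the file-LRU part came from the global highmem_file_pages atomic added once after the loop; reading the per-zone counters inside the loop yields the same sum without the extra bookkeeping.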
