From: Martin Hicks

This adds a syscall to call into the reclaim code.  The use for this would
be to clear all unneeded pagecache and slabcache off a node before running
a big HPC job.

A "memory freer" app can be found at:

	http://www.bork.org/~mort/sgi/localreclaim/reclaim_memory.c

Signed-off-by: Martin Hicks
Signed-off-by: Andrew Morton
---

 arch/ia64/kernel/entry.S |    6 ++---
 kernel/sys_ni.c          |    2 +
 mm/vmscan.c              |   52 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 3 deletions(-)

diff -puN arch/ia64/kernel/entry.S~vm-reclaim_page_cache_node-syscall arch/ia64/kernel/entry.S
--- 25/arch/ia64/kernel/entry.S~vm-reclaim_page_cache_node-syscall	2005-05-03 16:14:03.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/entry.S	2005-05-03 16:14:03.000000000 -0700
@@ -1571,9 +1571,9 @@ sys_call_table:
 	data8 sys_add_key
 	data8 sys_request_key
 	data8 sys_keyctl
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall			// 1275
-	data8 sys_ni_syscall
+	data8 sys_ni_syscall			// reserved for sys_ioprio_set
+	data8 sys_ni_syscall			// reserved for sys_ioprio_get	1275
+	data8 sys_reclaim_page_cache_node
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
diff -puN kernel/sys_ni.c~vm-reclaim_page_cache_node-syscall kernel/sys_ni.c
--- 25/kernel/sys_ni.c~vm-reclaim_page_cache_node-syscall	2005-05-03 16:14:03.000000000 -0700
+++ 25-akpm/kernel/sys_ni.c	2005-05-03 16:14:03.000000000 -0700
@@ -77,6 +77,8 @@ cond_syscall(sys_request_key);
 cond_syscall(sys_keyctl);
 cond_syscall(compat_sys_keyctl);
 cond_syscall(compat_sys_socketcall);
+cond_syscall(sys_reclaim_page_cache_node);
+cond_syscall(compat_sys_reclaim_page_cache_node);

 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
diff -puN mm/vmscan.c~vm-reclaim_page_cache_node-syscall mm/vmscan.c
--- 25/mm/vmscan.c~vm-reclaim_page_cache_node-syscall	2005-05-03 16:14:03.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2005-05-03 16:14:03.000000000 -0700
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include

 #include
 #include
@@ -1505,3 +1506,54 @@ unsigned int reclaim_clean_pages(struct

 	return total_reclaimed;
 }
+
+
+/* Free some page cache on a specified node */
+asmlinkage long sys_reclaim_page_cache_node(unsigned int node,
+					    unsigned long bytes,
+					    unsigned int flags)
+{
+	unsigned long pages_to_reclaim;
+	unsigned long reclaimed = 0;
+	int i;
+	struct zone *z, **zones;
+
+	if (!node_online(node))
+		/* get a better error code here? */
+		return -EINVAL;
+
+	/* Check to make sure that we have reasonable flag values */
+	if (flags & RECLAIM_MASK)
+		return -EINVAL;
+
+	/* Set the Manual reclaim flag to override rate limiting */
+	flags |= RECLAIM_MANUAL;
+
+	pages_to_reclaim = (bytes + PAGE_SIZE - 1)/PAGE_SIZE;
+
+	/*
+	 * This is kind of bad because we're using zone internals.
+	 * The goal here is to start reclaiming from the "highest" zone,
+	 * ZONE_HIGHMEM -> ZONE_NORMAL -> ZONE_DMA
+	 */
+	zones = (NODE_DATA(node)->node_zonelists+ZONE_HIGHMEM)->zones;
+	for (i = 0; (z = zones[i]) && reclaimed < pages_to_reclaim; i++) {
+		if (!z->present_pages)
+			continue;
+		reclaimed += reclaim_clean_pages(z, pages_to_reclaim,
+						 flags);
+	}
+
+	return reclaimed * PAGE_SIZE;
+}
+
+#ifdef CONFIG_COMPAT
+
+asmlinkage long compat_sys_reclaim_page_cache_node(compat_uint_t node,
+						   compat_ulong_t bytes,
+						   compat_uint_t flags)
+{
+	return sys_reclaim_page_cache_node(node, bytes, flags);
+}
+
+#endif
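
For illustration only, and not part of the patch: a minimal userspace caller
in the spirit of the reclaim_memory.c tool linked in the changelog.  The
details below are assumptions rather than established API: the syscall
number is a placeholder (the ia64 table above puts the new entry in the
slot after the one commented "1275", so 1276 is used here), and on a kernel
without this patch the call simply fails with ENOSYS.

/*
 * Hypothetical usage sketch, not part of the patch.  The syscall number
 * below is an assumption taken from the ia64 table above; adjust it for
 * your tree, or use the real reclaim_memory.c from the URL in the
 * changelog.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_reclaim_page_cache_node
#define __NR_reclaim_page_cache_node	1276	/* assumed ia64 slot */
#endif

int main(int argc, char **argv)
{
	unsigned int node;
	unsigned long bytes;
	long freed;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <node> <bytes>\n", argv[0]);
		return 1;
	}
	node = strtoul(argv[1], NULL, 0);
	bytes = strtoul(argv[2], NULL, 0);

	/* flags == 0: plain request; the kernel sets RECLAIM_MANUAL itself */
	freed = syscall(__NR_reclaim_page_cache_node, node, bytes, 0U);
	if (freed < 0) {
		perror("reclaim_page_cache_node");	/* ENOSYS if unpatched */
		return 1;
	}
	printf("reclaimed %ld bytes of clean page cache from node %u\n",
	       freed, node);
	return 0;
}

The syscall returns the number of bytes actually reclaimed, which can be
less than the amount asked for if the node's zones run out of clean,
unmapped page cache.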