author     Paul Gortmaker <paul.gortmaker@windriver.com>  2011-02-06 23:15:01 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2011-02-06 23:15:01 -0500
commit     03a798aea22c853f80754474d8608aee8d92503e (patch)
tree       9f85ee71e983895b960795a23a4a66c81dd52228
parent     21ffbf575d5696a5a54d9e286e88a10a7018b000 (diff)
download   rt-patches-03a798aea22c853f80754474d8608aee8d92503e.tar.gz
refresh mm-realtime-support.patch
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  mm-realtime-support.patch  36
1 file changed, 18 insertions, 18 deletions
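For context on the patch being refreshed: as the hunks below show, mm-realtime-support.patch converts the mm unmap paths (shift_arg_pages(), unmap_vmas(), zap_page_range(), exit_mmap()) from a per-CPU mmu_gather handed out by tlb_gather_mmu() to an mmu_gather that lives on the caller's stack and is passed by address, so the unmap path no longer needs a preempt-disabled section. The sketch below is illustrative only: it is plain userspace C with stub types, the signatures are simplified (the real zap_page_range() operates on a vm_area_struct and returns the remaining zap budget), and it merely mimics the caller-side pattern visible in the refreshed patch, not the kernel implementation.

/* Illustrative sketch only -- stub types, not kernel code.  It mimics the
 * caller-side pattern the patch introduces: the mmu_gather is a local
 * variable handed to the helpers by address instead of a pointer returned
 * by tlb_gather_mmu(). */
#include <stdio.h>

struct mm_struct { const char *comm; };

struct mmu_gather {
	struct mm_struct *mm;
	int fullmm;		/* non-zero when tearing down a whole mm */
};

/* After the patch, tlb_gather_mmu() only initialises the caller's struct. */
static void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			   int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
}

static void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
			   unsigned long end)
{
	printf("%s: flush %#lx-%#lx (fullmm=%d)\n",
	       tlb->mm->comm, start, end, tlb->fullmm);
}

/* Shape of a zap-style caller after the patch: no **tlbp, no per-CPU state. */
static void zap_page_range(struct mm_struct *mm, unsigned long address,
			   unsigned long size)
{
	struct mmu_gather tlb;	/* lives on this stack frame */

	tlb_gather_mmu(&tlb, mm, 0);
	/* ... unmap_vmas(&tlb, ...) would walk and unmap the range here ... */
	tlb_finish_mmu(&tlb, address, address + size);
}

int main(void)
{
	struct mm_struct mm = { "demo" };

	zap_page_range(&mm, 0x400000UL, 0x2000UL);
	return 0;
}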
diff --git a/mm-realtime-support.patch b/mm-realtime-support.patch
index cb99459..3ac9be7 100644
--- a/mm-realtime-support.patch
+++ b/mm-realtime-support.patch
@@ -1,4 +1,4 @@
-From b1efb0a78957befe9a871455a77505764e5ef4b9 Mon Sep 17 00:00:00 2001
+From 550d97737d7acf2d5e48bff9e8973097164dc0b6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: [PATCH] mm: realtime support
@@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/fs/exec.c b/fs/exec.c
-index 9192f7e..2bcb327 100644
+index b8d5eb2..2780f61 100644
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -503,7 +503,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -504,7 +504,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long length = old_end - old_start;
unsigned long new_start = old_start - shift;
unsigned long new_end = old_end - shift;
@@ -24,7 +24,7 @@ index 9192f7e..2bcb327 100644
BUG_ON(new_start > new_end);
-@@ -528,12 +528,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -530,12 +530,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
return -ENOMEM;
lru_add_drain();
@@ -39,7 +39,7 @@ index 9192f7e..2bcb327 100644
vma->vm_next ? vma->vm_next->vm_start : 0);
} else {
/*
-@@ -542,10 +542,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -544,10 +544,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* have constraints on va-space that make this illegal (IA64) -
* for the others its just a little faster.
*/
@@ -51,7 +51,7 @@ index 9192f7e..2bcb327 100644
+ tlb_finish_mmu(&tlb, new_end, old_end);
/*
- * shrink the vma to just the new range.
+ * Shrink the vma to just the new range. Always succeeds.
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f976..30f998d 100644
--- a/include/asm-generic/tlb.h
@@ -159,10 +159,10 @@ index e43f976..30f998d 100644
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 60c467b..9817658 100644
+index 3899395..2fe57e7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
-@@ -758,7 +758,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+@@ -761,7 +761,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
@@ -203,10 +203,10 @@ index a2b76a5..2fd099c 100644
#else /* CONFIG_HIGHMEM */
diff --git a/mm/memory.c b/mm/memory.c
-index 553bf3d..aecb745 100644
+index 09e4158..e071ce4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -1010,17 +1010,14 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
+@@ -1104,17 +1104,14 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
@@ -225,7 +225,7 @@ index 553bf3d..aecb745 100644
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
-@@ -1041,11 +1038,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
+@@ -1135,11 +1132,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
untrack_pfn_vma(vma, 0, 0);
while (start != end) {
@@ -237,7 +237,7 @@ index 553bf3d..aecb745 100644
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
* It is undesirable to test vma->vm_file as it
-@@ -1066,7 +1058,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
+@@ -1160,7 +1152,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
start = end;
} else
@@ -246,7 +246,7 @@ index 553bf3d..aecb745 100644
start, end, &zap_work, details);
if (zap_work > 0) {
-@@ -1074,19 +1066,13 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
+@@ -1168,19 +1160,13 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
break;
}
@@ -267,7 +267,7 @@ index 553bf3d..aecb745 100644
zap_work = ZAP_BLOCK_SIZE;
}
}
-@@ -1106,16 +1092,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+@@ -1200,16 +1186,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
@@ -288,7 +288,7 @@ index 553bf3d..aecb745 100644
}
diff --git a/mm/mmap.c b/mm/mmap.c
-index ee22989..73ab63e 100644
+index f1b4448..c7fe939 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1820,17 +1820,17 @@ static void unmap_region(struct mm_struct *mm,
@@ -313,7 +313,7 @@ index ee22989..73ab63e 100644
}
/*
-@@ -2032,10 +2032,16 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2054,10 +2054,16 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
@@ -332,7 +332,7 @@ index ee22989..73ab63e 100644
#endif
}
-@@ -2143,7 +2149,7 @@ EXPORT_SYMBOL(do_brk);
+@@ -2166,7 +2172,7 @@ EXPORT_SYMBOL(do_brk);
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
@@ -341,7 +341,7 @@ index ee22989..73ab63e 100644
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
unsigned long end;
-@@ -2168,14 +2174,14 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2191,14 +2197,14 @@ void exit_mmap(struct mm_struct *mm)
lru_add_drain();
flush_cache_mm(mm);