summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@suse.de>2011-11-02 12:56:24 -0700
committerGreg Kroah-Hartman <gregkh@suse.de>2011-11-02 12:56:24 -0700
commit8f84e37a6729649d2c84bc6a684cc9b84116d8bb (patch)
tree176da30eb63349cb296406046327489ecec50641
parentf63c580d89ac6d9801c1e05a0b476c945566c3f7 (diff)
downloadlongterm-queue-2.6.32-8f84e37a6729649d2c84bc6a684cc9b84116d8bb.tar.gz
.32 patches
-rw-r--r--queue-2.6.32/cfq-break-apart-merged-cfqqs-if-they-stop-cooperating.patch176
-rw-r--r--queue-2.6.32/cfq-calculate-the-seek_mean-per-cfq_queue-not-per-cfq_io_context.patch209
-rw-r--r--queue-2.6.32/cfq-change-the-meaning-of-the-cfqq_coop-flag.patch101
-rw-r--r--queue-2.6.32/cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch176
-rw-r--r--queue-2.6.32/cfq-iosched-get-rid-of-the-coop_preempt-flag.patch70
-rw-r--r--queue-2.6.32/cfq-merge-cooperating-cfq_queues.patch177
-rw-r--r--queue-2.6.32/kvm-x86-reset-tsc_timestamp-on-tsc-writes.patch91
-rw-r--r--queue-2.6.32/series9
-rw-r--r--queue-2.6.32/um-fix-ubd-cow-size.patch66
-rw-r--r--queue-2.6.32/xen-timer-missing-irqf_no_suspend-in-timer-code-broke-suspend.patch36
10 files changed, 1111 insertions, 0 deletions
diff --git a/queue-2.6.32/cfq-break-apart-merged-cfqqs-if-they-stop-cooperating.patch b/queue-2.6.32/cfq-break-apart-merged-cfqqs-if-they-stop-cooperating.patch
new file mode 100644
index 0000000..b53b8f3
--- /dev/null
+++ b/queue-2.6.32/cfq-break-apart-merged-cfqqs-if-they-stop-cooperating.patch
@@ -0,0 +1,176 @@
+From sjayaraman@suse.de Wed Nov 2 12:46:18 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:27:45 +0530
+Subject: cfq: break apart merged cfqqs if they stop cooperating
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E844119.9070801@suse.de>
+
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit e6c5bc737ab71e4af6025ef7d150f5a26ae5f146 upstream.
+
+cfq_queues are merged if they are issuing requests within the mean seek
+distance of one another. This patch detects when the cooperating stops and
+breaks the queues back up.
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 76 insertions(+), 3 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -38,6 +38,12 @@ static int cfq_slice_idle = HZ / 125;
+ */
+ #define CFQ_MIN_TT (2)
+
++/*
++ * Allow merged cfqqs to perform this amount of seeky I/O before
++ * deciding to break the queues up again.
++ */
++#define CFQQ_COOP_TOUT (HZ)
++
+ #define CFQ_SLICE_SCALE (5)
+ #define CFQ_HW_QUEUE_MIN (5)
+
+@@ -116,6 +122,7 @@ struct cfq_queue {
+ u64 seek_total;
+ sector_t seek_mean;
+ sector_t last_request_pos;
++ unsigned long seeky_start;
+
+ pid_t pid;
+
+@@ -1041,6 +1048,11 @@ static struct cfq_queue *cfq_close_coope
+ {
+ struct cfq_queue *cfqq;
+
++ if (!cfq_cfqq_sync(cur_cfqq))
++ return NULL;
++ if (CFQQ_SEEKY(cur_cfqq))
++ return NULL;
++
+ /*
+ * We should notice if some of the queues are cooperating, eg
+ * working closely on the same area of the disk. In that case,
+@@ -1055,6 +1067,8 @@ static struct cfq_queue *cfq_close_coope
+ */
+ if (!cfq_cfqq_sync(cfqq))
+ return NULL;
++ if (CFQQ_SEEKY(cfqq))
++ return NULL;
+
+ return cfqq;
+ }
+@@ -1186,7 +1200,7 @@ static int cfqq_process_refs(struct cfq_
+
+ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+ {
+- int process_refs;
++ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
+ /* Avoid a circular list and skip interim queue merges */
+@@ -1204,8 +1218,17 @@ static void cfq_setup_merge(struct cfq_q
+ if (process_refs == 0)
+ return;
+
+- cfqq->new_cfqq = new_cfqq;
+- atomic_add(process_refs, &new_cfqq->ref);
++ /*
++ * Merge in the direction of the lesser amount of work.
++ */
++ new_process_refs = cfqq_process_refs(new_cfqq);
++ if (new_process_refs >= process_refs) {
++ cfqq->new_cfqq = new_cfqq;
++ atomic_add(process_refs, &new_cfqq->ref);
++ } else {
++ new_cfqq->new_cfqq = cfqq;
++ atomic_add(new_process_refs, &cfqq->ref);
++ }
+ }
+
+ /*
+@@ -2040,6 +2063,19 @@ cfq_update_io_seektime(struct cfq_data *
+ total = cfqq->seek_total + (cfqq->seek_samples/2);
+ do_div(total, cfqq->seek_samples);
+ cfqq->seek_mean = (sector_t)total;
++
++ /*
++ * If this cfqq is shared between multiple processes, check to
++ * make sure that those processes are still issuing I/Os within
++ * the mean seek distance. If not, it may be time to break the
++ * queues apart again.
++ */
++ if (cfq_cfqq_coop(cfqq)) {
++ if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
++ cfqq->seeky_start = jiffies;
++ else if (!CFQQ_SEEKY(cfqq))
++ cfqq->seeky_start = 0;
++ }
+ }
+
+ /*
+@@ -2410,6 +2446,32 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, s
+ return cic_to_cfqq(cic, 1);
+ }
+
++static int should_split_cfqq(struct cfq_queue *cfqq)
++{
++ if (cfqq->seeky_start &&
++ time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
++ return 1;
++ return 0;
++}
++
++/*
++ * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
++ * was the last process referring to said cfqq.
++ */
++static struct cfq_queue *
++split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
++{
++ if (cfqq_process_refs(cfqq) == 1) {
++ cfqq->seeky_start = 0;
++ cfqq->pid = current->pid;
++ cfq_clear_cfqq_coop(cfqq);
++ return cfqq;
++ }
++
++ cic_set_cfqq(cic, NULL, 1);
++ cfq_put_queue(cfqq);
++ return NULL;
++}
+ /*
+ * Allocate cfq data structures associated with this request.
+ */
+@@ -2432,12 +2494,23 @@ cfq_set_request(struct request_queue *q,
+ if (!cic)
+ goto queue_fail;
+
++new_queue:
+ cfqq = cic_to_cfqq(cic, is_sync);
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
+ cic_set_cfqq(cic, cfqq, is_sync);
+ } else {
+ /*
++ * If the queue was seeky for too long, break it apart.
++ */
++ if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
++ cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
++ cfqq = split_cfqq(cic, cfqq);
++ if (!cfqq)
++ goto new_queue;
++ }
++
++ /*
+ * Check to see if this queue is scheduled to merge with
+ * another, closely cooperating queue. The merging of
+ * queues happens here as it must be done in process context.
diff --git a/queue-2.6.32/cfq-calculate-the-seek_mean-per-cfq_queue-not-per-cfq_io_context.patch b/queue-2.6.32/cfq-calculate-the-seek_mean-per-cfq_queue-not-per-cfq_io_context.patch
new file mode 100644
index 0000000..f5e9aa9
--- /dev/null
+++ b/queue-2.6.32/cfq-calculate-the-seek_mean-per-cfq_queue-not-per-cfq_io_context.patch
@@ -0,0 +1,209 @@
+From sjayaraman@suse.de Wed Nov 2 12:45:44 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:26:58 +0530
+Subject: cfq: calculate the seek_mean per cfq_queue not per cfq_io_context
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E8440EA.7080501@suse.de>
+
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit b2c18e1e08a5a9663094d57bb4be2f02226ee61c upstream.
+
+async cfq_queue's are already shared between processes within the same
+priority, and forthcoming patches will change the mapping of cic to sync
+cfq_queue from 1:1 to 1:N. So, calculate the seekiness of a process
+based on the cfq_queue instead of the cfq_io_context.
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 68 ++++++++++++++++++++++------------------------
+ include/linux/iocontext.h | 5 ---
+ 2 files changed, 33 insertions(+), 40 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -112,6 +112,11 @@ struct cfq_queue {
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class, org_ioprio_class;
+
++ unsigned int seek_samples;
++ u64 seek_total;
++ sector_t seek_mean;
++ sector_t last_request_pos;
++
+ pid_t pid;
+ };
+
+@@ -967,16 +972,16 @@ static inline sector_t cfq_dist_from_las
+ return cfqd->last_position - blk_rq_pos(rq);
+ }
+
+-#define CIC_SEEK_THR 8 * 1024
+-#define CIC_SEEKY(cic) ((cic)->seek_mean > CIC_SEEK_THR)
++#define CFQQ_SEEK_THR 8 * 1024
++#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
+-static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
++static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
++ struct request *rq)
+ {
+- struct cfq_io_context *cic = cfqd->active_cic;
+- sector_t sdist = cic->seek_mean;
++ sector_t sdist = cfqq->seek_mean;
+
+- if (!sample_valid(cic->seek_samples))
+- sdist = CIC_SEEK_THR;
++ if (!sample_valid(cfqq->seek_samples))
++ sdist = CFQQ_SEEK_THR;
+
+ return cfq_dist_from_last(cfqd, rq) <= sdist;
+ }
+@@ -1005,7 +1010,7 @@ static struct cfq_queue *cfqq_close(stru
+ * will contain the closest sector.
+ */
+ __cfqq = rb_entry(parent, struct cfq_queue, p_node);
+- if (cfq_rq_close(cfqd, __cfqq->next_rq))
++ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+ return __cfqq;
+
+ if (blk_rq_pos(__cfqq->next_rq) < sector)
+@@ -1016,7 +1021,7 @@ static struct cfq_queue *cfqq_close(stru
+ return NULL;
+
+ __cfqq = rb_entry(node, struct cfq_queue, p_node);
+- if (cfq_rq_close(cfqd, __cfqq->next_rq))
++ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+ return __cfqq;
+
+ return NULL;
+@@ -1039,13 +1044,6 @@ static struct cfq_queue *cfq_close_coope
+ struct cfq_queue *cfqq;
+
+ /*
+- * A valid cfq_io_context is necessary to compare requests against
+- * the seek_mean of the current cfqq.
+- */
+- if (!cfqd->active_cic)
+- return NULL;
+-
+- /*
+ * We should notice if some of the queues are cooperating, eg
+ * working closely on the same area of the disk. In that case,
+ * we can group them together and don't waste time idling.
+@@ -1115,7 +1113,7 @@ static void cfq_arm_slice_timer(struct c
+ * seeks. so allow a little bit of time for him to submit a new rq
+ */
+ sl = cfqd->cfq_slice_idle;
+- if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
++ if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
+ sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
+
+ mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+@@ -1958,33 +1956,33 @@ cfq_update_io_thinktime(struct cfq_data
+ }
+
+ static void
+-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
++cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *rq)
+ {
+ sector_t sdist;
+ u64 total;
+
+- if (!cic->last_request_pos)
++ if (!cfqq->last_request_pos)
+ sdist = 0;
+- else if (cic->last_request_pos < blk_rq_pos(rq))
+- sdist = blk_rq_pos(rq) - cic->last_request_pos;
++ else if (cfqq->last_request_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+ else
+- sdist = cic->last_request_pos - blk_rq_pos(rq);
++ sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+
+ /*
+ * Don't allow the seek distance to get too large from the
+ * odd fragment, pagein, etc
+ */
+- if (cic->seek_samples <= 60) /* second&third seek */
+- sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
++ if (cfqq->seek_samples <= 60) /* second&third seek */
++ sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
+ else
+- sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
++ sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
+
+- cic->seek_samples = (7*cic->seek_samples + 256) / 8;
+- cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+- total = cic->seek_total + (cic->seek_samples/2);
+- do_div(total, cic->seek_samples);
+- cic->seek_mean = (sector_t)total;
++ cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
++ cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
++ total = cfqq->seek_total + (cfqq->seek_samples/2);
++ do_div(total, cfqq->seek_samples);
++ cfqq->seek_mean = (sector_t)total;
+ }
+
+ /*
+@@ -2006,11 +2004,11 @@ cfq_update_idle_window(struct cfq_data *
+ enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
+
+ if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+- (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
++ (!cfqd->cfq_latency && cfqd->hw_tag && CFQQ_SEEKY(cfqq)))
+ enable_idle = 0;
+ else if (sample_valid(cic->ttime_samples)) {
+ unsigned int slice_idle = cfqd->cfq_slice_idle;
+- if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
++ if (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq))
+ slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+ if (cic->ttime_mean > slice_idle)
+ enable_idle = 0;
+@@ -2077,7 +2075,7 @@ cfq_should_preempt(struct cfq_data *cfqd
+ * if this request is as-good as one we would expect from the
+ * current cfqq, let it preempt
+ */
+- if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
++ if (cfq_rq_close(cfqd, cfqq, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+ cfqd->busy_queues == 1)) {
+ /*
+ * Mark new queue coop_preempt, so its coop flag will not be
+@@ -2127,10 +2125,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, s
+ cfqq->meta_pending++;
+
+ cfq_update_io_thinktime(cfqd, cic);
+- cfq_update_io_seektime(cfqd, cic, rq);
++ cfq_update_io_seektime(cfqd, cfqq, rq);
+ cfq_update_idle_window(cfqd, cfqq, cic);
+
+- cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+
+ if (cfqq == cfqd->active_queue) {
+ /*
+--- a/include/linux/iocontext.h
++++ b/include/linux/iocontext.h
+@@ -40,16 +40,11 @@ struct cfq_io_context {
+ struct io_context *ioc;
+
+ unsigned long last_end_request;
+- sector_t last_request_pos;
+
+ unsigned long ttime_total;
+ unsigned long ttime_samples;
+ unsigned long ttime_mean;
+
+- unsigned int seek_samples;
+- u64 seek_total;
+- sector_t seek_mean;
+-
+ struct list_head queue_list;
+ struct hlist_node cic_list;
+
diff --git a/queue-2.6.32/cfq-change-the-meaning-of-the-cfqq_coop-flag.patch b/queue-2.6.32/cfq-change-the-meaning-of-the-cfqq_coop-flag.patch
new file mode 100644
index 0000000..bc34f3a
--- /dev/null
+++ b/queue-2.6.32/cfq-change-the-meaning-of-the-cfqq_coop-flag.patch
@@ -0,0 +1,101 @@
+From sjayaraman@suse.de Wed Nov 2 12:46:08 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:27:37 +0530
+Subject: cfq: change the meaning of the cfqq_coop flag
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E844111.9030102@suse.de>
+
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit b3b6d0408c953524f979468562e7e210d8634150 upstream
+
+The flag used to indicate that a cfqq was allowed to jump ahead in the
+scheduling order due to submitting a request close to the queue that
+just executed. Since closely cooperating queues are now merged, the flag
+holds little meaning. Change it to indicate that multiple queues were
+merged. This will later be used to allow the breaking up of merged queues
+when they are no longer cooperating.
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 20 ++++++--------------
+ 1 file changed, 6 insertions(+), 14 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -202,7 +202,7 @@ enum cfqq_state_flags {
+ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
+ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
+ CFQ_CFQQ_FLAG_sync, /* synchronous queue */
+- CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */
++ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
+ CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */
+ };
+
+@@ -952,11 +952,8 @@ static struct cfq_queue *cfq_get_next_qu
+ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+ {
+- if (!cfqq) {
++ if (!cfqq)
+ cfqq = cfq_get_next_queue(cfqd);
+- if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
+- cfq_clear_cfqq_coop(cfqq);
+- }
+
+ if (cfqq)
+ cfq_clear_cfqq_coop_preempt(cfqq);
+@@ -1040,8 +1037,7 @@ static struct cfq_queue *cfqq_close(stru
+ * assumption.
+ */
+ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+- struct cfq_queue *cur_cfqq,
+- bool probe)
++ struct cfq_queue *cur_cfqq)
+ {
+ struct cfq_queue *cfqq;
+
+@@ -1060,11 +1056,6 @@ static struct cfq_queue *cfq_close_coope
+ if (!cfq_cfqq_sync(cfqq))
+ return NULL;
+
+- if (cfq_cfqq_coop(cfqq))
+- return NULL;
+-
+- if (!probe)
+- cfq_mark_cfqq_coop(cfqq);
+ return cfqq;
+ }
+
+@@ -1248,7 +1239,7 @@ static struct cfq_queue *cfq_select_queu
+ * cooperators and put the close queue at the front of the service
+ * tree. If possible, merge the expiring queue with the new cfqq.
+ */
+- new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
++ new_cfqq = cfq_close_cooperator(cfqd, cfqq);
+ if (new_cfqq) {
+ if (!cfqq->new_cfqq)
+ cfq_setup_merge(cfqq, new_cfqq);
+@@ -2313,7 +2304,7 @@ static void cfq_completed_request(struct
+ */
+ if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
+ cfq_slice_expired(cfqd, 1);
+- else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
++ else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
+ sync && !rq_noidle(rq))
+ cfq_arm_slice_timer(cfqd);
+ }
+@@ -2414,6 +2405,7 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, s
+ {
+ cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
+ cic_set_cfqq(cic, cfqq->new_cfqq, 1);
++ cfq_mark_cfqq_coop(cfqq->new_cfqq);
+ cfq_put_queue(cfqq);
+ return cic_to_cfqq(cic, 1);
+ }
diff --git a/queue-2.6.32/cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch b/queue-2.6.32/cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch
new file mode 100644
index 0000000..136b924
--- /dev/null
+++ b/queue-2.6.32/cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch
@@ -0,0 +1,176 @@
+From sjayaraman@suse.de Wed Nov 2 12:46:38 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:28:26 +0530
+Subject: cfq: Don't allow queue merges for queues that have no process references
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E844142.7030006@suse.de>
+
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit c10b61f0910466b4b99c266a7d76ac4390743fb5 upstream.
+
+Hi,
+
+A user reported a kernel bug when running a particular program that did
+the following:
+
+created 32 threads
+- each thread took a mutex, grabbed a global offset, added a buffer size
+ to that offset, released the lock
+- read from the given offset in the file
+- created a new thread to do the same
+- exited
+
+The result is that cfq's close cooperator logic would trigger, as the
+threads were issuing I/O within the mean seek distance of one another.
+This workload managed to routinely trigger a use after free bug when
+walking the list of merge candidates for a particular cfqq
+(cfqq->new_cfqq). The logic used for merging queues looks like this:
+
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
+ /* Avoid a circular list and skip interim queue merges */
+ while ((__cfqq = new_cfqq->new_cfqq)) {
+ if (__cfqq == cfqq)
+ return;
+ new_cfqq = __cfqq;
+ }
+
+ process_refs = cfqq_process_refs(cfqq);
+ /*
+ * If the process for the cfqq has gone away, there is no
+ * sense in merging the queues.
+ */
+ if (process_refs == 0)
+ return;
+
+ /*
+ * Merge in the direction of the lesser amount of work.
+ */
+ new_process_refs = cfqq_process_refs(new_cfqq);
+ if (new_process_refs >= process_refs) {
+ cfqq->new_cfqq = new_cfqq;
+ atomic_add(process_refs, &new_cfqq->ref);
+ } else {
+ new_cfqq->new_cfqq = cfqq;
+ atomic_add(new_process_refs, &cfqq->ref);
+ }
+}
+
+When a merge candidate is found, we add the process references for the
+queue with less references to the queue with more. The actual merging
+of queues happens when a new request is issued for a given cfqq. In the
+case of the test program, it only does a single pread call to read in
+1MB, so the actual merge never happens.
+
+Normally, this is fine, as when the queue exits, we simply drop the
+references we took on the other cfqqs in the merge chain:
+
+ /*
+ * If this queue was scheduled to merge with another queue, be
+ * sure to drop the reference taken on that queue (and others in
+ * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+ */
+ __cfqq = cfqq->new_cfqq;
+ while (__cfqq) {
+ if (__cfqq == cfqq) {
+ WARN(1, "cfqq->new_cfqq loop detected\n");
+ break;
+ }
+ next = __cfqq->new_cfqq;
+ cfq_put_queue(__cfqq);
+ __cfqq = next;
+ }
+
+However, there is a hole in this logic. Consider the following (and
+keep in mind that each I/O keeps a reference to the cfqq):
+
+q1->new_cfqq = q2 // q2 now has 2 process references
+q3->new_cfqq = q2 // q2 now has 3 process references
+
+// the process associated with q2 exits
+// q2 now has 2 process references
+
+// queue 1 exits, drops its reference on q2
+// q2 now has 1 process reference
+
+// q3 exits, so has 0 process references, and hence drops its references
+// to q2, which leaves q2 also with 0 process references
+
+q4 comes along and wants to merge with q3
+
+q3->new_cfqq still points at q2! We follow that link and end up at an
+already freed cfqq.
+
+So, the fix is to not follow a merge chain if the top-most queue does
+not have a process reference, otherwise any queue in the chain could be
+already freed. I also changed the logic to disallow merging with a
+queue that does not have any process references. Previously, we did
+this check for one of the merge candidates, but not the other. That
+doesn't really make sense.
+
+Without the attached patch, my system would BUG within a couple of
+seconds of running the reproducer program. With the patch applied, my
+system ran the program for over an hour without issues.
+
+This addresses the following bugzilla:
+ https://bugzilla.kernel.org/show_bug.cgi?id=16217
+
+Thanks a ton to Phil Carns for providing the bug report and an excellent
+reproducer.
+
+[ Note for stable: this applies to 2.6.32/33/34 ].
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Reported-by: Phil Carns <carns@mcs.anl.gov>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1198,6 +1198,15 @@ static void cfq_setup_merge(struct cfq_q
+ int process_refs, new_process_refs;
+ struct cfq_queue *__cfqq;
+
++ /*
++ * If there are no process references on the new_cfqq, then it is
++ * unsafe to follow the ->new_cfqq chain as other cfqq's in the
++ * chain may have dropped their last reference (not just their
++ * last process reference).
++ */
++ if (!cfqq_process_refs(new_cfqq))
++ return;
++
+ /* Avoid a circular list and skip interim queue merges */
+ while ((__cfqq = new_cfqq->new_cfqq)) {
+ if (__cfqq == cfqq)
+@@ -1206,17 +1215,17 @@ static void cfq_setup_merge(struct cfq_q
+ }
+
+ process_refs = cfqq_process_refs(cfqq);
++ new_process_refs = cfqq_process_refs(new_cfqq);
+ /*
+ * If the process for the cfqq has gone away, there is no
+ * sense in merging the queues.
+ */
+- if (process_refs == 0)
++ if (process_refs == 0 || new_process_refs == 0)
+ return;
+
+ /*
+ * Merge in the direction of the lesser amount of work.
+ */
+- new_process_refs = cfqq_process_refs(new_cfqq);
+ if (new_process_refs >= process_refs) {
+ cfqq->new_cfqq = new_cfqq;
+ atomic_add(process_refs, &new_cfqq->ref);
diff --git a/queue-2.6.32/cfq-iosched-get-rid-of-the-coop_preempt-flag.patch b/queue-2.6.32/cfq-iosched-get-rid-of-the-coop_preempt-flag.patch
new file mode 100644
index 0000000..f03a341
--- /dev/null
+++ b/queue-2.6.32/cfq-iosched-get-rid-of-the-coop_preempt-flag.patch
@@ -0,0 +1,70 @@
+From sjayaraman@suse.de Wed Nov 2 12:46:28 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:27:56 +0530
+Subject: cfq-iosched: get rid of the coop_preempt flag
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E844124.70207@suse.de>
+
+
+From: Jens Axboe <jens.axboe@oracle.com>
+
+commit e00ef7997195e4f8e10593727a6286e2e2802159 upstream
+
+We need to rework this logic post the cooperating cfq_queue merging,
+for now just get rid of it and Jeff Moyer will fix the fall out.
+
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 15 +--------------
+ 1 file changed, 1 insertion(+), 14 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -210,7 +210,6 @@ enum cfqq_state_flags {
+ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
+ CFQ_CFQQ_FLAG_sync, /* synchronous queue */
+ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
+- CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */
+ };
+
+ #define CFQ_CFQQ_FNS(name) \
+@@ -237,7 +236,6 @@ CFQ_CFQQ_FNS(prio_changed);
+ CFQ_CFQQ_FNS(slice_new);
+ CFQ_CFQQ_FNS(sync);
+ CFQ_CFQQ_FNS(coop);
+-CFQ_CFQQ_FNS(coop_preempt);
+ #undef CFQ_CFQQ_FNS
+
+ #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+@@ -962,9 +960,6 @@ static struct cfq_queue *cfq_set_active_
+ if (!cfqq)
+ cfqq = cfq_get_next_queue(cfqd);
+
+- if (cfqq)
+- cfq_clear_cfqq_coop_preempt(cfqq);
+-
+ __cfq_set_active_queue(cfqd, cfqq);
+ return cfqq;
+ }
+@@ -2168,16 +2163,8 @@ cfq_should_preempt(struct cfq_data *cfqd
+ * if this request is as-good as one we would expect from the
+ * current cfqq, let it preempt
+ */
+- if (cfq_rq_close(cfqd, cfqq, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+- cfqd->busy_queues == 1)) {
+- /*
+- * Mark new queue coop_preempt, so its coop flag will not be
+- * cleared when new queue gets scheduled at the very first time
+- */
+- cfq_mark_cfqq_coop_preempt(new_cfqq);
+- cfq_mark_cfqq_coop(new_cfqq);
++ if (cfq_rq_close(cfqd, cfqq, rq))
+ return true;
+- }
+
+ return false;
+ }
diff --git a/queue-2.6.32/cfq-merge-cooperating-cfq_queues.patch b/queue-2.6.32/cfq-merge-cooperating-cfq_queues.patch
new file mode 100644
index 0000000..d0dd645
--- /dev/null
+++ b/queue-2.6.32/cfq-merge-cooperating-cfq_queues.patch
@@ -0,0 +1,177 @@
+From sjayaraman@suse.de Wed Nov 2 12:45:57 2011
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Thu, 29 Sep 2011 15:27:11 +0530
+Subject: cfq: merge cooperating cfq_queues
+To: Greg KH <gregkh@suse.de>
+Cc: stable@kernel.org
+Message-ID: <4E8440F7.6080309@suse.de>
+
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit df5fe3e8e13883f58dc97489076bbcc150789a21 upstream.
+
+When cooperating cfq_queues are detected currently, they are allowed to
+skip ahead in the scheduling order. It is much more efficient to
+automatically share the cfq_queue data structure between cooperating processes.
+Performance of the read-test2 benchmark (which is written to emulate the
+dump(8) utility) went from 12MB/s to 90MB/s on my SATA disk. NFS servers
+with multiple nfsd threads also saw performance increases.
+
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 87 insertions(+), 2 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -118,6 +118,8 @@ struct cfq_queue {
+ sector_t last_request_pos;
+
+ pid_t pid;
++
++ struct cfq_queue *new_cfqq;
+ };
+
+ /*
+@@ -1052,6 +1054,12 @@ static struct cfq_queue *cfq_close_coope
+ if (!cfqq)
+ return NULL;
+
++ /*
++ * It only makes sense to merge sync queues.
++ */
++ if (!cfq_cfqq_sync(cfqq))
++ return NULL;
++
+ if (cfq_cfqq_coop(cfqq))
+ return NULL;
+
+@@ -1173,6 +1181,43 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd,
+ }
+
+ /*
++ * Must be called with the queue_lock held.
++ */
++static int cfqq_process_refs(struct cfq_queue *cfqq)
++{
++ int process_refs, io_refs;
++
++ io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
++ process_refs = atomic_read(&cfqq->ref) - io_refs;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
++{
++ int process_refs;
++ struct cfq_queue *__cfqq;
++
++ /* Avoid a circular list and skip interim queue merges */
++ while ((__cfqq = new_cfqq->new_cfqq)) {
++ if (__cfqq == cfqq)
++ return;
++ new_cfqq = __cfqq;
++ }
++
++ process_refs = cfqq_process_refs(cfqq);
++ /*
++ * If the process for the cfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0)
++ return;
++
++ cfqq->new_cfqq = new_cfqq;
++ atomic_add(process_refs, &new_cfqq->ref);
++}
++
++/*
+ * Select a queue for service. If we have a current active queue,
+ * check whether to continue servicing it, or retrieve and set a new one.
+ */
+@@ -1201,11 +1246,14 @@ static struct cfq_queue *cfq_select_queu
+ * If another queue has a request waiting within our mean seek
+ * distance, let it run. The expire code will check for close
+ * cooperators and put the close queue at the front of the service
+- * tree.
++ * tree. If possible, merge the expiring queue with the new cfqq.
+ */
+ new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
+- if (new_cfqq)
++ if (new_cfqq) {
++ if (!cfqq->new_cfqq)
++ cfq_setup_merge(cfqq, new_cfqq);
+ goto expire;
++ }
+
+ /*
+ * No requests pending. If the active queue still has requests in
+@@ -1516,11 +1564,29 @@ static void cfq_free_io_context(struct i
+
+ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ {
++ struct cfq_queue *__cfqq, *next;
++
+ if (unlikely(cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
++ */
++ __cfqq = cfqq->new_cfqq;
++ while (__cfqq) {
++ if (__cfqq == cfqq) {
++ WARN(1, "cfqq->new_cfqq loop detected\n");
++ break;
++ }
++ next = __cfqq->new_cfqq;
++ cfq_put_queue(__cfqq);
++ __cfqq = next;
++ }
++
+ cfq_put_queue(cfqq);
+ }
+
+@@ -2342,6 +2408,16 @@ static void cfq_put_request(struct reque
+ }
+ }
+
++static struct cfq_queue *
++cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
++ struct cfq_queue *cfqq)
++{
++ cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
++ cic_set_cfqq(cic, cfqq->new_cfqq, 1);
++ cfq_put_queue(cfqq);
++ return cic_to_cfqq(cic, 1);
++}
++
+ /*
+ * Allocate cfq data structures associated with this request.
+ */
+@@ -2368,6 +2444,15 @@ cfq_set_request(struct request_queue *q,
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
+ cic_set_cfqq(cic, cfqq, is_sync);
++ } else {
++ /*
++ * Check to see if this queue is scheduled to merge with
++ * another, closely cooperating queue. The merging of
++ * queues happens here as it must be done in process context.
++ * The reference on new_cfqq was taken in merge_cfqqs.
++ */
++ if (cfqq->new_cfqq)
++ cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
+ }
+
+ cfqq->allocated[rw]++;
diff --git a/queue-2.6.32/kvm-x86-reset-tsc_timestamp-on-tsc-writes.patch b/queue-2.6.32/kvm-x86-reset-tsc_timestamp-on-tsc-writes.patch
new file mode 100644
index 0000000..78094d7
--- /dev/null
+++ b/queue-2.6.32/kvm-x86-reset-tsc_timestamp-on-tsc-writes.patch
@@ -0,0 +1,91 @@
+From mtosatti@redhat.com Wed Nov 2 12:48:55 2011
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Mon, 12 Sep 2011 12:52:51 -0300
+Subject: KVM: x86: Reset tsc_timestamp on TSC writes
+To: stable@kernel.org, greg@kroah.com
+Cc: Avi Kivity <avi@redhat.com>, Philipp Hahn <hahn@univention.de>
+Message-ID: <20110912155251.GA18491@amt.cnet>
+Content-Disposition: inline
+
+
+From: Philipp Hahn <hahn@univention.de>
+
+There is no upstream commit ID for this patch since it is not a straight
+backport from upstream. It is a fix only relevant to 2.6.32.y.
+
+Since 1d5f066e0b63271b67eac6d3752f8aa96adcbddb from 2.6.37 was
+back-ported to 2.6.32.40 as ad2088cabe0fd7f633f38ba106025d33ed9a2105,
+the following patch is needed to add the needed reset logic to 2.6.32 as
+well.
+
+
+Bug #23257: Reset tsc_timestamp on TSC writes
+
+vcpu->last_guest_tsc is updated in vcpu_enter_guest() and kvm_arch_vcpu_put()
+by getting the last value of the TSC from the guest.
+On reset, the SeaBIOS resets the TSC to 0, which triggers a bug on the next
+call to kvm_write_guest_time(): Since vcpu->hw_clock.tsc_timestamp still
+contains the old value before the reset, "max_kernel_ns = vcpu->last_guest_tsc
+- vcpu->hw_clock.tsc_timestamp" gets negative. Since the variable is u64, it
+ gets translated to a large positive value.
+
+[9333.197080]
+vcpu->last_guest_tsc =209_328_760_015 ←
+vcpu->hv_clock.tsc_timestamp=209_328_708_109
+vcpu->last_kernel_ns =9_333_179_830_643
+kernel_ns =9_333_197_073_429
+max_kernel_ns =9_333_179_847_943 ←
+
+[9336.910995]
+vcpu->last_guest_tsc =9_438_510_584 ←
+vcpu->hv_clock.tsc_timestamp=211_080_593_143
+vcpu->last_kernel_ns =9_333_763_732_907
+kernel_ns =9_336_910_990_771
+max_kernel_ns =6_148_296_831_006_663_830 ←
+
+For completeness, here are the values for my 3 GHz CPU:
+vcpu->hv_clock.tsc_shift =-1
+vcpu->hv_clock.tsc_to_system_mul =2_863_019_502
+
+This makes the guest kernel crawl very slowly when clocksource=kvmclock is
+used: sleeps take way longer than expected and don't match wall clock any more.
+The times printed with printk() don't match real time and the reboot often
+stalls for long times.
+
+In linux-git this isn't a problem, since on every MSR_IA32_TSC write
+vcpu->arch.hv_clock.tsc_timestamp is reset to 0, which disables above logic.
+The code there is only in arch/x86/kvm/x86.c, since much of the kvm-clock
+related code has been refactored for 2.6.37:
+ 99e3e30a arch/x86/kvm/x86.c
+ (Zachary Amsden 2010-08-19 22:07:17 -1000 1084)
+ vcpu->arch.hv_clock.tsc_timestamp = 0;
+
+Signed-off-by: Philipp Hahn <hahn@univention.de>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/svm.c | 1 +
+ arch/x86/kvm/vmx.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2256,6 +2256,7 @@ static int svm_set_msr(struct kvm_vcpu *
+ }
+
+ svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
++ vcpu->arch.hv_clock.tsc_timestamp = 0;
+
+ break;
+ }
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1067,6 +1067,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ case MSR_IA32_TSC:
+ rdtscll(host_tsc);
+ guest_write_tsc(data, host_tsc);
++ vcpu->arch.hv_clock.tsc_timestamp = 0;
+ break;
+ case MSR_IA32_CR_PAT:
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
diff --git a/queue-2.6.32/series b/queue-2.6.32/series
index 3d7f381..89882e2 100644
--- a/queue-2.6.32/series
+++ b/queue-2.6.32/series
@@ -81,3 +81,12 @@ carminefb-fix-module-parameters-permissions.patch
uvcvideo-set-alternate-setting-0-on-resume-if-the-bus-has-been-reset.patch
tuner_xc2028-allow-selection-of-the-frequency-adjustment-code-for-xc3028.patch
plat-mxc-iomux-v3.h-implicitly-enable-pull-up-down-when-that-s-desired.patch
+um-fix-ubd-cow-size.patch
+cfq-calculate-the-seek_mean-per-cfq_queue-not-per-cfq_io_context.patch
+cfq-merge-cooperating-cfq_queues.patch
+cfq-change-the-meaning-of-the-cfqq_coop-flag.patch
+cfq-break-apart-merged-cfqqs-if-they-stop-cooperating.patch
+cfq-iosched-get-rid-of-the-coop_preempt-flag.patch
+cfq-don-t-allow-queue-merges-for-queues-that-have-no-process-references.patch
+xen-timer-missing-irqf_no_suspend-in-timer-code-broke-suspend.patch
+kvm-x86-reset-tsc_timestamp-on-tsc-writes.patch
diff --git a/queue-2.6.32/um-fix-ubd-cow-size.patch b/queue-2.6.32/um-fix-ubd-cow-size.patch
new file mode 100644
index 0000000..0ba55e2
--- /dev/null
+++ b/queue-2.6.32/um-fix-ubd-cow-size.patch
@@ -0,0 +1,66 @@
+From 8535639810e578960233ad39def3ac2157b0c3ec Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Wed, 2 Nov 2011 13:17:27 +0100
+Subject: um: fix ubd cow size
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 8535639810e578960233ad39def3ac2157b0c3ec upstream.
+
+ubd_file_size() cannot use ubd_dev->cow.file because at this time
+ubd_dev->cow.file is not initialized.
+Therefore, ubd_file_size() will always report a wrong disk size when
+COW files are used.
+Reading from /dev/ubd* would crash the kernel.
+
+We have to read the correct disk size from the COW file's backing
+file.
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/um/drivers/ubd_kern.c | 31 ++++++++++++++++++++++++++++++-
+ 1 file changed, 30 insertions(+), 1 deletion(-)
+
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -510,8 +510,37 @@ __uml_exitcall(kill_io_thread);
+ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
+ {
+ char *file;
++ int fd;
++ int err;
+
+- file = ubd_dev->cow.file ? ubd_dev->cow.file : ubd_dev->file;
++ __u32 version;
++ __u32 align;
++ char *backing_file;
++ time_t mtime;
++ unsigned long long size;
++ int sector_size;
++ int bitmap_offset;
++
++ if (ubd_dev->file && ubd_dev->cow.file) {
++ file = ubd_dev->cow.file;
++
++ goto out;
++ }
++
++ fd = os_open_file(ubd_dev->file, global_openflags, 0);
++ if (fd < 0)
++ return fd;
++
++ err = read_cow_header(file_reader, &fd, &version, &backing_file, \
++ &mtime, &size, &sector_size, &align, &bitmap_offset);
++ os_close_file(fd);
++
++ if(err == -EINVAL)
++ file = ubd_dev->file;
++ else
++ file = backing_file;
++
++out:
+ return os_file_size(file, size_out);
+ }
+
diff --git a/queue-2.6.32/xen-timer-missing-irqf_no_suspend-in-timer-code-broke-suspend.patch b/queue-2.6.32/xen-timer-missing-irqf_no_suspend-in-timer-code-broke-suspend.patch
new file mode 100644
index 0000000..4db09c4
--- /dev/null
+++ b/queue-2.6.32/xen-timer-missing-irqf_no_suspend-in-timer-code-broke-suspend.patch
@@ -0,0 +1,36 @@
+From f611f2da99420abc973c32cdbddbf5c365d0a20c Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell@citrix.com>
+Date: Tue, 8 Feb 2011 14:03:31 +0000
+Subject: xen/timer: Missing IRQF_NO_SUSPEND in timer code broke suspend.
+
+From: Ian Campbell <Ian.Campbell@citrix.com>
+
+commit f611f2da99420abc973c32cdbddbf5c365d0a20c upstream.
+
+The patches missed an indirect use of IRQF_NO_SUSPEND pulled in via
+IRQF_TIMER. The following patch fixes the issue.
+
+With this fixlet PV guest migration works just fine. I also booted the
+entire series as a dom0 kernel and it appeared fine.
+
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/xen/time.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -395,7 +395,9 @@ void xen_setup_timer(int cpu)
+ name = "<timer kasprintf failed>";
+
+ irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
++ IRQF_DISABLED|IRQF_PERCPU|
++ IRQF_NOBALANCING|IRQF_TIMER|
++ IRQF_FORCE_RESUME,
+ name, NULL);
+
+ evt = &per_cpu(xen_clock_events, cpu);