author    Chris Mason <chris.mason@fusionio.com>  2012-10-26 15:23:40 -0400
committer Chris Mason <clm@fb.com>                2014-09-24 12:02:07 -0700
commit    6c984075e39be376f159f831536a8c2dfd620e9a (patch)
tree      358da40808e61736065988f9858e51bdd10ff718
parent    79d615306f3a4146bb27d59c4b1707907099f669 (diff)
download  blktrace-6c984075e39be376f159f831536a8c2dfd620e9a.tar.gz
iowatcher: Fix some rounding errors around the max offset
set_gdd_bit makes sure that we don't try to set bits past the max
offset we used to allocate our gdd array.  But it only does this when
the function is first called, and the whole byte range for the IO
we're recording may go past the max offset.  This adds a check to
make sure we stay in the right range.

Signed-off-by: Chris Mason <chris.mason@fusionio.com>
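For illustration, here is a minimal C sketch (hypothetical names, not
iowatcher's actual code) of the two failure modes the plot.c hunks
below address: sizing a bitmap with truncating division, and setting a
range of bits without re-checking the upper bound on every iteration.

#include <stdint.h>
#include <stdlib.h>

struct bitmap {
	uint64_t max_bit;	/* highest valid bit index */
	unsigned char *bits;
};

static struct bitmap *alloc_bitmap(uint64_t nr_bits)
{
	struct bitmap *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	/*
	 * nr_bits / 8 truncates and can lose up to 7 bits at the end;
	 * rounding up mirrors the (arr_size + 7) / 8 fix below.
	 */
	b->bits = calloc(1, (nr_bits + 7) / 8);
	if (!b->bits) {
		free(b);
		return NULL;
	}
	b->max_bit = nr_bits - 1;	/* assumes nr_bits > 0 */
	return b;
}

static void set_range(struct bitmap *b, uint64_t bit, uint64_t count)
{
	/*
	 * Checking only the starting bit is not enough: the range may
	 * run past max_bit, so re-check on every iteration, as
	 * set_gdd_bit() now does with offset <= gdd->max_offset.
	 */
	while (count > 0 && bit <= b->max_bit) {
		b->bits[bit / 8] |= 1 << (bit % 8);
		bit++;
		count--;
	}
}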
-rw-r--r--  iowatcher/main.c  4
-rw-r--r--  iowatcher/plot.c  5
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/iowatcher/main.c b/iowatcher/main.c
index 23fb707..6e392f2 100644
--- a/iowatcher/main.c
+++ b/iowatcher/main.c
@@ -297,8 +297,8 @@ static void read_traces(void)
 		last_time = find_last_time(trace);
 
 		tf->trace = trace;
-		tf->max_seconds = SECONDS(last_time);
-		tf->stop_seconds = SECONDS(last_time);
+		tf->max_seconds = SECONDS(last_time) + 1;
+		tf->stop_seconds = SECONDS(last_time) + 1;
 		find_extreme_offsets(trace, &tf->min_offset, &tf->max_offset,
 				     &max_bank, &max_bank_offset);
 		filter_outliers(trace, tf->min_offset, tf->max_offset, &ymin, &ymax);
diff --git a/iowatcher/plot.c b/iowatcher/plot.c
index 79e5d3c..1e9b7d7 100644
--- a/iowatcher/plot.c
+++ b/iowatcher/plot.c
@@ -148,7 +148,7 @@ struct graph_dot_data *alloc_dot_data(int min_seconds, int max_seconds, u64 min_
 	arr_size = (rows + 1) * cols;
 
 	/* the number of bytes */
-	arr_size /= 8;
+	arr_size = (arr_size + 7) / 8;
 
 	gdd = calloc(1, size + arr_size);
 	if (!gdd) {
@@ -191,10 +191,9 @@ void set_gdd_bit(struct graph_dot_data *gdd, u64 offset, double bytes, double time)
 	if (offset > gdd->max_offset || offset < gdd->min_offset)
 		return;
-
 	gdd->total_ios++;
 	time = time / 1000000000.0;
-	while (bytes > 0) {
+	while (bytes > 0 && offset <= gdd->max_offset) {
 		row = (double)(offset - gdd->min_offset) / bytes_per_row;
 		col = (time - gdd->min_seconds) / secs_per_col;
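The main.c hunk compensates for truncation in the same spirit.
Assuming SECONDS() rounds a nanosecond timestamp down to whole seconds
(an assumption for this sketch, not a quote of the project's header),
an event in the trace's final partial second lands exactly in column
SECONDS(last_time), so max_seconds has to be one past that column:

#include <stdio.h>

/* Assumed semantics: truncate nanoseconds down to whole seconds. */
#define SECONDS(x)	((unsigned long long)(x) / 1000000000ULL)

int main(void)
{
	unsigned long long last_time = 2500000000ULL;	/* last event at t = 2.5s */

	/*
	 * The event plots in column SECONDS(last_time) == 2, so the
	 * graph needs columns 0..2: max_seconds must be 3, not 2,
	 * which is why the hunk adds + 1.
	 */
	printf("event column %llu, seconds needed %llu\n",
	       SECONDS(last_time), SECONDS(last_time) + 1);
	return 0;
}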