author    Hannes Reinecke <hare@suse.de>    2009-02-09 14:04:25 +0100
committer Hannes Reinecke <hare@suse.de>    2011-05-18 10:20:40 +0200
commit    a0cf544ce46264f637d695df1bfd7a5c85e5c0c8 (patch)
tree      bc4beb4b0cfac4c83a607d2f7431dda1c7d8bf1c
parent    6df6f4fb158132d90a9a228ac5b47f3571a7f14d (diff)
download  multipath-tools-a0cf544ce46264f637d695df1bfd7a5c85e5c0c8.tar.gz
multipathd is not starting waitevent checker for single paths
After multipathd was started, any SCSI disks added afterwards would not trigger multipathd to create a waitevent thread. The waitevent thread listens for the kernel's offline/online events, compares what the kernel sees with what multipathd thinks, and, if something is off, whacks multipathd into the right state.

For devices which do not have a kernel device-mapper hardware handler (hp_sw, rdac, etc.) and have only a single path, a momentary link blip with I/O in flight would cause the path to be marked as failed _only_ by the kernel. This event would _not_ be propagated to multipathd (because no waitevent thread had been created). Multipathd would only run its path checker, which would report a PATH_UP event (rightly so, as the path would only be down for a second or so). However, the device-mapper path group would be marked as failed, and any incoming I/O would be blocked (if queue_if_no_path was set) or would fail.

The end result was that multipathd would think everything was peachy while the kernel would be failing (or queueing) the I/O to the multipath device.

References: bnc#473841

Signed-off-by: Hannes Reinecke <hare@suse.de>
-rw-r--r--  multipathd/main.c  12
1 file changed, 10 insertions, 2 deletions
diff --git a/multipathd/main.c b/multipathd/main.c
index 6008d77..dd779d0 100644
--- a/multipathd/main.c
+++ b/multipathd/main.c
@@ -354,6 +354,7 @@ ev_add_path (char * devname, struct vectors * vecs)
 	struct path * pp;
 	char empty_buff[WWID_SIZE] = {0};
 	char params[PARAMS_SIZE] = {0};
+	int start_waiter = 0;
 
 	if (strstr(devname, "..") != NULL) {
 		/*
@@ -437,8 +438,14 @@ rescan:
 	}
 	condlog(4,"%s: creating new map", pp->dev);
-	if ((mpp = add_map_with_path(vecs, pp, 1)))
+	if ((mpp = add_map_with_path(vecs, pp, 1))) {
 		mpp->action = ACT_CREATE;
+		/*
+		 * We don't depend on ACT_CREATE, as domap will
+		 * set it to ACT_NOTHING when complete.
+		 */
+		start_waiter = 1;
+	}
 	else
 		goto fail; /* leave path added to pathvec */
 	}
@@ -479,7 +486,8 @@ rescan:
 	sync_map_state(mpp);
 
-	if (mpp->action == ACT_CREATE &&
+	if ((mpp->action == ACT_CREATE ||
+	     (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
 	    start_waiter_thread(mpp, vecs))
 		goto fail_map;
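
For context, here is a condensed sketch of how ev_add_path() behaves once this patch is applied. It is not the verbatim file contents: the enclosing map-lookup condition is paraphrased, and error handling, locking and the rescan logic are omitted; only the identifiers that appear in the diff above are taken from the source.

	int start_waiter = 0;

	/* No existing map for this WWID: create one and remember it. */
	if (!mpp) {
		condlog(4, "%s: creating new map", pp->dev);
		if ((mpp = add_map_with_path(vecs, pp, 1))) {
			mpp->action = ACT_CREATE;
			/*
			 * domap() resets mpp->action to ACT_NOTHING once
			 * the map is set up, so the ACT_CREATE check
			 * further down would miss this map.
			 */
			start_waiter = 1;
		} else
			goto fail;	/* leave path added to pathvec */
	}

	/* ... domap() creates or reloads the map ... */

	sync_map_state(mpp);

	/*
	 * Start the waitevent thread for freshly created maps as well,
	 * not only for maps still marked ACT_CREATE.
	 */
	if ((mpp->action == ACT_CREATE ||
	     (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
	    start_waiter_thread(mpp, vecs))
		goto fail_map;

With this in place, a map created for a newly hot-added single-path device gets its own waitevent thread, so kernel-side failure events reach multipathd even when no hardware handler is involved.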