author     Paul E. McKenney <paulmck@kernel.org>  2020-08-14 16:14:34 -0700
committer  Paul E. McKenney <paulmck@kernel.org>  2023-03-24 10:22:25 -0700
commit     7e7eb5ae4e4cd482a1ebc6a82ea8a04c5ead62ee (patch)
tree       81ec8404ff290bda113be2858064ba76bceddf70 /Documentation/litmus-tests
parent     fe15c26ee26efa11741a7b632e9f23b01aca4cc6 (diff)
tools/memory-model: Document locking corner cases
Most Linux-kernel uses of locking are straightforward, but there are
corner-case uses that rely on less well-known aspects of the lock and
unlock primitives.  This commit therefore adds a locking.txt and litmus
tests in Documentation/litmus-tests/locking to explain these corner-case
uses.

[ paulmck: Apply Andrea Parri feedback for klitmus7. ]
[ paulmck: Apply Akira Yokosawa example-consistency feedback. ]

Reviewed-by: Akira Yokosawa <akiyks@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
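As a usage sketch (not part of the patch): assuming herd7 is installed and
run from the kernel tree's tools/memory-model directory, as described in
tools/memory-model/README, each of these tests can be checked with
something like:

    herd7 -conf linux-kernel.cfg ../../Documentation/litmus-tests/locking/DCL-broken.litmus

The "Result:" comment at the top of each test states the expected outcome.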
Diffstat (limited to 'Documentation/litmus-tests')
 Documentation/litmus-tests/locking/DCL-broken.litmus | 54
 Documentation/litmus-tests/locking/DCL-fixed.litmus  | 55
 Documentation/litmus-tests/locking/RM-broken.litmus  | 41
 Documentation/litmus-tests/locking/RM-fixed.litmus   | 41
 4 files changed, 191 insertions(+), 0 deletions(-)
diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
new file mode 100644
index 00000000000000..bfb7ba4316d690
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
@@ -0,0 +1,54 @@
+C DCL-broken
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that more than just locking is required
+ * to correctly implement double-checked locking.
+ *)
+
+{
+        int flag;
+        int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        r0 = READ_ONCE(*flag);
+        if (r0 == 0) {
+                spin_lock(lck);
+                r1 = READ_ONCE(*flag);
+                if (r1 == 0) {
+                        WRITE_ONCE(*data, 1);
+                        WRITE_ONCE(*flag, 1);
+                }
+                spin_unlock(lck);
+        }
+        r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        r0 = READ_ONCE(*flag);
+        if (r0 == 0) {
+                spin_lock(lck);
+                r1 = READ_ONCE(*flag);
+                if (r1 == 0) {
+                        WRITE_ONCE(*data, 1);
+                        WRITE_ONCE(*flag, 1);
+                }
+                spin_unlock(lck);
+        }
+        r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
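For reference, a minimal kernel-style sketch of the pattern this test
abstracts (the names init_lock, init_done, shared_data and compute_data()
are hypothetical, not part of the patch):

    /* Kernel-style sketch; assumes the usual spinlock definitions. */
    static DEFINE_SPINLOCK(init_lock);
    static int init_done;
    static int shared_data;

    /* Broken double-checked locking: a lockless reader can observe   */
    /* init_done == 1 and still read a stale shared_data, because     */
    /* plain READ_ONCE()/WRITE_ONCE() order nothing between the data  */
    /* and the flag.                                                  */
    int get_data_broken(void)
    {
            if (!READ_ONCE(init_done)) {
                    spin_lock(&init_lock);
                    if (!READ_ONCE(init_done)) {
                            WRITE_ONCE(shared_data, compute_data()); /* hypothetical initializer */
                            WRITE_ONCE(init_done, 1);
                    }
                    spin_unlock(&init_lock);
            }
            return READ_ONCE(shared_data);  /* may still return 0 */
    }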
diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
new file mode 100644
index 00000000000000..d1b60bcb0c8f3a
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
@@ -0,0 +1,55 @@
+C DCL-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that double-checked locking can be
+ * reliable given proper use of smp_load_acquire() and smp_store_release()
+ * in addition to the locking.
+ *)
+
+{
+        int flag;
+        int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        r0 = smp_load_acquire(flag);
+        if (r0 == 0) {
+                spin_lock(lck);
+                r1 = READ_ONCE(*flag);
+                if (r1 == 0) {
+                        WRITE_ONCE(*data, 1);
+                        smp_store_release(flag, 1);
+                }
+                spin_unlock(lck);
+        }
+        r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        r0 = smp_load_acquire(flag);
+        if (r0 == 0) {
+                spin_lock(lck);
+                r1 = READ_ONCE(*flag);
+                if (r1 == 0) {
+                        WRITE_ONCE(*data, 1);
+                        smp_store_release(flag, 1);
+                }
+                spin_unlock(lck);
+        }
+        r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
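Continuing the hypothetical kernel-style sketch from the DCL-broken hunk
above (same invented names), the fix mirrors this litmus test: the flag is
read with smp_load_acquire() and set with smp_store_release(), so any
reader that sees the flag set is also guaranteed to see the initialized
data:

    int get_data_fixed(void)
    {
            /* init_lock, init_done, shared_data, compute_data(): hypothetical, as above. */
            if (!smp_load_acquire(&init_done)) {
                    spin_lock(&init_lock);
                    if (!READ_ONCE(init_done)) {
                            WRITE_ONCE(shared_data, compute_data());
                            smp_store_release(&init_done, 1);
                    }
                    spin_unlock(&init_lock);
            }
            /* Either the acquire/release pairing or the lock ordering */
            /* guarantees this read sees the initialization.           */
            return READ_ONCE(shared_data);
    }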
diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
new file mode 100644
index 00000000000000..b7ef30cedfe51a
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-broken.litmus
@@ -0,0 +1,41 @@
+C RM-broken
+
+(*
+ * Result: DEADLOCK
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+        int x;
+        atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+        int r2;
+
+        spin_lock(lck);
+        r2 = atomic_inc_return(y);
+        WRITE_ONCE(*x, 1);
+        spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        spin_lock(lck);
+        r0 = READ_ONCE(*x);
+        r1 = READ_ONCE(*x);
+        r2 = atomic_inc_return(y);
+        spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
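One concrete way the "roach motel" assumption can go wrong, shown here as
a hypothetical kernel-style sketch (names invented, not part of the
patch): if a wait that belongs outside the critical section is pulled
inside it, the thread that must satisfy the wait can end up blocked on the
very same lock, and the system deadlocks:

    /* Hypothetical sketch of a roach-motel-induced deadlock. */
    static DEFINE_SPINLOCK(lck);
    static int flag;

    void waiter(void)
    {
            spin_lock(&lck);
            while (!READ_ONCE(flag))        /* spins forever holding lck */
                    cpu_relax();
            spin_unlock(&lck);
    }

    void setter(void)
    {
            spin_lock(&lck);                /* never acquired */
            WRITE_ONCE(flag, 1);
            spin_unlock(&lck);
    }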
diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
new file mode 100644
index 00000000000000..b628175596160f
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
@@ -0,0 +1,41 @@
+C RM-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+        int x;
+        atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+        int r2;
+
+        spin_lock(lck);
+        r2 = atomic_inc_return(y);
+        WRITE_ONCE(*x, 1);
+        spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+        int r0;
+        int r1;
+        int r2;
+
+        r0 = READ_ONCE(*x);
+        r1 = READ_ONCE(*x);
+        spin_lock(lck);
+        r2 = atomic_inc_return(y);
+        spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
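As a further usage sketch (not part of the patch), these tests can also be
exercised on real hardware with klitmus7, which converts litmus tests into
kernel modules; the output directory name below is arbitrary:

    klitmus7 -o mymodules Documentation/litmus-tests/locking/RM-fixed.litmus
    cd mymodules && make
    sudo sh run.sh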