From: Will Deacon <will.deacon@arm.com>
Subject: [RFC PATCH 01/20] asm-generic/mmiowb: Add generic implementation of mmiowb() tracking
Date: Fri, 22 Feb 2019
In preparation for removing all explicit mmiowb() calls from driver
code, implement a tracking system in asm-generic based on the PowerPC
implementation. This allows architectures with a non-empty mmiowb()
definition to automatically have the barrier inserted in spin_unlock()
following a critical section containing an I/O write.

Signed-off-by: Will Deacon <will.deacon@arm.com>
---
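For illustration, a sketch of the driver-side effect once the series
lands. The device structure and register layout below are made up, and
the architecture's writel() is assumed to call mmiowb_set_pending() as
described in the header comment:

#include <linux/io.h>
#include <linux/spinlock.h>

struct foo_dev {			/* hypothetical device */
	spinlock_t	lock;
	void __iomem	*regs;
};

static void foo_kick(struct foo_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs);		/* I/O accessor marks mmiowb_pending */
	/* no explicit mmiowb() needed here any more */
	spin_unlock(&dev->lock);	/* issues mmiowb() if a write is pending */
}
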
 include/asm-generic/mmiowb.h | 60 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/Kconfig.locks         |  3 +++
 kernel/locking/spinlock.c    |  5 ++++
 3 files changed, 68 insertions(+)
create mode 100644 include/asm-generic/mmiowb.h

diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644
index 000000000000..1cec8907806f
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ * 1. Implement mmiowb() in asm/mmiowb.h and then #include this file
+ * 2. Ensure your I/O write accessors call mmiowb_set_pending()
+ * 3. Select ARCH_HAS_MMIOWB
+ * 4. Untangle the resulting mess of header files
+ * 5. Complain to your architects
+ */
+#if defined(CONFIG_ARCH_HAS_MMIOWB) && defined(CONFIG_SMP)
+
+#include <linux/types.h>
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+struct mmiowb_state {
+	u16 nesting_count;
+	u16 mmiowb_pending;
+};
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+
+#ifndef mmiowb_set_pending
+static inline void mmiowb_set_pending(void)
+{
+	__this_cpu_write(__mmiowb_state.mmiowb_pending, 1);
+}
+#endif
+
+#ifndef mmiowb_spin_lock
+static inline void mmiowb_spin_lock(void)
+{
+	if (__this_cpu_inc_return(__mmiowb_state.nesting_count) == 1)
+		__this_cpu_write(__mmiowb_state.mmiowb_pending, 0);
+}
+#endif
+
+#ifndef mmiowb_spin_unlock
+static inline void mmiowb_spin_unlock(void)
+{
+	if (__this_cpu_xchg(__mmiowb_state.mmiowb_pending, 0))
+		mmiowb();
+	__this_cpu_dec_return(__mmiowb_state.nesting_count);
+}
+#endif
+
+#else
+#define mmiowb_set_pending()	do { } while (0)
+#define mmiowb_spin_lock()	do { } while (0)
+#define mmiowb_spin_unlock()	do { } while (0)
+#endif /* CONFIG_ARCH_HAS_MMIOWB && CONFIG_SMP */
+#endif /* __ASM_GENERIC_MMIOWB_H */
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 84d882f3e299..04976ae41176 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -248,3 +248,6 @@ config ARCH_USE_QUEUED_RWLOCKS
 config QUEUED_RWLOCKS
 	def_bool y if ARCH_USE_QUEUED_RWLOCKS
 	depends on SMP
+
+config ARCH_HAS_MMIOWB
+	bool
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..cbae365d7dd1 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -22,6 +22,11 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>

+#ifdef CONFIG_ARCH_HAS_MMIOWB
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
--
2.11.0
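
For reference, a minimal sketch of the architecture-side opt-in
described by the five steps in the header comment. Everything below is
hypothetical: the arch name "foo", the barrier definition and the
accessor are illustrative, not taken from any real port:

/* Step 1: arch/foo/include/asm/mmiowb.h */
#ifndef _ASM_FOO_MMIOWB_H
#define _ASM_FOO_MMIOWB_H

#include <linux/compiler.h>

/* A real port would use its I/O ordering instruction here. */
#define mmiowb()	barrier()

#include <asm-generic/mmiowb.h>

#endif /* _ASM_FOO_MMIOWB_H */

/* Step 2: in the arch's asm/io.h, write accessors flag the barrier
 * (simplified; real accessors also handle endianness and ordering). */
static inline void foo_writel(u32 val, volatile void __iomem *addr)
{
	__raw_writel(val, addr);
	mmiowb_set_pending();
}

/* Step 3: add "select ARCH_HAS_MMIOWB" to the arch entry in Kconfig. */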
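
And a short trace of the nesting logic, assuming the rest of the
series wires mmiowb_spin_lock()/mmiowb_spin_unlock() into the spinlock
core. Lock names and the register write are again made up:

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void nested_example(void __iomem *regs)
{
	spin_lock(&lock_a);	/* nesting_count 0->1: mmiowb_pending cleared */
	spin_lock(&lock_b);	/* nesting_count 1->2: pending state kept */
	writel(0x1, regs);	/* mmiowb_pending = 1 */
	spin_unlock(&lock_b);	/* xchg(pending, 0) == 1: mmiowb() issued */
	spin_unlock(&lock_a);	/* pending already 0: no extra barrier */
}

Note that the barrier is issued at the first unlock after the I/O
write, not deferred to the outermost one; nesting_count only prevents
an inner lock from clearing a pending barrier set by an outer section.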