Diffstat
-rw-r--r--  arch/arm64/configs/meson64_defconfig                 |   1
-rw-r--r--  drivers/amlogic/usb/dwc_otg/310/dwc_otg_pcd_intr.c   |   8
-rw-r--r--  include/linux/nmi.h                                  |   9
-rw-r--r--  kernel/watchdog.c                                    |  21
-rw-r--r--  kernel/watchdog_hld.c                                | 128
-rw-r--r--  kernel/workqueue.c                                   |  17
-rw-r--r--  lib/Kconfig.debug                                    |  14
7 files changed, 179 insertions, 19 deletions
diff --git a/arch/arm64/configs/meson64_defconfig b/arch/arm64/configs/meson64_defconfig
index 467cea1..7eb2d0a 100644
--- a/arch/arm64/configs/meson64_defconfig
+++ b/arch/arm64/configs/meson64_defconfig
@@ -548,6 +548,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
diff --git a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_pcd_intr.c b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_pcd_intr.c
index 2c4f27f..901cf6e 100644
--- a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_pcd_intr.c
+++ b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_pcd_intr.c
@@ -1807,12 +1807,16 @@ static inline void pcd_setup(dwc_otg_pcd_t *pcd)
("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
if ((core_if->snpsid >= OTG_CORE_REV_3_00a)
- && (core_if->dma_enable == 1) && (core_if->dma_desc_enable == 0))
+ && (core_if->dma_enable == 1) && (core_if->dma_desc_enable == 0)) {
+ if (doeptsize0.b.supcnt == 3 && ep0->dwc_ep.stp_rollover == 0) {
+ DWC_ERROR(" !!! Setup packet count\n");
+ return;
+ }
ctrl =
(pcd->setup_pkt +
(3 - doeptsize0.b.supcnt - 1 +
ep0->dwc_ep.stp_rollover))->req;
-
+ }
#ifdef DEBUG_EP0
DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
ctrl.bmRequestType, ctrl.bRequest,
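
The guard added above exists because the offset into pcd->setup_pkt is computed as 3 - supcnt - 1 + stp_rollover, which evaluates to -1 when supcnt is still 3 and no rollover has occurred, i.e. a read before the start of the array. A minimal stand-alone sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int supcnt, rollover;

	/* enumerate the offset used for (pcd->setup_pkt + ...) above */
	for (supcnt = 0; supcnt <= 3; supcnt++)
		for (rollover = 0; rollover <= 1; rollover++)
			printf("supcnt=%u stp_rollover=%u -> index %d\n",
			       supcnt, rollover,
			       3 - (int)supcnt - 1 + (int)rollover);
	return 0;
}
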
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 0a3fadc..32ba395 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -31,8 +31,11 @@
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
#include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
@@ -143,6 +146,10 @@ static inline void lockup_detector_resume(void)
}
#endif
+extern void watchdog_check_hardlockup_other_cpu(void);
+extern int watchdog_nmi_enable(unsigned int cpu);
+extern void watchdog_nmi_disable(unsigned int cpu);
+
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 63177be..0b3638c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -78,10 +78,10 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
-static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
+DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
unsigned int __read_mostly softlockup_panic =
@@ -211,6 +211,7 @@ void touch_softlockup_watchdog_sync(void)
}
/* watchdog detector functions */
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
bool is_hardlockup(void)
{
unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
@@ -221,6 +222,7 @@ bool is_hardlockup(void)
__this_cpu_write(hrtimer_interrupts_saved, hrint);
return false;
}
+#endif
static int is_softlockup(unsigned long touch_ts)
{
@@ -239,18 +241,6 @@ static void watchdog_interrupt_count(void)
__this_cpu_inc(hrtimer_interrupts);
}
-/*
- * These two functions are mostly architecture specific
- * defining them as weak here.
- */
-int __weak watchdog_nmi_enable(unsigned int cpu)
-{
- return 0;
-}
-void __weak watchdog_nmi_disable(unsigned int cpu)
-{
-}
-
static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
@@ -268,6 +258,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* kick the hardlockup detector */
watchdog_interrupt_count();
+ /* test for hardlockups on the next cpu */
+ watchdog_check_hardlockup_other_cpu();
+
/* kick the softlockup detector */
wake_up_process(__this_cpu_read(softlockup_watchdog));
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 12b8dd6..ac528e1 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -18,7 +18,14 @@
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+#endif
+DECLARE_PER_CPU(unsigned long, hrtimer_interrupts);
+DECLARE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
/* boot commands */
/*
@@ -26,7 +33,7 @@ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
*/
unsigned int __read_mostly hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -68,6 +75,7 @@ void touch_nmi_watchdog(void)
}
EXPORT_SYMBOL(touch_nmi_watchdog);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -131,7 +139,84 @@ static void watchdog_overflow_callback(struct perf_event *event,
__this_cpu_write(hard_watchdog_warn, false);
return;
}
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+ cpumask_t cpus = watchdog_cpus;
+ unsigned int next_cpu;
+
+ next_cpu = cpumask_next(cpu, &cpus);
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(&cpus);
+
+ if (next_cpu == cpu)
+ return nr_cpu_ids;
+
+ return next_cpu;
+}
+
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+ unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+ if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+ return 1;
+
+ per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+ return 0;
+}
+
+void watchdog_check_hardlockup_other_cpu(void)
+{
+ unsigned int next_cpu;
+
+ /*
+ * Test for hardlockups every 3 samples. The sample period is
+ * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+ * watchdog_thresh (over by 20%).
+ */
+ if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+ return;
+
+ /* check for a hardlockup on the next cpu */
+ next_cpu = watchdog_next_cpu(smp_processor_id());
+ if (next_cpu >= nr_cpu_ids)
+ return;
+
+	/* pairs with the smp_wmb() in watchdog_nmi_enable()/disable() */
+ smp_rmb();
+
+ if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+ per_cpu(watchdog_nmi_touch, next_cpu) = false;
+ return;
+ }
+
+ if (is_hardlockup_other_cpu(next_cpu)) {
+ /* only warn once */
+ if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+ return;
+
+ if (hardlockup_panic)
+ panic("Watchdog detected hard LOCKUP on cpu %u",
+ next_cpu);
+ else
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %u",
+ next_cpu);
+
+ per_cpu(hard_watchdog_warn, next_cpu) = true;
+ } else {
+ per_cpu(hard_watchdog_warn, next_cpu) = false;
+ }
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
+
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/*
* People like the simple clean cpu node info on boot.
* Reduce the watchdog noise by only printing messages
@@ -228,3 +313,44 @@ void watchdog_nmi_disable(unsigned int cpu)
cpu0_err = 0;
}
}
+#else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+int watchdog_nmi_enable(unsigned int cpu)
+{
+ /*
+ * The new cpu will be marked online before the first hrtimer interrupt
+ * runs on it. If another cpu tests for a hardlockup on the new cpu
+ * before it has run its first hrtimer, it will get a false positive.
+ * Touch the watchdog on the new cpu to delay the first check for at
+ * least 3 sampling periods to guarantee one hrtimer has run on the new
+ * cpu.
+ */
+ per_cpu(watchdog_nmi_touch, cpu) = true;
+	/* make the touch visible before this cpu is added to watchdog_cpus */
+ smp_wmb();
+ cpumask_set_cpu(cpu, &watchdog_cpus);
+ return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+ unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+ /*
+ * Offlining this cpu will cause the cpu before this one to start
+ * checking the one after this one. If this cpu just finished checking
+ * the next cpu and updating hrtimer_interrupts_saved, and then the
+ * previous cpu checks it within one sample period, it will trigger a
+ * false positive. Touch the watchdog on the next cpu to prevent it.
+ */
+ if (next_cpu < nr_cpu_ids)
+ per_cpu(watchdog_nmi_touch, next_cpu) = true;
+	/* make the touch on next_cpu visible before this cpu leaves watchdog_cpus */
+ smp_wmb();
+ cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
+#else
+int watchdog_nmi_enable(unsigned int cpu) { return 0; }
+void watchdog_nmi_disable(unsigned int cpu) { return; }
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
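
The "every 3 samples" check above lines up with the hrtimer sample period of watchdog_thresh * 2 / 5 noted in the comment, giving a detection window of roughly 1.2 * watchdog_thresh. A stand-alone sketch of that timing (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	int watchdog_thresh = 10;	/* default threshold, in seconds */

	/* hrtimer sample period used by the softlockup watchdog */
	double sample_period = watchdog_thresh * 2.0 / 5.0;	/* 4 s */

	/* the other-cpu hardlockup check runs every third sample */
	double check_window = 3 * sample_period;		/* 12 s */

	printf("sample period %.1f s, hardlockup check every %.1f s (%.0f%% over watchdog_thresh)\n",
	       sample_period, check_window,
	       (check_window / watchdog_thresh - 1) * 100);
	return 0;
}
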
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 181c2ad..c272428 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2013,11 +2013,20 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
+#ifdef CONFIG_AMLOGIC_MODIFY
+ int work_color;
+ struct worker *collision;
+ bool cpu_intensive;
+ struct pool_workqueue *pwq = get_work_pwq(work);
+ struct worker_pool *pool = worker->pool;
+#else
struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool;
bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
int work_color;
struct worker *collision;
+#endif
+
#ifdef CONFIG_LOCKDEP
/*
* It is permissible to free the struct work_struct from
@@ -2030,6 +2039,14 @@ __acquires(&pool->lock)
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
+#ifdef CONFIG_AMLOGIC_MODIFY
+ if (!pwq) {
+		WARN_ONCE(1, "pool_workqueue is NULL\n");
+ return;
+ }
+
+ cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
+#endif
/* ensure we're on the correct CPU */
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
raw_smp_processor_id() != pool->cpu);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db71a20..d8a4358 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -764,15 +764,27 @@ config LOCKUP_DETECTOR
The overhead should be minimal. A periodic hrtimer runs to
generate interrupts and kick the watchdog task every 4 seconds.
An NMI is generated every 10 seconds or so to check for hardlockups.
+ If NMIs are not available on the platform, every 12 seconds the
+ hrtimer interrupt on one cpu will be used to check for hardlockups
+ on the next cpu.
The frequency of hrtimer and NMI events and the soft and hard lockup
thresholds can be controlled through the sysctl watchdog_thresh.
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
def_bool y
depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+ def_bool y
+ depends on LOCKUP_DETECTOR && SMP
+ depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+ def_bool y
+ depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
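
Read together, the new symbols select at most one hardlockup backend and keep HARDLOCKUP_DETECTOR as the umbrella option. A stand-alone sketch of how the dependencies resolve (illustrative only; the "SMP arm64 without perf NMIs" values are an assumption for a board like meson64):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* assumed platform values: SMP arm64 kernel without perf NMIs */
	bool lockup_detector = true;
	bool have_nmi_watchdog = false;
	bool perf_events = true;
	bool have_perf_events_nmi = false;
	bool smp = true;

	/* mirrors the "depends on" lines in the hunk above */
	bool detector_nmi = lockup_detector && !have_nmi_watchdog &&
			    perf_events && have_perf_events_nmi;
	bool detector_other_cpu = lockup_detector && smp &&
				  !detector_nmi && !have_nmi_watchdog;
	bool hardlockup_detector = detector_nmi || detector_other_cpu;

	printf("HARDLOCKUP_DETECTOR_NMI=%d OTHER_CPU=%d HARDLOCKUP_DETECTOR=%d\n",
	       detector_nmi, detector_other_cpu, hardlockup_detector);
	return 0;
}
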