git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git/commitdiff
kunit: irq: Ensure timer doesn't fire too frequently
author: Eric Biggers <ebiggers@kernel.org>
Tue, 24 Feb 2026 03:37:51 +0000 (19:37 -0800)
committer: Eric Biggers <ebiggers@kernel.org>
Tue, 24 Feb 2026 22:44:21 +0000 (14:44 -0800)
Fix a bug where kunit_run_irq_test() could hang if the system is too
slow.  This was noticed with the crypto library tests in certain VMs.

Specifically, if kunit_irq_test_timer_func() and the associated hrtimer
code took over 5us to run, then the CPU would spend all its time
executing that code in hardirq context.  As a result, the task executing
kunit_run_irq_test() never had a chance to run, exit the loop, and
cancel the timer.

To fix it, make kunit_irq_test_timer_func() increase the timer interval
when the other contexts aren't having a chance to run.

Fixes: 950a81224e8b ("lib/crypto: tests: Add hash-test-template.h and gen-hash-testvecs.py")
Cc: stable@vger.kernel.org
Reviewed-by: David Gow <david@davidgow.net>
Link: https://lore.kernel.org/r/20260224033751.97615-1-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
include/kunit/run-in-irq-context.h

index c89b1b1b12dd585192bce5c07c64f39da6b4ceed..bfe60d6cf28d86cba9a9546c8dac04acafee1b60 100644 (file)
 #include <linux/hrtimer.h>
 #include <linux/workqueue.h>
 
-#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
-
 struct kunit_irq_test_state {
        bool (*func)(void *test_specific_state);
        void *test_specific_state;
        bool task_func_reported_failure;
        bool hardirq_func_reported_failure;
        bool softirq_func_reported_failure;
+       atomic_t task_func_calls;
        atomic_t hardirq_func_calls;
        atomic_t softirq_func_calls;
+       ktime_t interval;
        struct hrtimer timer;
        struct work_struct bh_work;
 };
@@ -30,14 +30,25 @@ static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
 {
        struct kunit_irq_test_state *state =
                container_of(timer, typeof(*state), timer);
+       int task_calls, hardirq_calls, softirq_calls;
 
        WARN_ON_ONCE(!in_hardirq());
-       atomic_inc(&state->hardirq_func_calls);
+       task_calls = atomic_read(&state->task_func_calls);
+       hardirq_calls = atomic_inc_return(&state->hardirq_func_calls);
+       softirq_calls = atomic_read(&state->softirq_func_calls);
+
+       /*
+        * If the timer is firing too often for the softirq or task to ever have
+        * a chance to run, increase the timer interval.  This is needed on very
+        * slow systems.
+        */
+       if (hardirq_calls >= 20 && (softirq_calls == 0 || task_calls == 0))
+               state->interval = ktime_add_ns(state->interval, 250);
 
        if (!state->func(state->test_specific_state))
                state->hardirq_func_reported_failure = true;
 
-       hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+       hrtimer_forward_now(&state->timer, state->interval);
        queue_work(system_bh_wq, &state->bh_work);
        return HRTIMER_RESTART;
 }
@@ -86,10 +97,14 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
        struct kunit_irq_test_state state = {
                .func = func,
                .test_specific_state = test_specific_state,
+               /*
+                * Start with a 5us timer interval.  If the system can't keep
+                * up, kunit_irq_test_timer_func() will increase it.
+                */
+               .interval = us_to_ktime(5),
        };
        unsigned long end_jiffies;
-       int hardirq_calls, softirq_calls;
-       bool allctx = false;
+       int task_calls, hardirq_calls, softirq_calls;
 
        /*
         * Set up a hrtimer (the way we access hardirq context) and a work
@@ -104,21 +119,18 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
         * and hardirq), or 1 second, whichever comes first.
         */
        end_jiffies = jiffies + HZ;
-       hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
-                     HRTIMER_MODE_REL_HARD);
-       for (int task_calls = 0, calls = 0;
-            ((calls < max_iterations) || !allctx) &&
-            !time_after(jiffies, end_jiffies);
-            task_calls++) {
+       hrtimer_start(&state.timer, state.interval, HRTIMER_MODE_REL_HARD);
+       do {
                if (!func(test_specific_state))
                        state.task_func_reported_failure = true;
 
+               task_calls = atomic_inc_return(&state.task_func_calls);
                hardirq_calls = atomic_read(&state.hardirq_func_calls);
                softirq_calls = atomic_read(&state.softirq_func_calls);
-               calls = task_calls + hardirq_calls + softirq_calls;
-               allctx = (task_calls > 0) && (hardirq_calls > 0) &&
-                        (softirq_calls > 0);
-       }
+       } while ((task_calls + hardirq_calls + softirq_calls < max_iterations ||
+                 (task_calls == 0 || hardirq_calls == 0 ||
+                  softirq_calls == 0)) &&
+                !time_after(jiffies, end_jiffies));
 
        /* Cancel the timer and work. */
        hrtimer_cancel(&state.timer);