Created June 28, 2019 02:57
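What follows is a single-step GDB trace through the idle path of a circa-2019 x86-64 Linux kernel: do_idle() reaches cpuidle_idle_call(), which, with no cpuidle device registered, stops the nohz tick, tells RCU the CPU is entering an extended quiescent state, and finally halts in native_safe_halt(). Lines of the form "function () at file:line" are GDB frame reports; the numbered lines are the kernel source lines executed at each step, and the <optimized out> arguments are what stepping through an optimized kernel build looks like.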
89 if (current_clr_polling_and_test()) { | |
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
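The block above is the very bottom of the idle path: the indirect call through x86_idle() goes via a retpoline thunk (retpoline.S:32) and lands in default_idle(), mds_idle_clear_cpu_buffers() is the MDS mitigation's static-branch hook on the same inlined path, and the CPU halts in native_safe_halt(). The "sti; hlt" pair is the important detail: sti has a one-instruction interrupt shadow, so no interrupt can be delivered between re-enabling interrupts and halting, closing the window where a wakeup arriving right before hlt would be slept through. The pattern, as executed above (ring 0 only; reproduced here for annotation, not as runnable user code):

/* arch/x86/include/asm/irqflags.h:60, as executed above.  'sti' holds
 * interrupt delivery off until the next instruction retires, so an
 * interrupt pending at this point wakes the CPU out of this very
 * 'hlt' rather than firing just before it and being missed. */
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt" : : : "memory");
}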
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
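rcu_idle_exit() above ends the extended quiescent state (EQS) entered before the previous halt: rcu_dynticks_eqs_exit() does atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks), so every EQS transition bumps a per-CPU counter, and RCU can tell from one bit of that counter whether the CPU is currently idle and from its value whether the CPU passed through idle since it was last sampled. A minimal user-space model, using this kernel era's constants from kernel/rcu/tree.c (a sketch, not the kernel's implementation):

#include <stdio.h>
#include <stdatomic.h>

/* Toy model of rcu_data.dynticks: the low bit (RCU_DYNTICK_CTRL_MASK)
 * is reserved for nohz-full signalling, and each EQS enter/exit adds
 * RCU_DYNTICK_CTRL_CTR, toggling bit 1.  Bit 1 clear == the CPU is
 * idle as far as RCU is concerned. */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static atomic_int dynticks = RCU_DYNTICK_CTRL_CTR;  /* boot value: not idle */

static int in_eqs(void)
{
        return !(atomic_load(&dynticks) & RCU_DYNTICK_CTRL_CTR);
}

static void eqs_enter(void) { atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR); }
static void eqs_exit(void)  { atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR); }

int main(void)
{
        printf("running: in_eqs=%d\n", in_eqs());  /* 0 */
        eqs_enter();  /* what rcu_idle_enter() did before the halt */
        printf("idle:    in_eqs=%d\n", in_eqs());  /* 1 */
        eqs_exit();   /* what rcu_idle_exit() does above */
        printf("running: in_eqs=%d\n", in_eqs());  /* 0 */
        return 0;
}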
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
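Everything from do_idle() at line 264 down to here is one trip around the top of the idle loop after the wakeup: arch_cpu_idle_exit() (an empty __weak stub; GDB attributes it to the neighbouring stub's line, idle.c:72), the need_resched() recheck via a thread-flag test, the rmb(), the cpu_is_offline() rule-out, local_irq_disable(), and the x86 arch_cpu_idle_enter() hooks, where tsc_verify_tsc_adjust() revalidates the TSC_ADJUST bookkeeping and local_touch_nmi() zeroes the recorded RIP of the last NMI. With cpu_idle_force_poll and tick_broadcast_force_mask both clear, the next stop is cpuidle_idle_call().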
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
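Note that GDB prints dev=0x0 here: no cpuidle device is registered on this (presumably virtual) machine, so cpuidle_not_available() short-circuits to true at line 51 and cpuidle_idle_call() takes its fallback branch rather than asking a governor for a C-state. That branch is exactly what the rest of the trace executes: tick_nohz_idle_stop_tick(), then rcu_idle_enter(), then default_idle_call(). (The <irq_stack_union> annotation is just GDB resolving address 0 to the nearest per-CPU symbol.)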
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
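raw_spin_lock() above inlines down to the queued spinlock's fast path: a single try_cmpxchg of the lock word from 0 to locked (atomic.h:200). Only under contention does it fall into the MCS-queue slow path, which this uncontended acquisition never reaches. A user-space model of just the fast path (the slow path is omitted entirely):

#include <stdio.h>
#include <stdatomic.h>

#define LOCKED 1

static atomic_int lock_val;

/* Mirrors arch_atomic_try_cmpxchg(&v->counter, &old, new) as seen
 * above: succeed iff the lock word was 0 and is now LOCKED. */
static int spin_trylock_fast(void)
{
        int expected = 0;
        return atomic_compare_exchange_strong(&lock_val, &expected, LOCKED);
}

int main(void)
{
        printf("first acquire:  %s\n", spin_trylock_fast() ? "fast path" : "slow path");
        printf("second acquire: %s\n", spin_trylock_fast() ? "fast path" : "slow path");
        return 0;
}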
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=128, offset=98) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=98, offset=64) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=64, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=1, offset=96) at lib/find_bit.c:75 | |
75 } | |
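The find_next_bit()/_find_next_bit() calls above implement the pending-bucket search: mask off the bits below the starting offset in its word (BITMAP_FIRST_WORD_MASK), walk whole 64-bit words while they are zero, then finish with a hardware bit scan (the kernel's __ffs is "rep; bsf"). A user-space rendering of lib/find_bit.c's loop; the kernel version also takes a second bitmap and an inversion mask, dropped in this sketch:

#include <stdio.h>

#define BITS_PER_LONG 64
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))

static unsigned long find_next_bit(const unsigned long *addr,
                                   unsigned long nbits, unsigned long start)
{
        unsigned long tmp, bit;

        if (start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_LONG];
        tmp &= BITMAP_FIRST_WORD_MASK(start);            /* lines 43/49 */
        start = start / BITS_PER_LONG * BITS_PER_LONG;   /* round_down */

        while (!tmp) {                                   /* lines 52-54 */
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;
                tmp = addr[start / BITS_PER_LONG];
        }

        bit = start + __builtin_ctzl(tmp);               /* __ffs */
        return bit < nbits ? bit : nbits;                /* min(), line 63 */
}

int main(void)
{
        unsigned long map[2] = { 0, 1UL << 34 };         /* bit 98 set */
        printf("next bit >= 64: %lu\n", find_next_bit(map, 128, 64));  /* 98 */
        return 0;
}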
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=192, offset=141) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=141, offset=128) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=256, offset=242) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=242, offset=192) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=320, offset=319) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=319, offset=256) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=256, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=4, offset=265) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=384, offset=368) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=320, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=5, offset=373) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=448, offset=446) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=446, offset=384) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=512, offset=456) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=448, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=7, offset=469) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=576, offset=513) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=513, offset=512) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1475 } | |
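The long stretch above (timer.c lines 1424-1472, looping) is __next_timer_interrupt() scanning the timer wheel for the soonest pending timer: nine levels of 64 buckets each share one 576-bit pending_map, which is why the find_next_bit() calls walk offsets 0, 64, 128, ... with sizes 64, 128, ..., 576. After each level the level-local clock is shifted right by LVL_CLK_SHIFT, so every level is 8x coarser than the one below. The geometry, using the mainline constants from kernel/time/timer.c (LVL_BITS = 6, LVL_CLK_SHIFT = 3, LVL_DEPTH = 9 for HZ > 100):

#include <stdio.h>

#define LVL_CLK_SHIFT 3
#define LVL_BITS      6
#define LVL_SIZE      (1UL << LVL_BITS)      /* 64 buckets per level */
#define LVL_SHIFT(n)  ((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)   (1UL << LVL_SHIFT(n))  /* jiffies per bucket */
#define LVL_DEPTH     9

int main(void)
{
        for (int lvl = 0; lvl < LVL_DEPTH; lvl++)
                printf("level %d: pending_map offset %3lu, granularity %8lu jiffies, "
                       "reach ~%lu jiffies\n",
                       lvl, lvl * LVL_SIZE, LVL_GRAN(lvl),
                       LVL_SIZE * LVL_GRAN(lvl));
        return 0;
}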
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1534 | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1535 base->next_expiry = nextevt; | |
1541 if (time_after(basej, base->clk)) { | |
1548 if (time_before_eq(nextevt, basej)) { | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1521 u64 expires = KTIME_MAX; | |
1552 if (!is_max_delta) | |
1553 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; | |
1561 if ((expires - basem) > TICK_NSEC) { | |
1562 base->must_forward_clk = true; | |
1563 base->is_idle = true; | |
1566 raw_spin_unlock(&base->lock); | |
__raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
queued_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1568 | |
1568 return cmp_next_hrtimer_event(basem, expires); | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1483 | |
1483 u64 nextevt = hrtimer_get_next_event(); | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1198 | |
1198 { | |
1199 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:151 | |
151 { | |
152 return __raw_spin_lock_irqsave(lock); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_irq_save () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_save_flags () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
do_raw_spin_lock_flags (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
queued_spin_lock (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:153 | |
153 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1205 | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1208 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
queued_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:185 | |
185 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1211 | |
1211 } | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1489 | |
1489 if (expires <= nextevt) | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1569 | |
1569 } | |
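Back in get_next_timer_interrupt(), the wheel's answer (a jiffies value) is converted to an absolute nanosecond expiry at line 1553, expires = basem + (nextevt - basej) * TICK_NSEC, or left at KTIME_MAX when nothing is pending, and then clipped against the next hrtimer event. A worked example using the basej/basem values GDB printed; TICK_NSEC depends on the kernel's HZ, and HZ = 1000 plus the 10-tick delta are assumptions for illustration, not values from the trace:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t basej = 4311212808ULL;       /* jiffies, from the trace */
        uint64_t basem = 16546416592687ULL;   /* nanoseconds, from the trace */
        uint64_t tick_nsec = 1000000ULL;      /* assumed HZ = 1000 */
        uint64_t nextevt = basej + 10;        /* hypothetical next timer */

        uint64_t expires = basem + (nextevt - basej) * tick_nsec;
        printf("expires = %llu ns (%llu ns from now)\n",
               (unsigned long long)expires,
               (unsigned long long)(expires - basem));
        return 0;
}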
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:683 | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
681 ts->next_timer = next_tmr; | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
691 if (delta <= (u64)TICK_NSEC) { | |
712 delta = timekeeping_max_deferment(); | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1452 | |
1452 { | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1460 | |
1460 ret = tk->tkr_mono.clock->max_idle_ns; | |
1462 } while (read_seqcount_retry(&tk_core.seq, seq)); | |
1465 } | |
tick_nohz_next_event (ts=0xffff88800781ba40, cpu=130) at kernel/time/tick-sched.c:713 | |
713 if (cpu != tick_do_timer_cpu && | |
715 delta = KTIME_MAX; | |
718 if (delta < (KTIME_MAX - basemono)) | |
719 expires = basemono + delta; | |
723 ts->timer_expires = min_t(u64, expires, next_tick); | |
727 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:931 | |
931 ts->idle_calls++; | |
933 if (expires > 0LL) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:957 | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:934 | |
934 int was_stopped = ts->tick_stopped; | |
936 tick_nohz_stop_tick(ts, cpu); | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:731 | |
731 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | |
747 if (cpu == tick_do_timer_cpu) { | |
732 u64 basemono = ts->timer_expires_base; | |
733 u64 expires = ts->timer_expires; | |
737 ts->timer_expires_base = 0; | |
747 if (cpu == tick_do_timer_cpu) { | |
750 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
755 if (ts->tick_stopped && (expires == ts->next_tick)) { | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:757 | |
757 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:938 | |
938 ts->idle_sleeps++; | |
941 if (!was_stopped && ts->tick_stopped) { | |
939 ts->idle_expires = expires; | |
941 if (!was_stopped && ts->tick_stopped) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:958 | |
958 } | |
cpuidle_idle_call () at kernel/sched/idle.c:151 | |
151 rcu_idle_enter(); | |
rcu_idle_enter () at kernel/rcu/tree.c:645 | |
645 { | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:610 | |
610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
612 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); | |
616 if (rdp->dynticks_nesting != 1) { | |
613 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:616 | |
616 if (rdp->dynticks_nesting != 1) { | |
rcu_idle_enter () at kernel/rcu/tree.c:647 | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:622 | |
622 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); | |
624 rdp = this_cpu_ptr(&rcu_data); | |
628 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:629 | |
629 rcu_dynticks_eqs_enter(); | |
rcu_dynticks_eqs_enter () at kernel/rcu/tree.c:227 | |
227 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:235 | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_idle_enter () at kernel/rcu/tree.c:648 | |
648 } | |
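
The rcu_eqs_enter() sequence above zeroes both nesting counters with WRITE_ONCE() and then advances rdp->dynticks with a fully ordered atomic_add_return() (the xadd in arch_atomic_add_return() is what makes the transition a full barrier). In this kernel vintage the counter encodes two things; a sketch of the convention, assuming RCU_DYNTICK_CTRL_MASK == 0x1 and RCU_DYNTICK_CTRL_CTR == 0x2 as in kernel/rcu/tree.c of this era:

    /* sketch: how a remote CPU interprets a dynticks snapshot */
    int snap = atomic_read(&rdp->dynticks);

    bool cpu_in_eqs   = !(snap & RCU_DYNTICK_CTRL_CTR); /* idle/EQS if CTR bit clear */
    bool special_work =   snap & RCU_DYNTICK_CTRL_MASK; /* deferred action pending */

Each enter/exit adds RCU_DYNTICK_CTRL_CTR, so the CTR bit flips on every idle transition.
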
cpuidle_idle_call () at kernel/sched/idle.c:153 | |
153 default_idle_call(); | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
current_clr_polling_and_test () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
__current_clr_polling () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_clr_polling () at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:109 | |
109 asm volatile(LOCK_PREFIX "andb %1,%0" | |
current_clr_polling_and_test () at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
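
current_clr_polling_and_test() above pairs a locked "andb" that clears TIF_POLLING_NRFLAG with a re-check of TIF_NEED_RESCHED, so a reschedule request that raced with the clear is still observed. The flag probe gdb landed in is a plain bitmap read; an equivalent sketch:

    /* sketch of constant_test_bit(): pure load, no atomics needed for a test */
    static inline bool test_bit_sketch(long nr, const unsigned long *addr)
    {
            return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }
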
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
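
That "sti; hlt" is the whole point of default_idle(): sti keeps interrupts masked for one more instruction (the interrupt shadow), so the IRQ window opens only once hlt is already executing, and a wakeup interrupt cannot slip in between the two and be lost. As a sketch (meaningful only at CPL 0; hlt faults in user mode):

    /* sketch of native_safe_halt(): atomically enable IRQs and halt */
    static inline void safe_halt_sketch(void)
    {
            asm volatile("sti; hlt" : : : "memory");  /* an IRQ resumes after hlt */
    }
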
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
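
Several of the steps above bottom out in __write_once_size(), the compiler.h switch behind WRITE_ONCE(). For the 8-byte fields written here (dynticks_nesting, dynticks_nmi_nesting) it is just a size-matched volatile store, which stops the compiler from tearing, duplicating, or eliding the write:

    /* sketch: WRITE_ONCE() on a u64 is a single volatile store */
    static inline void write_once_u64(u64 *p, u64 val)
    {
            *(volatile u64 *)p = val;
    }
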
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
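
The frame above is mildly confusing: gdb names arch_cpu_idle_exit() but prints the source line of arch_cpu_idle_prepare(). Both are empty __weak stubs defined next to each other in kernel/sched/idle.c, so one plausible (unconfirmed) explanation is that the compiler or linker folded the identical bodies to a single address and the debug info kept only one attribution:

    /* the stubs are byte-identical, so identical-code folding can merge them */
    void __weak arch_cpu_idle_prepare(void) { }
    void __weak arch_cpu_idle_exit(void) { }
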
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
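
tsc_verify_tsc_adjust() above returns almost immediately: boot_cpu_has() is one more constant_test_bit(), this time into the boot CPU's capability bitmap, and X86_FEATURE_TSC_ADJUST is absent on this CPU (the trace jumps from line 53 straight to the closing brace at line 78). A hedged sketch of the check:

    /* sketch: boot_cpu_has() probes boot_cpu_data's capability bit array */
    if (!test_bit(X86_FEATURE_TSC_ADJUST,
                  (unsigned long *)boot_cpu_data.x86_capability))
            return;   /* nothing to verify without the MSR */
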
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
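
Note the arguments: gdb printed drv=0x0 and dev=0x0, so no cpuidle driver is registered on this machine. cpuidle_not_available() therefore short-circuits to true at the !drv term, the governor path is skipped, and execution falls through to the tick-stop plus default_idle_call() path this trace keeps returning to:

    /* traced line 51: with drv == NULL the || chain stops at !drv */
    return off || !initialized || !drv || !dev || !dev->enabled;
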
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
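
irq_work_needs_cpu() above finds both per-CPU irq_work lists empty and returns false. The llist_empty() calls it makes are single ordered loads, which is why gdb stepped into __read_once_size() twice; a self-contained sketch:

    struct llist_node { struct llist_node *next; };
    struct llist_head { struct llist_node *first; };

    /* sketch of llist_empty(): one volatile load of the lock-free list head */
    static inline bool llist_empty_sketch(const struct llist_head *head)
    {
            /* READ_ONCE(head->first): no lock taken, result may be stale */
            return ((volatile const struct llist_head *)head)->first == NULL;
    }
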
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
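
The raw_spin_lock() above compiles down (no lockdep in this config) to the queued-spinlock fast path: a single try_cmpxchg that moves the lock word from 0 to "locked", falling into the slow path only under contention; that is the arch_atomic_try_cmpxchg() frame in the trace. A hedged sketch of the fast path, per this kernel's qspinlock:

    /* sketch of queued_spin_lock(): uncontended fast path */
    static inline void queued_spin_lock_sketch(struct qspinlock *lock)
    {
            int val = 0;

            if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                    return;
            queued_spin_lock_slowpath(lock, val);  /* contended: queue and spin */
    }
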
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=128, offset=98) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=98, offset=64) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=64, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=1, offset=96) at lib/find_bit.c:75 | |
75 } | |
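
The repeated excursions into lib/find_bit.c are the timer wheel scanning its pending-bit bitmap. _find_next_bit() works a word at a time: mask off the bits below the start offset in the first word, skip zero words in BITS_PER_LONG strides, then bit-scan-forward (the "rep; bsf" in the trace) within the first nonzero word. A self-contained sketch:

    #include <limits.h>
    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    /* sketch of _find_next_bit(): index of the first set bit in [start, nbits),
     * or nbits if none is set */
    static unsigned long find_next_bit_sketch(const unsigned long *addr,
                                              unsigned long nbits,
                                              unsigned long start)
    {
            unsigned long tmp;

            if (start >= nbits)
                    return nbits;

            tmp = addr[start / BITS_PER_LONG];
            tmp &= ~0UL << (start % BITS_PER_LONG);  /* BITMAP_FIRST_WORD_MASK */
            start -= start % BITS_PER_LONG;          /* round_down to word edge */

            while (!tmp) {                           /* skip all-zero words */
                    start += BITS_PER_LONG;
                    if (start >= nbits)
                            return nbits;
                    tmp = addr[start / BITS_PER_LONG];
            }
            start += __builtin_ctzl(tmp);            /* __ffs(): rep; bsf */
            return start < nbits ? start : nbits;
    }
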
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=192, offset=141) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=141, offset=128) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=256, offset=242) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=242, offset=192) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=320, offset=319) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=319, offset=256) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=256, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=4, offset=265) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=384, offset=368) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=320, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=5, offset=373) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=448, offset=446) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=446, offset=384) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=512, offset=456) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=448, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=7, offset=469) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=576, offset=513) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=513, offset=512) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1475 } | |
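
The long walk above is __next_timer_interrupt() traversing the hierarchical timer wheel. Per level it searches that level's slice of the pending bitmap, wrapping around the level (hence the paired find_next_bit() calls at lines 1405 and 1409), keeps the earliest expiry found, and coarsens the clock before moving up a level. The structure, condensed from the traced lines (LVL_* constants as defined in kernel/time/timer.c):

    /* condensed sketch of the per-level scan in __next_timer_interrupt() */
    next = base->clk + NEXT_TIMER_MAX_DELTA;
    clk  = base->clk;
    for (lvl = 0, offset = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
            int pos = next_pending_bucket(base, offset, clk & LVL_MASK);

            if (pos >= 0) {
                    unsigned long tmp = clk + (unsigned long)pos;

                    tmp <<= LVL_SHIFT(lvl);          /* bucket index -> jiffies */
                    if (time_before(tmp, next))
                            next = tmp;
            }
            adj  = clk & LVL_CLK_MASK ? 1 : 0;       /* round partial clock up */
            clk >>= LVL_CLK_SHIFT;                   /* coarser granularity */
            clk += adj;
    }
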
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1534 | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1535 base->next_expiry = nextevt; | |
1541 if (time_after(basej, base->clk)) { | |
1548 if (time_before_eq(nextevt, basej)) { | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1521 u64 expires = KTIME_MAX; | |
1552 if (!is_max_delta) | |
1553 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; | |
1561 if ((expires - basem) > TICK_NSEC) { | |
1562 base->must_forward_clk = true; | |
1563 base->is_idle = true; | |
1566 raw_spin_unlock(&base->lock); | |
__raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
queued_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1568 | |
1568 return cmp_next_hrtimer_event(basem, expires); | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1483 | |
1483 u64 nextevt = hrtimer_get_next_event(); | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1198 | |
1198 { | |
1199 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:151 | |
151 { | |
152 return __raw_spin_lock_irqsave(lock); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_irq_save () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_save_flags () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
do_raw_spin_lock_flags (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
queued_spin_lock (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:153 | |
153 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1205 | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1208 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
queued_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:185 | |
185 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1211 | |
1211 } | |
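
hrtimer_get_next_event() above brackets its cpu_base->lock with raw_spin_lock_irqsave()/unlock_irqrestore(). On x86 that decomposes exactly as traced: read RFLAGS, cli to mask interrupts, take the lock; on the way out, release the lock and push the saved flags back through popf, which re-enables interrupts only if they were enabled at save time. Sketches of the two primitives:

    /* sketch of native_save_fl(): read RFLAGS via pushf/pop */
    static inline unsigned long save_fl_sketch(void)
    {
            unsigned long flags;

            asm volatile("pushf ; pop %0" : "=rm"(flags) : : "memory");
            return flags;
    }

    /* sketch of native_restore_fl(): write RFLAGS back via push/popf */
    static inline void restore_fl_sketch(unsigned long flags)
    {
            asm volatile("push %0 ; popf" : : "g"(flags) : "memory", "cc");
    }
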
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1489 | |
1489 if (expires <= nextevt) | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1569 | |
1569 } | |
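
get_next_timer_interrupt() then turns the wheel's jiffies-granular answer into an absolute nanosecond deadline and lets the hrtimer queue bid against it. The conversion, from the traced lines (simplified: when the hrtimer event wins, the real cmp_next_hrtimer_event() also clamps it to basem and rounds it up to a jiffy boundary):

    /* sketch: wheel expiry in jiffies -> ns, then earliest-event selection */
    u64 expires = KTIME_MAX;

    if (!is_max_delta)
            expires = basem + (u64)(nextevt - basej) * TICK_NSEC;

    nextevt = hrtimer_get_next_event();
    return expires <= nextevt ? expires : nextevt;   /* hrtimer case simplified */
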
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:683 | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
681 ts->next_timer = next_tmr; | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
691 if (delta <= (u64)TICK_NSEC) { | |
712 delta = timekeeping_max_deferment(); | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1452 | |
1452 { | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1460 | |
1460 ret = tk->tkr_mono.clock->max_idle_ns; | |
1462 } while (read_seqcount_retry(&tk_core.seq, seq)); | |
1465 } | |
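
timekeeping_max_deferment() above is the canonical seqcount reader, which is why gdb bounced through read_seqcount_begin()/read_seqcount_retry() twice in this trace: sample the sequence (waiting while it is odd, i.e. a writer is mid-update), read the protected data, and retry if the sequence moved. Its shape:

    /* sketch of the lockless read loop stepped through above */
    u64 max_deferment_sketch(void)
    {
            unsigned int seq;
            u64 ret;

            do {
                    seq = read_seqcount_begin(&tk_core.seq); /* waits out odd seq */
                    ret = tk_core.timekeeper.tkr_mono.clock->max_idle_ns;
            } while (read_seqcount_retry(&tk_core.seq, seq));

            return ret;
    }
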
tick_nohz_next_event (ts=0xffff88800781ba40, cpu=130) at kernel/time/tick-sched.c:713 | |
713 if (cpu != tick_do_timer_cpu && | |
715 delta = KTIME_MAX; | |
718 if (delta < (KTIME_MAX - basemono)) | |
719 expires = basemono + delta; | |
723 ts->timer_expires = min_t(u64, expires, next_tick); | |
727 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:931 | |
931 ts->idle_calls++; | |
933 if (expires > 0LL) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:957 | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:934 | |
934 int was_stopped = ts->tick_stopped; | |
936 tick_nohz_stop_tick(ts, cpu); | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:731 | |
731 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | |
747 if (cpu == tick_do_timer_cpu) { | |
732 u64 basemono = ts->timer_expires_base; | |
733 u64 expires = ts->timer_expires; | |
737 ts->timer_expires_base = 0; | |
747 if (cpu == tick_do_timer_cpu) { | |
750 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
755 if (ts->tick_stopped && (expires == ts->next_tick)) { | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:757 | |
757 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:938 | |
938 ts->idle_sleeps++; | |
941 if (!was_stopped && ts->tick_stopped) { | |
939 ts->idle_expires = expires; | |
941 if (!was_stopped && ts->tick_stopped) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:958 | |
958 } | |
cpuidle_idle_call () at kernel/sched/idle.c:151 | |
151 rcu_idle_enter(); | |
rcu_idle_enter () at kernel/rcu/tree.c:645 | |
645 { | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:610 | |
610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
612 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); | |
616 if (rdp->dynticks_nesting != 1) { | |
613 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:616 | |
616 if (rdp->dynticks_nesting != 1) { | |
rcu_idle_enter () at kernel/rcu/tree.c:647 | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:622 | |
622 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); | |
624 rdp = this_cpu_ptr(&rcu_data); | |
628 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:629 | |
629 rcu_dynticks_eqs_enter(); | |
rcu_dynticks_eqs_enter () at kernel/rcu/tree.c:227 | |
227 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:235 | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_idle_enter () at kernel/rcu/tree.c:648 | |
648 } | |
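
rcu_idle_enter() above dropped dynticks_nmi_nesting and dynticks_nesting to 0 via WRITE_ONCE (the 8-byte __write_once_size case) and then bumped rdp->dynticks with a fully ordered atomic_add_return. A simplified C11 model of that counter protocol, assuming this era's RCU_DYNTICK_CTRL_MASK=0x1 / RCU_DYNTICK_CTRL_CTR=0x2 layout: a remote grace-period kthread can tell from one sample whether the CPU is in an extended quiescent state, and from two samples whether it passed through one.

    /* Simplified userspace model of the rdp->dynticks protocol above;
     * constants assume the kernel/rcu/tree.c layout of this era. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RCU_DYNTICK_CTRL_MASK 0x1   /* low bit: deferred-work flag */
    #define RCU_DYNTICK_CTRL_CTR  0x2   /* counter advances in units of 2 */

    static atomic_int dynticks = RCU_DYNTICK_CTRL_CTR;  /* start non-idle */

    static void eqs_enter(void) { atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR); }
    static void eqs_exit(void)  { atomic_fetch_add(&dynticks, RCU_DYNTICK_CTRL_CTR); }

    /* CPU is in an EQS iff the CTR bit is clear in the sampled value. */
    static bool in_eqs(int snap) { return !(snap & RCU_DYNTICK_CTRL_CTR); }

    int main(void)
    {
        int before = atomic_load(&dynticks);
        eqs_enter();                       /* what rcu_idle_enter() did above */
        int idle = atomic_load(&dynticks);
        eqs_exit();                        /* what rcu_idle_exit() will do */

        printf("in_eqs while idle: %d, counter moved: %d\n",
               in_eqs(idle), atomic_load(&dynticks) != before);
        return 0;
    }
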
cpuidle_idle_call () at kernel/sched/idle.c:153 | |
153 default_idle_call(); | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
current_clr_polling_and_test () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
__current_clr_polling () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_clr_polling () at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:109 | |
109 asm volatile(LOCK_PREFIX "andb %1,%0" | |
current_clr_polling_and_test () at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
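
current_clr_polling_and_test() above is the idle handshake with remote wakeups: clear TIF_POLLING_NRFLAG (the "lock andb"), then re-test TIF_NEED_RESCHED so a reschedule that raced with the clear is not lost. While the polling bit is advertised, the scheduler can skip the wakeup IPI because the idle CPU promises to poll the flag. A userspace model with C11 atomics; the bit positions are illustrative, and the kernel's smp_mb__after_atomic() is folded into the atomic ops here:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TIF_NEED_RESCHED    (1u << 3)   /* bit numbers illustrative */
    #define TIF_POLLING_NRFLAG  (1u << 21)

    static atomic_uint thread_flags;

    static bool current_clr_polling_and_test(void)
    {
        /* clear_bit: atomic AND, like the "lock andb" in the trace */
        atomic_fetch_and(&thread_flags, ~TIF_POLLING_NRFLAG);
        /* then re-test need_resched, closing the race window */
        return atomic_load(&thread_flags) & TIF_NEED_RESCHED;
    }

    int main(void)
    {
        atomic_fetch_or(&thread_flags, TIF_POLLING_NRFLAG); /* "lock orb" */
        atomic_fetch_or(&thread_flags, TIF_NEED_RESCHED);   /* remote waker */
        printf("must skip halt: %d\n", current_clr_polling_and_test());
        return 0;
    }
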
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
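
The back-to-back "sti; hlt" above is what makes this safe: need_resched() was checked with interrupts off, and the STI interrupt shadow guarantees no interrupt is delivered between re-enabling them and halting, so a wakeup cannot slip into that window. A userspace analogy only (this is not the kernel mechanism): checking a predicate and then sleeping must be one atomic step with respect to the waker, which is exactly the unlock-and-sleep that pthread_cond_wait provides and a naive unlock-then-sleep does not.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static bool need_resched_flag = false;

    static void *waker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        need_resched_flag = true;            /* plays the "interrupt" */
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);           /* like running with IRQs off */
        pthread_create(&t, NULL, waker, NULL);
        while (!need_resched_flag)           /* re-check under the lock */
            pthread_cond_wait(&wake, &lock); /* atomic unlock+sleep = sti;hlt */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("woken without losing the wakeup");
        return 0;
    }
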
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
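
rcu_idle_exit() above is bracketed by local_irq_save()/local_irq_restore(), which the trace shows lowering to native_save_fl (a flags read), cli, and then "push %0 ; popf" to put the saved state back. A trivial model of that save/disable/restore pairing:

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_enabled = true;         /* stand-in for RFLAGS.IF */

    static unsigned long local_irq_save(void)
    {
        unsigned long flags = irqs_enabled;  /* native_save_fl() */
        irqs_enabled = false;                /* cli */
        return flags;
    }

    static void local_irq_restore(unsigned long flags)
    {
        irqs_enabled = flags;                /* push %0 ; popf */
    }

    int main(void)
    {
        unsigned long flags = local_irq_save();
        /* ... critical section (rcu_eqs_exit() above) ... */
        local_irq_restore(flags);
        printf("irqs enabled again: %d\n", irqs_enabled);
        return 0;
    }
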
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
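
local_touch_nmi() above is a single per-CPU write, __this_cpu_write(last_nmi_rip, 0). On x86 such accesses compile to %gs-relative addressing; the portable mental model, sketched here, is one slot per CPU indexed by the current CPU id:

    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned long last_nmi_rip[NR_CPUS]; /* DEFINE_PER_CPU analogue */

    static int smp_processor_id(void) { return 0; } /* pretend we're CPU 0 */

    #define this_cpu_write(var, val) ((var)[smp_processor_id()] = (val))
    #define this_cpu_read(var)       ((var)[smp_processor_id()])

    int main(void)
    {
        this_cpu_write(last_nmi_rip, 0);  /* what local_touch_nmi() did above */
        printf("cpu0 last_nmi_rip = %lu\n", this_cpu_read(last_nmi_rip));
        return 0;
    }
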
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
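
tick_check_broadcast_expired() reduces to cpumask_test_cpu(), i.e. testing one bit in an array of unsigned long (the "bt" instruction in variable_test_bit above). A minimal sketch of that bitmap test: CPU n is bit (n % BITS_PER_LONG) of word (n / BITS_PER_LONG).

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

    static bool test_bit(unsigned nr, const unsigned long *addr)
    {
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }

    int main(void)
    {
        unsigned long mask[2] = { 0 };       /* room for up to 128 CPUs */

        mask[0] |= 1UL << 0;                 /* mark CPU 0 */
        printf("cpu0: %d, cpu1: %d\n", test_bit(0, mask), test_bit(1, mask));
        return 0;
    }
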
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
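
Note dev=0x0 in the frames above: no cpuidle driver is registered on this machine, so cpuidle_not_available() returns true and cpuidle_idle_call() falls through to tick_nohz_idle_stop_tick() plus default_idle_call(). The check relies on left-to-right short-circuit evaluation so dev->enabled is never dereferenced through the NULL pointer. A reduced sketch (the kernel also tests its off/initialized globals, omitted here):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct cpuidle_device { bool enabled; };

    static bool cpuidle_not_available(const void *drv,
                                      const struct cpuidle_device *dev)
    {
        return !drv || !dev || !dev->enabled; /* short-circuits before ->enabled */
    }

    int main(void)
    {
        printf("%d\n", cpuidle_not_available(NULL, NULL)); /* 1: fall back */
        return 0;
    }
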
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
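
The read_seqbegin()/read_seqretry() loop above samples last_jiffies_update and jiffies consistently without taking jiffies_lock for write: the writer makes the sequence count odd while updating, and readers retry if they saw an odd count or the count changed across the critical section. A single-threaded C11 sketch of the reader side:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint seq;
    static uint64_t jiffies_val, last_update;  /* the protected pair */

    static unsigned read_seqbegin(void)
    {
        unsigned s;
        while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
            ;                                  /* writer in progress: spin */
        return s;
    }

    static int read_seqretry(unsigned start)
    {
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&seq, memory_order_relaxed) != start;
    }

    int main(void)
    {
        uint64_t basejiff, basemono;
        unsigned s;

        do {                                   /* mirrors tick-sched.c:652-655 */
            s = read_seqbegin();
            basemono = last_update;
            basejiff = jiffies_val;
        } while (read_seqretry(s));

        printf("%llu %llu\n", (unsigned long long)basejiff,
               (unsigned long long)basemono);
        return 0;
    }
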
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
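
raw_spin_lock() above hits the qspinlock fast path: a single try_cmpxchg swinging the lock word from 0 to _Q_LOCKED_VAL (the contended queueing path never appears in this trace). The matching queued_spin_unlock() is a plain byte store, which is why unlock paths in this log keep landing in __write_once_size() case 1 (a volatile u8 store). A reduced C11 sketch of just the uncontended path:

    #include <stdatomic.h>
    #include <stdio.h>

    #define _Q_LOCKED_VAL 1

    static atomic_int lock_word;            /* 0 = unlocked */

    static int spin_trylock_fast(void)
    {
        int old = 0;
        /* try_cmpxchg(&v->counter, old, new), as at atomic.h:200 above */
        return atomic_compare_exchange_strong(&lock_word, &old, _Q_LOCKED_VAL);
    }

    static void spin_unlock_fast(void)
    {
        /* queued_spin_unlock(): a single release store of the locked byte */
        atomic_store_explicit(&lock_word, 0, memory_order_release);
    }

    int main(void)
    {
        printf("first acquire: %d, second: %d\n",
               spin_trylock_fast(), spin_trylock_fast());
        spin_unlock_fast();
        return 0;
    }
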
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=128, offset=98) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=98, offset=64) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=64, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=1, offset=96) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=192, offset=141) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=141, offset=128) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=256, offset=242) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=242, offset=192) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=320, offset=319) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=319, offset=256) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=256, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=4, offset=265) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=384, offset=368) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=320, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=5, offset=373) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=448, offset=446) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=446, offset=384) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=512, offset=456) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=448, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=7, offset=469) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=576, offset=513) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=513, offset=512) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1475 } | |
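
The long find_next_bit cascade above is __next_timer_interrupt() scanning the timer wheel: for each level, next_pending_bucket() looks for a pending bucket at or after the level-local clock, wrapping around once (hence the two find_next_bit calls per level), and a hit is shifted back up by LVL_SHIFT(lvl) into an absolute jiffies expiry; the clock is then rounded up to the next level's coarser granularity (timer.c:1470-1472). A heavily reduced userspace model of that scan, using the real constants LVL_CLK_SHIFT=3, LVL_SIZE=64, LVL_DEPTH=9 (matching the bitmap offsets 0, 64, ..., 512 in the trace); jiffies wraparound handling via time_before() is omitted:

    #include <stdbool.h>
    #include <stdio.h>

    #define LVL_CLK_SHIFT 3
    #define LVL_CLK_MASK  ((1UL << LVL_CLK_SHIFT) - 1)
    #define LVL_SHIFT(n)  ((n) * LVL_CLK_SHIFT)
    #define LVL_SIZE      64
    #define LVL_MASK      (LVL_SIZE - 1)
    #define LVL_DEPTH     9

    static bool pending[LVL_DEPTH][LVL_SIZE];  /* stands in for pending_map */

    /* distance to the next pending bucket in one level, wrapping once */
    static int next_pending_bucket(int lvl, unsigned long clk)
    {
        for (unsigned i = 0; i < LVL_SIZE; i++)
            if (pending[lvl][(clk + i) & LVL_MASK])
                return (int)i;
        return -1;
    }

    static unsigned long next_timer_interrupt(unsigned long base_clk)
    {
        unsigned long clk = base_clk, next = ~0UL;

        for (int lvl = 0; lvl < LVL_DEPTH; lvl++) {
            int pos = next_pending_bucket(lvl, clk & LVL_MASK);

            if (pos >= 0) {
                /* bucket index back up to absolute jiffies, timer.c:1428-1432 */
                unsigned long tmp = (clk + (unsigned long)pos) << LVL_SHIFT(lvl);
                if (tmp < next)
                    next = tmp;
            }
            /* round up to the next level's granularity, timer.c:1470-1472 */
            clk = (clk >> LVL_CLK_SHIFT) + ((clk & LVL_CLK_MASK) ? 1 : 0);
        }
        return next;
    }

    int main(void)
    {
        pending[1][5] = true;                  /* one timer in level 1 */
        printf("next expiry (jiffies): %lu\n", next_timer_interrupt(0));
        return 0;
    }
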
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1534 | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1535 base->next_expiry = nextevt; | |
1541 if (time_after(basej, base->clk)) { | |
1548 if (time_before_eq(nextevt, basej)) { | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1521 u64 expires = KTIME_MAX; | |
1552 if (!is_max_delta) | |
1553 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; | |
1561 if ((expires - basem) > TICK_NSEC) { | |
1562 base->must_forward_clk = true; | |
1563 base->is_idle = true; | |
1566 raw_spin_unlock(&base->lock); | |
__raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
queued_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1568 | |
1568 return cmp_next_hrtimer_event(basem, expires); | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1483 | |
1483 u64 nextevt = hrtimer_get_next_event(); | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1198 | |
1198 { | |
1199 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:151 | |
151 { | |
152 return __raw_spin_lock_irqsave(lock); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_irq_save () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_save_flags () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
do_raw_spin_lock_flags (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
queued_spin_lock (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:153 | |
153 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1205 | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1208 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
queued_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:185 | |
185 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1211 | |
1211 } | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1489 | |
1489 if (expires <= nextevt) | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1569 | |
1569 } | |
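
get_next_timer_interrupt() then converts the wheel's jiffies result into absolute nanoseconds (timer.c:1553 above), and cmp_next_hrtimer_event() takes the min with the next hrtimer expiry (timer.c:1489). A sketch of those two steps; TICK_NSEC=1000000 (HZ=1000) is an assumption, since the config is not visible in this log:

    #include <stdint.h>
    #include <stdio.h>

    #define TICK_NSEC 1000000ULL   /* assumed HZ=1000 */
    #define KTIME_MAX INT64_MAX

    static uint64_t next_timer_ns(uint64_t basem, unsigned long basej,
                                  unsigned long nextevt, int is_max_delta)
    {
        uint64_t expires = KTIME_MAX;

        if (!is_max_delta)       /* timer.c:1552-1553 */
            expires = basem + (uint64_t)(nextevt - basej) * TICK_NSEC;
        return expires;
    }

    static uint64_t cmp_next_hrtimer(uint64_t expires, uint64_t hrtimer_next)
    {
        return expires <= hrtimer_next ? expires : hrtimer_next;
    }

    int main(void)
    {
        uint64_t e = next_timer_ns(16546416592687ULL, 4311212808UL,
                                   4311212908UL, 0);   /* 100 jiffies out */
        printf("%llu\n", (unsigned long long)cmp_next_hrtimer(e, KTIME_MAX));
        return 0;
    }
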
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:683 | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
681 ts->next_timer = next_tmr; | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
691 if (delta <= (u64)TICK_NSEC) { | |
712 delta = timekeeping_max_deferment(); | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1452 | |
1452 { | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1460 | |
1460 ret = tk->tkr_mono.clock->max_idle_ns; | |
1462 } while (read_seqcount_retry(&tk_core.seq, seq)); | |
1465 } | |
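
timekeeping_max_deferment() reads the clocksource's max_idle_ns under the tk_core.seq seqcount: the reader loops until it observes an even sequence number that is unchanged after the read. The same pattern appears again below for jiffies_lock. A sketch of the reader side, assuming C11 atomics stand in for the kernel's READ_ONCE/smp_rmb:

#include <stdatomic.h>

struct seqcount_model { atomic_uint sequence; };

static unsigned read_seqcount_begin_model(struct seqcount_model *s)
{
    unsigned seq;
    /* spin while a write is in progress (odd sequence), line 114 above */
    while ((seq = atomic_load_explicit(&s->sequence,
                                       memory_order_acquire)) & 1)
        ;
    return seq;
}

static int read_seqcount_retry_model(struct seqcount_model *s, unsigned start)
{
    /* any change means a writer ran during the read: caller retries */
    return atomic_load_explicit(&s->sequence, memory_order_acquire) != start;
}

/* usage, mirroring lines 1458..1462 above:
 *
 *   do {
 *       seq = read_seqcount_begin_model(&tk_seq);
 *       ret = clock_max_idle_ns;            // the protected read
 *   } while (read_seqcount_retry_model(&tk_seq, seq));
 */
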
tick_nohz_next_event (ts=0xffff88800781ba40, cpu=130) at kernel/time/tick-sched.c:713 | |
713 if (cpu != tick_do_timer_cpu && | |
715 delta = KTIME_MAX; | |
718 if (delta < (KTIME_MAX - basemono)) | |
719 expires = basemono + delta; | |
723 ts->timer_expires = min_t(u64, expires, next_tick); | |
727 } | |
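
(The ts=0xffff88800781ba40, cpu=130 in the tick_nohz_next_event frame header a few lines up is GDB mis-decoding clobbered argument registers after the inlined call returns: those values are recognizably the hrtimer base lock address and the saved flags from earlier frames, and the real arguments are still ts=0xffff88800781bf60, cpu=0.) Lines 683..723 compute when this CPU may next need to wake: the earlier of RCU's and the timer wheel's deadline, capped by the clocksource's maximum deferment, but only on the CPU responsible for updating jiffies. Roughly, as a model (the early bail-out at line 691, taken when the next event is within one tick anyway, is elided):

#include <stdint.h>

#define KTIME_MAX_MODEL INT64_MAX

static uint64_t tick_nohz_next_event_model(uint64_t basemono,  /* now, ns */
                                           uint64_t next_rcu,
                                           uint64_t next_tmr,
                                           uint64_t max_defer,
                                           int is_do_timer_cpu)
{
    uint64_t next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;  /* 683 */
    uint64_t expires = KTIME_MAX_MODEL;
    /* only the jiffies-updating CPU is bound by the clocksource's
     * maximum deferment; the others may sleep indefinitely (712..715) */
    uint64_t delta = is_do_timer_cpu ? max_defer
                                     : (uint64_t)KTIME_MAX_MODEL;
    if (delta < (uint64_t)KTIME_MAX_MODEL - basemono)                /* 718 */
        expires = basemono + delta;                                  /* 719 */
    return expires < next_tick ? expires : next_tick;                /* 723 */
}
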
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:931 | |
931 ts->idle_calls++; | |
933 if (expires > 0LL) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:957 | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:934 | |
934 int was_stopped = ts->tick_stopped; | |
936 tick_nohz_stop_tick(ts, cpu); | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:731 | |
731 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | |
747 if (cpu == tick_do_timer_cpu) { | |
732 u64 basemono = ts->timer_expires_base; | |
733 u64 expires = ts->timer_expires; | |
737 ts->timer_expires_base = 0; | |
747 if (cpu == tick_do_timer_cpu) { | |
750 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
755 if (ts->tick_stopped && (expires == ts->next_tick)) { | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:757 | |
757 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:938 | |
938 ts->idle_sleeps++; | |
941 if (!was_stopped && ts->tick_stopped) { | |
939 ts->idle_expires = expires; | |
941 if (!was_stopped && ts->tick_stopped) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:958 | |
958 } | |
cpuidle_idle_call () at kernel/sched/idle.c:151 | |
151 rcu_idle_enter(); | |
rcu_idle_enter () at kernel/rcu/tree.c:645 | |
645 { | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:610 | |
610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
612 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); | |
616 if (rdp->dynticks_nesting != 1) { | |
613 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:616 | |
616 if (rdp->dynticks_nesting != 1) { | |
rcu_idle_enter () at kernel/rcu/tree.c:647 | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:622 | |
622 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); | |
624 rdp = this_cpu_ptr(&rcu_data); | |
628 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:629 | |
629 rcu_dynticks_eqs_enter(); | |
rcu_dynticks_eqs_enter () at kernel/rcu/tree.c:227 | |
227 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:235 | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_idle_enter () at kernel/rcu/tree.c:648 | |
648 } | |
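
rcu_idle_enter() tells RCU this CPU is entering an extended quiescent state. In this kernel version the per-CPU ->dynticks counter advances by RCU_DYNTICK_CTRL_CTR on both EQS entry and exit, so the grace-period machinery can sample the counter and distinguish idle from non-idle by the CTR bit; bit 0 (RCU_DYNTICK_CTRL_MASK) is reserved for requesting a deferred quiescent-state report, which the `seq & RCU_DYNTICK_CTRL_MASK` test in the matching exit path further down handles. A sketch of both directions, with the constants as defined in this kernel's kernel/rcu/tree.c:

#include <stdatomic.h>

#define RCU_DYNTICK_CTRL_MASK 0x1L   /* bit 0: deferred-QS request */
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static atomic_long dynticks_model;   /* rdp->dynticks is per-CPU in the kernel */

static void rcu_eqs_enter_model(void)
{
    /* atomic_add_return is a full barrier: all prior RCU read-side
     * critical sections are ordered before the idle period */
    long seq = atomic_fetch_add(&dynticks_model, RCU_DYNTICK_CTRL_CTR)
               + RCU_DYNTICK_CTRL_CTR;
    /* the CTR bit is now clear: grace periods may ignore this CPU */
    (void)seq;
}

static void rcu_eqs_exit_model(void)
{
    long seq = atomic_fetch_add(&dynticks_model, RCU_DYNTICK_CTRL_CTR)
               + RCU_DYNTICK_CTRL_CTR;
    if (seq & RCU_DYNTICK_CTRL_MASK) {
        /* someone requested a quiescent-state report while we slept;
         * clear the bit and report it (the line-261 branch below) */
        atomic_fetch_and(&dynticks_model, ~RCU_DYNTICK_CTRL_MASK);
    }
}
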
cpuidle_idle_call () at kernel/sched/idle.c:153 | |
153 default_idle_call(); | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
current_clr_polling_and_test () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
__current_clr_polling () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_clr_polling () at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:109 | |
109 asm volatile(LOCK_PREFIX "andb %1,%0" | |
current_clr_polling_and_test () at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
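
default_idle() is where the CPU finally sleeps. The arch_static_branch_jump frame before the halt is the MDS mitigation hook (mds_idle_clear_cpu_buffers), a static branch patched in only on affected CPUs. The halt itself is "sti; hlt" as a single sequence: sti has a one-instruction interrupt shadow, so no interrupt can be delivered between re-enabling IRQs and halting, and any interrupt that becomes pending instead wakes the CPU out of the hlt. For illustration only:

/* ring-0 only: this faults in user space, shown to make the
 * sti-shadow pairing explicit */
static inline void safe_halt_model(void)
{
    asm volatile("sti; hlt" : : : "memory");
}
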
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
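
cpuidle_not_available() is the gate that sends this kernel down the default-idle path: in this run both drv and dev are NULL (no cpuidle driver registered), so it returns true and cpuidle_idle_call() falls back to tick_nohz_idle_stop_tick() plus default_idle_call(), which is exactly what the rest of the trace shows. (The `0x0 <irq_stack_union>` annotation is just GDB resolving address 0 to the first per-CPU symbol; the pointer really is NULL.) The check, paraphrased with invented _model names:

#include <stdbool.h>

struct cpuidle_device_model { bool enabled; };

static bool cpuidle_off_model;          /* the cpuidle.off= module state */
static bool cpuidle_initialized_model;  /* set once a driver registers */

static bool cpuidle_not_available_model(const void *drv,
                                        const struct cpuidle_device_model *dev)
{
    /* line 51: any missing piece forfeits cpuidle for this idle entry */
    return cpuidle_off_model || !cpuidle_initialized_model ||
           !drv || !dev || !dev->enabled;
}
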
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
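
can_stop_idle_tick() gates tick-stopping: the CPU must be online, NOHZ mode must have been activated, nothing may be waiting to run, and no softirqs may be pending. Paraphrased (helper and field names here are invented):

#include <stdbool.h>

enum { NOHZ_MODE_INACTIVE_MODEL };

struct ts_model { int nohz_mode; };

static bool can_stop_idle_tick_model(bool online, bool resched_pending,
                                     unsigned int softirqs_pending,
                                     const struct ts_model *ts)
{
    if (!online)                                    /* line 868 */
        return false;
    if (ts->nohz_mode == NOHZ_MODE_INACTIVE_MODEL)  /* line 879 */
        return false;
    if (resched_pending)                            /* line 882 */
        return false;
    if (softirqs_pending)                           /* line 885; the kernel
                                                       warns in this case */
        return false;
    return true;
}
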
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
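
irq_work_needs_cpu() keeps the tick alive if pending irq_work would otherwise never run: raised work is fine to leave behind when the architecture has a dedicated irq_work interrupt to deliver it (x86 does), but lazy work relies on the tick. A paraphrase of lines 128..139:

#include <stdbool.h>

static bool irq_work_needs_cpu_model(bool raised_empty, bool lazy_empty,
                                     bool arch_has_irq_work_interrupt)
{
    /* raised work doesn't require the tick if an IPI will deliver it */
    if (raised_empty || arch_has_irq_work_interrupt)
        if (lazy_empty)
            return false;
    /* otherwise lazy (or undeliverable raised) work needs the tick */
    return true;
}
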
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=128, offset=98) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=98, offset=64) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=64, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=1, offset=96) at lib/find_bit.c:75 | |
75 } | |
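
The find_next_bit()/_find_next_bit() sequences the trace keeps re-entering are scans of the pending-timer bitmap. The algorithm (lib/find_bit.c:40..63): mask off bits below the start position in the first word, walk whole words until one is non-zero, then finish with a find-first-set (the "rep; bsf" above). A standalone version, simplified to match the find_next_bit(addr, NULL, size, offset, 0UL) calls in this trace (no second bitmap, no inversion):

#include <stddef.h>

#define BITS_PER_LONG_MODEL (8 * sizeof(unsigned long))
#define FIRST_WORD_MASK(s)  (~0UL << ((s) % BITS_PER_LONG_MODEL))

static size_t find_next_bit_model(const unsigned long *addr,
                                  size_t nbits, size_t start)
{
    unsigned long tmp;
    size_t hit;

    if (start >= nbits)                       /* line 40 */
        return nbits;

    tmp = addr[start / BITS_PER_LONG_MODEL];  /* line 43 */
    tmp &= FIRST_WORD_MASK(start);            /* line 49 */
    start -= start % BITS_PER_LONG_MODEL;     /* line 50: round_down */

    while (!tmp) {                            /* lines 52..54 */
        start += BITS_PER_LONG_MODEL;
        if (start >= nbits)
            return nbits;
        tmp = addr[start / BITS_PER_LONG_MODEL];
    }
    /* line 63: __ffs() is the single-instruction find-first-set */
    hit = start + (size_t)__builtin_ctzl(tmp);
    return hit < nbits ? hit : nbits;
}
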
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=192, offset=141) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=141, offset=128) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=256, offset=242) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=242, offset=192) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=320, offset=319) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=319, offset=256) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=256, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=4, offset=265) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=384, offset=368) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=320, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=5, offset=373) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=448, offset=446) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=446, offset=384) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=512, offset=456) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=448, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=7, offset=469) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=576, offset=513) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=513, offset=512) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1475 } | |
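
All of the above is one pass of __next_timer_interrupt() over the timer wheel. The find_next_bit sizes (64, 128, ..., 576) show 9 levels of 64 buckets, consistent with HZ=1000; each level's buckets are 8x coarser than the previous one (LVL_CLK_SHIFT = 3), and next_pending_bucket() does a wrapping scan of one level, first [start, end) and then [offset, start). A sketch of the scan, reusing find_next_bit_model from the earlier sketch (simplified: the kernel starts `next` at base->clk + NEXT_TIMER_MAX_DELTA and has early-out logic this model omits):

#define LVL_DEPTH_MODEL     9
#define LVL_SIZE_MODEL      64
#define LVL_MASK_MODEL      (LVL_SIZE_MODEL - 1)
#define LVL_CLK_SHIFT_MODEL 3
#define LVL_CLK_MASK_MODEL  ((1UL << LVL_CLK_SHIFT_MODEL) - 1)
#define LVL_SHIFT_MODEL(n)  ((n) * LVL_CLK_SHIFT_MODEL)

/* wrapping scan of one level's bucket window (lines 1402..1410) */
static int next_pending_bucket_model(const unsigned long *map,
                                     unsigned offset, unsigned clk)
{
    unsigned pos, start = offset + clk, end = offset + LVL_SIZE_MODEL;

    pos = (unsigned)find_next_bit_model(map, end, start);
    if (pos < end)
        return (int)(pos - start);
    pos = (unsigned)find_next_bit_model(map, start, offset);
    return pos < start ? (int)(pos + LVL_SIZE_MODEL - start) : -1;
}

static unsigned long next_timer_expiry_model(const unsigned long *pending_map,
                                             unsigned long clk_now)
{
    unsigned long clk = clk_now, next = ~0UL;
    unsigned lvl, offset = 0;

    for (lvl = 0; lvl < LVL_DEPTH_MODEL; lvl++, offset += LVL_SIZE_MODEL) {
        int pos = next_pending_bucket_model(pending_map, offset,
                                            (unsigned)(clk & LVL_MASK_MODEL));
        if (pos >= 0) {
            /* convert the level-local bucket back to level-0 clock units */
            unsigned long tmp = (clk + (unsigned long)pos)
                                << LVL_SHIFT_MODEL(lvl);
            if (tmp < next)
                next = tmp;
        }
        /* advance to the next (8x coarser) level, rounding up if any
         * lower clock bits remain (lines 1470..1472) */
        unsigned long adj = (clk & LVL_CLK_MASK_MODEL) ? 1 : 0;
        clk >>= LVL_CLK_SHIFT_MODEL;
        clk += adj;
    }
    return next;
}
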
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1534 | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1535 base->next_expiry = nextevt; | |
1541 if (time_after(basej, base->clk)) { | |
1548 if (time_before_eq(nextevt, basej)) { | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1521 u64 expires = KTIME_MAX; | |
1552 if (!is_max_delta) | |
1553 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; | |
1561 if ((expires - basem) > TICK_NSEC) { | |
1562 base->must_forward_clk = true; | |
1563 base->is_idle = true; | |
1566 raw_spin_unlock(&base->lock); | |
__raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
queued_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1568 | |
1568 return cmp_next_hrtimer_event(basem, expires); | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1483 | |
1483 u64 nextevt = hrtimer_get_next_event(); | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1198 | |
1198 { | |
1199 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:151 | |
151 { | |
152 return __raw_spin_lock_irqsave(lock); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_irq_save () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_save_flags () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
do_raw_spin_lock_flags (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
queued_spin_lock (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:153 | |
153 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1205 | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1208 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
queued_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:185 | |
185 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1211 | |
1211 } | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1489 | |
1489 if (expires <= nextevt) | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1569 | |
1569 } | |
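
get_next_timer_interrupt() ends by combining the timer-wheel expiry with the next hrtimer deadline; cmp_next_hrtimer_event() returns whichever fires first, never earlier than the base time. Roughly its shape (a sketch, with the hrtimer value passed in rather than fetched; TICK_NSEC_SK assumes HZ=1000 purely for illustration, and when high-resolution mode is active, as the __hrtimer_hres_active() check above suggests, the hrtimer side reports KTIME_MAX so the wheel value wins):

#include <stdint.h>

#define TICK_NSEC_SK 1000000ULL /* assumes HZ=1000 for the sketch */

static uint64_t cmp_next_hrtimer_event_sketch(uint64_t basem, uint64_t expires,
                                              uint64_t nextevt)
{
    if (expires <= nextevt) /* wheel timer first (or hres: nextevt==KTIME_MAX) */
        return expires;
    if (nextevt <= basem)   /* hrtimer already due: fire at the base time */
        return basem;
    /* low-res mode: hrtimers expire from the tick, so round the
     * deadline up to a whole tick */
    return (nextevt + TICK_NSEC_SK - 1) / TICK_NSEC_SK * TICK_NSEC_SK;
}

int main(void)
{
    /* hres active: hrtimer side is KTIME_MAX, the wheel expiry wins */
    return cmp_next_hrtimer_event_sketch(0, 5 * TICK_NSEC_SK,
                                         UINT64_MAX) != 5 * TICK_NSEC_SK;
}
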
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:683 | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
681 ts->next_timer = next_tmr; | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
691 if (delta <= (u64)TICK_NSEC) { | |
712 delta = timekeeping_max_deferment(); | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1452 | |
1452 { | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1460 | |
1460 ret = tk->tkr_mono.clock->max_idle_ns; | |
1462 } while (read_seqcount_retry(&tk_core.seq, seq)); | |
1465 } | |
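
timekeeping_max_deferment() reads the timekeeper under a seqcount: sample the sequence number, read the data, and retry if the count changed, with an odd count meaning a writer is mid-update (that is the "ret & 1" test at seqlock.h:114 above). A minimal C11 sketch of the pattern (the _sketch names are mine; caveat: the plain payload read is formally a data race in the C11 model, where the kernel relies on READ_ONCE/WRITE_ONCE plus barriers for the same effect):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned seq_sketch;
static uint64_t max_idle_ns_sketch; /* the protected payload */

static uint64_t read_deferment(void)
{
    unsigned s;
    uint64_t v;
    do {
        /* __read_seqcount_begin(): spin while the count is odd */
        while ((s = atomic_load_explicit(&seq_sketch,
                                         memory_order_acquire)) & 1)
            ;
        v = max_idle_ns_sketch;
        atomic_thread_fence(memory_order_acquire);
        /* read_seqcount_retry(): if the count moved, a writer raced us */
    } while (atomic_load_explicit(&seq_sketch, memory_order_relaxed) != s);
    return v;
}

static void write_deferment(uint64_t v)
{
    atomic_fetch_add_explicit(&seq_sketch, 1, memory_order_relaxed); /* odd */
    atomic_thread_fence(memory_order_release);
    max_idle_ns_sketch = v;
    atomic_fetch_add_explicit(&seq_sketch, 1, memory_order_release); /* even */
}

int main(void)
{
    write_deferment(123);
    return read_deferment() != 123;
}
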
tick_nohz_next_event (ts=0xffff88800781ba40, cpu=130) at kernel/time/tick-sched.c:713 | |
713 if (cpu != tick_do_timer_cpu && | |
715 delta = KTIME_MAX; | |
718 if (delta < (KTIME_MAX - basemono)) | |
719 expires = basemono + delta; | |
723 ts->timer_expires = min_t(u64, expires, next_tick); | |
727 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:931 | |
931 ts->idle_calls++; | |
933 if (expires > 0LL) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:957 | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:934 | |
934 int was_stopped = ts->tick_stopped; | |
936 tick_nohz_stop_tick(ts, cpu); | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:731 | |
731 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | |
747 if (cpu == tick_do_timer_cpu) { | |
732 u64 basemono = ts->timer_expires_base; | |
733 u64 expires = ts->timer_expires; | |
737 ts->timer_expires_base = 0; | |
747 if (cpu == tick_do_timer_cpu) { | |
750 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
755 if (ts->tick_stopped && (expires == ts->next_tick)) { | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:757 | |
757 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:938 | |
938 ts->idle_sleeps++; | |
941 if (!was_stopped && ts->tick_stopped) { | |
939 ts->idle_expires = expires; | |
941 if (!was_stopped && ts->tick_stopped) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:958 | |
958 } | |
cpuidle_idle_call () at kernel/sched/idle.c:151 | |
151 rcu_idle_enter(); | |
rcu_idle_enter () at kernel/rcu/tree.c:645 | |
645 { | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:610 | |
610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
612 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); | |
616 if (rdp->dynticks_nesting != 1) { | |
613 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:616 | |
616 if (rdp->dynticks_nesting != 1) { | |
rcu_idle_enter () at kernel/rcu/tree.c:647 | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:622 | |
622 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); | |
624 rdp = this_cpu_ptr(&rcu_data); | |
628 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:629 | |
629 rcu_dynticks_eqs_enter(); | |
rcu_dynticks_eqs_enter () at kernel/rcu/tree.c:227 | |
227 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:235 | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_idle_enter () at kernel/rcu/tree.c:648 | |
648 } | |
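
rcu_eqs_enter() zeroes the nesting counters with WRITE_ONCE and then rcu_dynticks_eqs_enter() does a fully ordered atomic_add_return(RCU_DYNTICK_CTRL_CTR) on rdp->dynticks. Adding the CTR constant flips that bit on every enter/exit, so a grace-period kthread on another CPU can sample the counter and tell, without ever interrupting this CPU, whether it is in (or passed through) an extended quiescent state; bit 0 is the separate control mask checked on exit below ("seq & RCU_DYNTICK_CTRL_MASK"). A sketch of the counter discipline, under the assumption that this kernel's CTR bit is 0x2 as in the v5.x sources:

#include <stdatomic.h>
#include <stdbool.h>

#define CTRL_CTR 0x2u /* analogue of RCU_DYNTICK_CTRL_CTR (bit 1) */

/* Starts with the CTR bit set: the CPU is "active", not in an EQS. */
static _Atomic unsigned dynticks = CTRL_CTR;

static void eqs_enter(void)
{
    /* atomic_add_return(): full ordering, and the add toggles bit 1,
     * so the counter now reads as "in an EQS" */
    atomic_fetch_add_explicit(&dynticks, CTRL_CTR, memory_order_seq_cst);
}

static void eqs_exit(void)
{
    atomic_fetch_add_explicit(&dynticks, CTRL_CTR, memory_order_seq_cst);
}

/* What a grace-period kthread can check remotely, without an IPI. */
static bool cpu_in_eqs(void)
{
    return !(atomic_load(&dynticks) & CTRL_CTR);
}

int main(void)
{
    if (cpu_in_eqs())  return 1; /* active at boot */
    eqs_enter();
    if (!cpu_in_eqs()) return 1; /* idle: in an extended quiescent state */
    eqs_exit();
    return cpu_in_eqs() ? 1 : 0; /* active again */
}
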
cpuidle_idle_call () at kernel/sched/idle.c:153 | |
153 default_idle_call(); | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
current_clr_polling_and_test () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
__current_clr_polling () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_clr_polling () at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:109 | |
109 asm volatile(LOCK_PREFIX "andb %1,%0" | |
current_clr_polling_and_test () at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
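
default_idle() bottoms out in native_safe_halt()'s "sti; hlt". Interrupts were disabled while TIF_NEED_RESCHED was checked in current_clr_polling_and_test(); sti re-enables them and hlt halts, and because x86 does not deliver interrupts until the instruction after sti completes, no wakeup can slip in between the final check and the halt. The same lost-wakeup structure appears in userspace as "check the predicate under the lock, then atomically release and wait", so a condvar analogue may be the clearest illustration (a sketch; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool need_resched;

static void *idle_cpu(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);           /* ~ cli */
    while (!need_resched)                /* final check, wakeups blocked */
        pthread_cond_wait(&wake, &lock); /* ~ sti;hlt: release+sleep atomically */
    pthread_mutex_unlock(&lock);
    puts("woken");
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, idle_cpu, NULL);
    pthread_mutex_lock(&lock);
    need_resched = true;                 /* ~ set TIF_NEED_RESCHED */
    pthread_cond_signal(&wake);          /* ~ send the wakeup IPI */
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}
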
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
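
Worth noting: both drv and dev print as 0x0 in the frames above (GDB resolves address 0 to the per-cpu symbol irq_stack_union, which is presumably just how a NULL or unadjusted per-cpu pointer renders here), so cpuidle_not_available() returns true, and the trace below indeed takes the fallback branch: stop the tick at idle.c:150, then default_idle_call() and hlt. No cpuidle driver appears to be registered in this VM. The shape of the guard, as a sketch with hypothetical stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct cpuidle_driver_sketch { int unused; };
struct cpuidle_device_sketch { bool enabled; };

/* Any missing piece sends the idle loop down the default path. */
static bool cpuidle_not_available_sketch(bool off, bool initialized,
                                         const struct cpuidle_driver_sketch *drv,
                                         const struct cpuidle_device_sketch *dev)
{
    return off || !initialized || !drv || !dev || !dev->enabled;
}

int main(void)
{
    /* the case this trace hits: no driver, no device */
    return cpuidle_not_available_sketch(false, true, NULL, NULL) ? 0 : 1;
}
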
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
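
The _raw_spin_lock() fast path above is a single try_cmpxchg on the lock word (0 -> locked), and the matching queued_spin_unlock() seen earlier is just a WRITE_ONCE of 0 to the locked byte, i.e. a release store. A C11 sketch of the uncontended fast path (the real qspinlock falls back to per-CPU MCS queueing under contention; here we simply spin):

#include <stdatomic.h>
#include <stdint.h>

struct qspinlock_sketch { _Atomic uint32_t val; };

#define LOCKED_SK 1u

static void spin_lock_sketch(struct qspinlock_sketch *l)
{
    uint32_t old = 0;
    /* try_cmpxchg(&v->counter, old, new) with acquire semantics */
    while (!atomic_compare_exchange_weak_explicit(&l->val, &old, LOCKED_SK,
                                                  memory_order_acquire,
                                                  memory_order_relaxed)) {
        old = 0; /* CAS failed: reset the expected value and retry */
    }
}

static void spin_unlock_sketch(struct qspinlock_sketch *l)
{
    /* queued_spin_unlock(): one release store of 0 -- the
     * __write_once_size() byte store seen in the trace */
    atomic_store_explicit(&l->val, 0, memory_order_release);
}

int main(void)
{
    struct qspinlock_sketch l = { 0 };
    spin_lock_sketch(&l);
    spin_unlock_sketch(&l);
    return atomic_load(&l.val) != 0;
}
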
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=128, offset=98) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=98, offset=64) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=64, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=1, offset=96) at lib/find_bit.c:75 | |
75 } | |
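
The repeating lib/find_bit.c frames are _find_next_bit(): load the word containing the starting offset, mask off the bits below it (BITMAP_FIRST_WORD_MASK), walk whole words until one is nonzero, then finish with __ffs, the "rep; bsf" seen above. A freestanding version mirroring that logic (a sketch; the kernel source also handles the inverted/two-bitmap variants, omitted here):

#include <stdint.h>
#include <stddef.h>

/* First set bit at index >= offset, or nbits if none. */
static size_t find_next_bit_sketch(const uint64_t *addr, size_t nbits,
                                   size_t offset)
{
    uint64_t tmp;

    if (offset >= nbits)
        return nbits;
    tmp = addr[offset / 64] & (~0ULL << (offset % 64)); /* FIRST_WORD_MASK */
    offset -= offset % 64;                              /* round_down */
    while (!tmp) {
        offset += 64;
        if (offset >= nbits)
            return nbits;
        tmp = addr[offset / 64];
    }
    offset += (size_t)__builtin_ctzll(tmp);             /* __ffs: bsf */
    return offset < nbits ? offset : nbits;             /* min(..., nbits) */
}

int main(void)
{
    uint64_t map[2] = { 0, 1ULL << 9 }; /* only bit 73 set */
    return find_next_bit_sketch(map, 128, 10) != 73;
}
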
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=192, offset=141) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=141, offset=128) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=256, offset=242) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=242, offset=192) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=320, offset=319) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=319, offset=256) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=256, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=4, offset=265) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=384, offset=368) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=320, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=5, offset=373) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=448, offset=446) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=446, offset=384) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=512, offset=456) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
63 return min(start + __ffs(tmp), nbits); | |
__ffs (word=<optimized out>) at ./arch/x86/include/asm/bitops.h:351 | |
351 asm("rep; bsf %1,%0" | |
_find_next_bit (invert=<optimized out>, start=448, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:63 | |
63 return min(start + __ffs(tmp), nbits); | |
find_next_bit (addr=0xffff888007819568, size=7, offset=469) at lib/find_bit.c:75 | |
75 } | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1407 | |
1407 return pos - start; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1427 | |
1427 if (pos >= 0) { | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1428 unsigned long tmp = clk + (unsigned long) pos; | |
1430 tmp <<= LVL_SHIFT(lvl); | |
1432 next = tmp; | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=576, offset=513) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=513, offset=512) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; | |
1471 clk >>= LVL_CLK_SHIFT; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1472 clk += adj; | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1475 } | |
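
That long stretch was one full pass of __next_timer_interrupt() over the hierarchical timer wheel: for each level, scan that level's 64-bucket pending bitmap starting at the bucket the level-local clock points to (wrapping once), convert the first pending bucket back to an absolute expiry ("tmp <<= LVL_SHIFT(lvl)"), keep the minimum, then shift clk right by LVL_CLK_SHIFT so the next level is scanned at its 8x coarser granularity, rounding up via "adj" when partway through a granule. A compact sketch of that scan (the _SK names are mine; a linear scan stands in for the two find_next_bit() calls, and UINT64_MAX stands in for base->clk + NEXT_TIMER_MAX_DELTA):

#include <stdint.h>

#define LVL_DEPTH_SK     3
#define LVL_BITS_SK      6                 /* 64 buckets per level */
#define LVL_SIZE_SK      (1u << LVL_BITS_SK)
#define LVL_MASK_SK      (LVL_SIZE_SK - 1)
#define LVL_CLK_SHIFT_SK 3                 /* each level is 8x coarser */
#define LVL_SHIFT_SK(l)  ((l) * LVL_CLK_SHIFT_SK)

static uint64_t next_expiry_sketch(const uint64_t pending[LVL_DEPTH_SK],
                                   uint64_t clk)
{
    uint64_t next = UINT64_MAX;

    for (int lvl = 0; lvl < LVL_DEPTH_SK; lvl++) {
        int pos = -1;

        /* next_pending_bucket(): first pending bucket at or after clk,
         * searching this level's bitmap circularly */
        for (unsigned d = 0; d < LVL_SIZE_SK; d++) {
            if (pending[lvl] & (1ULL << ((clk + d) & LVL_MASK_SK))) {
                pos = (int)d;
                break;
            }
        }

        if (pos >= 0) {
            /* back to an absolute expiry in level-0 ticks:
             * tmp = clk + pos; tmp <<= LVL_SHIFT(lvl) */
            uint64_t tmp = (clk + (uint64_t)pos) << LVL_SHIFT_SK(lvl);
            if (tmp < next)
                next = tmp;
        }

        /* step up to the next level's granularity, rounding clk up
         * when partway through a coarser granule ("adj") */
        uint64_t adj = (clk & ((1u << LVL_CLK_SHIFT_SK) - 1)) ? 1 : 0;
        clk >>= LVL_CLK_SHIFT_SK;
        clk += adj;
    }
    return next;
}

int main(void)
{
    /* one timer queued in level 1, bucket 5; wheel clock at tick 9 */
    uint64_t pending[LVL_DEPTH_SK] = { 0, 1ULL << 5, 0 };
    return next_expiry_sketch(pending, 9) != 40; /* bucket 5 * granule 8 */
}
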
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1534 | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1535 base->next_expiry = nextevt; | |
1541 if (time_after(basej, base->clk)) { | |
1548 if (time_before_eq(nextevt, basej)) { | |
1534 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); | |
1521 u64 expires = KTIME_MAX; | |
1552 if (!is_max_delta) | |
1553 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; | |
1561 if ((expires - basem) > TICK_NSEC) { | |
1562 base->must_forward_clk = true; | |
1563 base->is_idle = true; | |
1566 raw_spin_unlock(&base->lock); | |
__raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
queued_spin_unlock (lock=<optimized out>) at kernel/time/timer.c:1566 | |
1566 raw_spin_unlock(&base->lock); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1568 | |
1568 return cmp_next_hrtimer_event(basem, expires); | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1483 | |
1483 u64 nextevt = hrtimer_get_next_event(); | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1198 | |
1198 { | |
1199 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:151 | |
151 { | |
152 return __raw_spin_lock_irqsave(lock); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_irq_save () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
arch_local_save_flags () at kernel/locking/spinlock.c:152 | |
152 return __raw_spin_lock_irqsave(lock); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
__raw_spin_lock_irqsave (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
do_raw_spin_lock_flags (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
queued_spin_lock (lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./include/linux/spinlock_api_smp.h:119 | |
119 do_raw_spin_lock_flags(lock, &flags); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock_irqsave (lock=0xffff88800781ba40) at kernel/locking/spinlock.c:153 | |
153 } | |
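
This lock acquisition is the qspinlock fast path: after saving flags and executing cli, queued_spin_lock() attempts a single cmpxchg of the lock word from 0 to locked (the try_cmpxchg at atomic.h:200 above), and unlock is just a release store of 0 (the __write_once_size() frames). A userspace model using C11 atomics; the real slow path queues waiters instead of spinning as this model does:

#include <stdatomic.h>
#include <stdio.h>

#define LOCKED 1u

static atomic_uint lock_word = 0;

static void spin_lock_fastpath(void)
{
    unsigned int expected = 0;
    /* try_cmpxchg analogue: succeed only if the lock word is 0 */
    while (!atomic_compare_exchange_weak(&lock_word, &expected, LOCKED))
        expected = 0; /* the CAS wrote the observed value back */
}

static void spin_unlock(void)
{
    /* queued_spin_unlock() is a single release store of a zero byte,
     * which the trace shows as __write_once_size(), case 1 */
    atomic_store_explicit(&lock_word, 0, memory_order_release);
}

int main(void)
{
    spin_lock_fastpath();
    puts("locked");
    spin_unlock();
    puts("unlocked");
    return 0;
}
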
hrtimer_get_next_event () at kernel/time/hrtimer.c:1205 | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1203 raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1205 if (!__hrtimer_hres_active(cpu_base)) | |
1208 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
do_raw_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
queued_spin_unlock (lock=<optimized out>) at kernel/locking/spinlock.c:184 | |
184 __raw_spin_unlock_irqrestore(lock, flags); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:217 | |
217 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | |
__raw_spin_unlock_irqrestore (flags=<optimized out>, lock=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at ./include/linux/spinlock_api_smp.h:160 | |
160 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
_raw_spin_unlock_irqrestore (lock=0xffff88800781ba40, flags=130) at kernel/locking/spinlock.c:185 | |
185 } | |
hrtimer_get_next_event () at kernel/time/hrtimer.c:1211 | |
1211 } | |
cmp_next_hrtimer_event (expires=<optimized out>, basem=<optimized out>) at kernel/time/timer.c:1489 | |
1489 if (expires <= nextevt) | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1569 | |
1569 } | |
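
cmp_next_hrtimer_event() reconciles the wheel deadline with the next hrtimer; note that with high-resolution mode inactive (the __hrtimer_hres_active() test at hrtimer.c:1205) hrtimer_get_next_event() reports KTIME_MAX, so the wheel always wins. A simplified model of the comparison, assuming HZ=1000; a winning hrtimer is rounded up to jiffy resolution because the tick cannot fire between jiffies:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL

static uint64_t cmp_next_event(uint64_t basem, uint64_t expires,
                               uint64_t next_hrtimer)
{
    if (expires <= next_hrtimer)
        return expires;              /* wheel timer is earlier */
    if (next_hrtimer <= basem)
        return basem;                /* hrtimer already expired: fire now */
    /* round the hrtimer deadline up to the next whole jiffy */
    return (next_hrtimer + TICK_NSEC - 1) / TICK_NSEC * TICK_NSEC;
}

int main(void)
{
    /* a wheel deadline 5ms out beats an hrtimer 7ms out */
    printf("%llu\n", (unsigned long long)
           cmp_next_event(1000000000ULL, 1005000000ULL, 1007000000ULL));
    return 0;
}
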
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:683 | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
681 ts->next_timer = next_tmr; | |
683 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
691 if (delta <= (u64)TICK_NSEC) { | |
712 delta = timekeeping_max_deferment(); | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1452 | |
1452 { | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/timekeeping.c:1458 | |
1458 seq = read_seqcount_begin(&tk_core.seq); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
timekeeping_max_deferment () at kernel/time/timekeeping.c:1460 | |
1460 ret = tk->tkr_mono.clock->max_idle_ns; | |
1462 } while (read_seqcount_retry(&tk_core.seq, seq)); | |
1465 } | |
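
timekeeping_max_deferment() is a lockless seqcount reader: sample the sequence, read the data, and retry if a writer was active (odd sequence) or the sequence moved in between. A userspace model of the reader side using C11 atomics; the racing writer is not shown:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned seq;     /* even = stable, odd = write in progress */
static unsigned long shared_val; /* data protected by the seqcount */

static unsigned read_begin(void)
{
    unsigned s;
    while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
        ; /* writer active: spin until the sequence is even */
    return s;
}

static int read_retry(unsigned s)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

int main(void)
{
    unsigned long v;
    unsigned s;
    do {
        s = read_begin();
        v = shared_val;        /* the actual read, e.g. max_idle_ns */
    } while (read_retry(s));
    printf("%lu\n", v);
    return 0;
}
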
tick_nohz_next_event (ts=0xffff88800781ba40, cpu=130) at kernel/time/tick-sched.c:713 | |
713 if (cpu != tick_do_timer_cpu && | |
715 delta = KTIME_MAX; | |
718 if (delta < (KTIME_MAX - basemono)) | |
719 expires = basemono + delta; | |
723 ts->timer_expires = min_t(u64, expires, next_tick); | |
727 } | |
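
The tail of tick_nohz_next_event() combines the pieces: next_tick = min(next_rcu, next_tmr); a sleep within one tick keeps the tick running; the CPU that owns jiffies updates is additionally bounded by the clocksource's maximum deferment, while other CPUs (this trace, lines 713-715) get KTIME_MAX there and are capped only by next_tick. A condensed model; the names and single-return shape are mine, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL
#define KTIME_MAX INT64_MAX

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

static uint64_t next_event(uint64_t basemono, uint64_t next_rcu,
                           uint64_t next_tmr, uint64_t max_defer,
                           int is_timekeeping_cpu)
{
    uint64_t next_tick = min_u64(next_rcu, next_tmr);

    if (next_tick - basemono <= TICK_NSEC)
        return 0;                       /* too soon: keep the tick */

    /* only the CPU that owns jiffies updates is bound by how long the
     * clocksource can be deferred; everyone else may sleep "forever" */
    uint64_t delta = is_timekeeping_cpu ? max_defer : (uint64_t)KTIME_MAX;

    uint64_t expires = (delta < KTIME_MAX - basemono)
                           ? basemono + delta : KTIME_MAX;
    return min_u64(expires, next_tick); /* ts->timer_expires */
}

int main(void)
{
    uint64_t e = next_event(16546416592687ULL,       /* basemono (trace) */
                            (uint64_t)KTIME_MAX,     /* next_rcu: none */
                            16546517592687ULL,       /* wheel: 101ms out */
                            600ULL * 1000000000,     /* made-up deferment */
                            0);                      /* not do_timer cpu */
    printf("timer_expires = %llu\n", (unsigned long long)e);
    return 0;
}
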
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:931 | |
931 ts->idle_calls++; | |
933 if (expires > 0LL) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:957 | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:934 | |
934 int was_stopped = ts->tick_stopped; | |
936 tick_nohz_stop_tick(ts, cpu); | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:731 | |
731 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | |
747 if (cpu == tick_do_timer_cpu) { | |
732 u64 basemono = ts->timer_expires_base; | |
733 u64 expires = ts->timer_expires; | |
737 ts->timer_expires_base = 0; | |
747 if (cpu == tick_do_timer_cpu) { | |
750 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
755 if (ts->tick_stopped && (expires == ts->next_tick)) { | |
tick_nohz_stop_tick (cpu=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:757 | |
757 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:938 | |
938 ts->idle_sleeps++; | |
941 if (!was_stopped && ts->tick_stopped) { | |
939 ts->idle_expires = expires; | |
941 if (!was_stopped && ts->tick_stopped) { | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:958 | |
958 } | |
cpuidle_idle_call () at kernel/sched/idle.c:151 | |
151 rcu_idle_enter(); | |
rcu_idle_enter () at kernel/rcu/tree.c:645 | |
645 { | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:610 | |
610 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
612 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); | |
616 if (rdp->dynticks_nesting != 1) { | |
613 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:616 | |
616 if (rdp->dynticks_nesting != 1) { | |
rcu_idle_enter () at kernel/rcu/tree.c:647 | |
647 rcu_eqs_enter(false); | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:622 | |
622 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); | |
624 rdp = this_cpu_ptr(&rcu_data); | |
628 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_enter (user=<optimized out>) at kernel/rcu/tree.c:629 | |
629 rcu_dynticks_eqs_enter(); | |
rcu_dynticks_eqs_enter () at kernel/rcu/tree.c:227 | |
227 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:235 | |
235 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_idle_enter () at kernel/rcu/tree.c:648 | |
648 } | |
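
rcu_idle_enter() just told RCU to stop waiting on this CPU: dynticks_nesting drops to 0, and rcu_dynticks_eqs_enter() bumps the per-CPU dynticks counter with a fully ordered atomic_add_return(), so a remote CPU sampling the counter twice can tell whether this one is, or has passed through, an extended quiescent state. A toy model using the classic even/odd parity scheme; the real 5.x counter packs a control mask into its low bits instead:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dynticks; /* even = non-idle, odd = idle (model only) */

static void eqs_enter(void)
{
    /* atomic RMW with full ordering, like atomic_add_return() */
    int seq = atomic_fetch_add(&dynticks, 1) + 1;
    if (!(seq & 1))
        fprintf(stderr, "entered idle with non-idle parity!\n");
}

static void eqs_exit(void)
{
    int seq = atomic_fetch_add(&dynticks, 1) + 1;
    if (seq & 1)
        fprintf(stderr, "left idle with idle parity!\n");
}

int main(void)
{
    eqs_enter();  /* cpu idles: grace periods needn't wait for it */
    eqs_exit();   /* cpu runs again: back under RCU's watch */
    printf("dynticks = %d\n", atomic_load(&dynticks));
    return 0;
}
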
cpuidle_idle_call () at kernel/sched/idle.c:153 | |
153 default_idle_call(); | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
current_clr_polling_and_test () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
__current_clr_polling () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_clr_polling () at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:42 | |
42 clear_thread_flag(TIF_POLLING_NRFLAG); | |
clear_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:109 | |
109 asm volatile(LOCK_PREFIX "andb %1,%0" | |
current_clr_polling_and_test () at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:55 | |
55 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
default_idle_call () at kernel/sched/idle.c:89 | |
89 if (current_clr_polling_and_test()) { | |
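
current_clr_polling_and_test() closes a lost-wakeup race: while TIF_POLLING_NRFLAG is visible, a remote resched_curr() skips the wakeup IPI, so before halting this CPU must clear the flag and only then re-test TIF_NEED_RESCHED. A model with both flags packed into one atomic word; the bit values are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED   (1u << 0)
#define TIF_POLLING_NRFLAG (1u << 1)

static atomic_uint thread_flags = TIF_POLLING_NRFLAG;

static bool clr_polling_and_test(void)
{
    /* the LOCK-prefixed "andb" in the trace: clear the polling bit... */
    atomic_fetch_and(&thread_flags, ~TIF_POLLING_NRFLAG);
    /* ...then re-test: did someone ask for a resched meanwhile? */
    return atomic_load(&thread_flags) & TIF_NEED_RESCHED;
}

int main(void)
{
    if (clr_polling_and_test())
        puts("resched pending: skip halting");
    else
        puts("safe to halt");
    return 0;
}
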
93 arch_cpu_idle(); | |
arch_cpu_idle () at arch/x86/kernel/process.c:571 | |
571 x86_idle(); | |
__x86_indirect_thunk_rax () at arch/x86/lib/retpoline.S:32 | |
32 GENERATE_THUNK(_ASM_AX) | |
default_idle () at arch/x86/kernel/process.c:578 | |
578 { | |
579 trace_cpu_idle_rcuidle(1, smp_processor_id()); | |
580 safe_halt(); | |
arch_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
native_safe_halt () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
mds_idle_clear_cpu_buffers () at arch/x86/kernel/process.c:580 | |
580 safe_halt(); | |
arch_static_branch_jump (branch=<optimized out>, key=<optimized out>) at ./arch/x86/include/asm/jump_label.h:23 | |
23 asm_volatile_goto("1:" | |
native_safe_halt () at ./arch/x86/include/asm/irqflags.h:60 | |
60 asm volatile("sti; hlt": : :"memory"); | |
582 } | |
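
The sleep itself is the two instructions at irqflags.h:60. sti has a one-instruction interrupt shadow, so nothing can be delivered between re-enabling interrupts and hlt; the wakeup interrupt either arrives while halted (resuming execution past hlt) or not at all yet. Halting with interrupts still masked would sleep forever. The function from the trace, with the reason for the pairing spelled out (kernel context only; hlt is privileged):

static inline void native_safe_halt(void)
{
    /* "sti" keeps interrupts masked for exactly one more instruction
     * (the STI shadow), so no interrupt can slip in between enabling
     * IRQs and halting -- the race where the CPU halts with a wakeup
     * already pending is closed by the hardware itself. */
    asm volatile("sti; hlt" : : : "memory");
}
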
cpuidle_idle_call () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
__current_set_polling () at kernel/sched/idle.c:208 | |
208 __current_set_polling(); | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
__current_set_polling () at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched/idle.h:24 | |
24 set_thread_flag(TIF_POLLING_NRFLAG); | |
set_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:71 | |
71 asm volatile(LOCK_PREFIX "orb %1,%0" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
arch_local_save_flags () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
cpuidle_idle_call () at kernel/sched/idle.c:213 | |
213 if (WARN_ON_ONCE(irqs_disabled())) | |
216 rcu_idle_exit(); | |
rcu_idle_exit () at kernel/rcu/tree.c:806 | |
806 { | |
809 local_irq_save(flags); | |
arch_local_irq_save () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
arch_local_save_flags () at kernel/rcu/tree.c:809 | |
809 local_irq_save(flags); | |
native_save_fl () at ./arch/x86/include/asm/irqflags.h:29 | |
29 asm volatile("# __raw_save_flags\n\t" | |
arch_local_irq_save () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
arch_local_irq_disable () at ./arch/x86/include/asm/irqflags.h:121 | |
121 arch_local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:779 | |
779 rdp = this_cpu_ptr(&rcu_data); | |
780 oldval = rdp->dynticks_nesting; | |
782 if (oldval) { | |
rcu_idle_exit () at kernel/rcu/tree.c:810 | |
810 rcu_eqs_exit(false); | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:787 | |
787 rcu_dynticks_eqs_exit(); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:250 | |
250 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
atomic_add_return (v=<optimized out>, i=<optimized out>) at kernel/rcu/tree.c:258 | |
258 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); | |
arch_atomic_add_return (v=<optimized out>, i=<optimized out>) at ./arch/x86/include/asm/atomic.h:167 | |
167 return i + xadd(&v->counter, i); | |
rcu_dynticks_eqs_exit () at kernel/rcu/tree.c:261 | |
261 if (seq & RCU_DYNTICK_CTRL_MASK) { | |
267 } | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:789 | |
789 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
791 WRITE_ONCE(rdp->dynticks_nesting, 1); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_eqs_exit (user=<optimized out>) at kernel/rcu/tree.c:792 | |
792 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); | |
793 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); | |
__write_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:220 | |
220 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | |
rcu_idle_exit () at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
arch_local_irq_restore (flags=<optimized out>) at kernel/rcu/tree.c:811 | |
811 local_irq_restore(flags); | |
native_restore_fl (flags=<optimized out>) at ./arch/x86/include/asm/irqflags.h:41 | |
41 asm volatile("push %0 ; popf" | |
rcu_idle_exit () at kernel/rcu/tree.c:812 | |
812 } | |
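
The exit path mirrors entry: another fully ordered atomic_add_return() flips the dynticks parity back, then the nesting words are restored, with WARN_ON_ONCE() guarding the invariants (nmi_nesting must still be 0 on exit, process nesting returns to 1, nmi_nesting to the DYNTICK_IRQ_NONIDLE sentinel). A small model of that bookkeeping; the sentinel value matches 5.x but treat it as illustrative:

#include <stdint.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE ((INT64_MAX / 2) + 1) /* "not idle" sentinel */

struct rcu_data_model {
    int64_t dynticks_nesting;     /* process-level rcu_idle nesting */
    int64_t dynticks_nmi_nesting; /* irq/NMI nesting, 0 while idle */
};

static void eqs_exit(struct rcu_data_model *rdp)
{
    if (rdp->dynticks_nesting != 0 || rdp->dynticks_nmi_nesting != 0)
        fprintf(stderr, "exiting an EQS we never fully entered\n");
    /* (the parity flip of the dynticks counter happens here) */
    rdp->dynticks_nesting = 1;
    rdp->dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE;
}

int main(void)
{
    struct rcu_data_model rdp = { 0, 0 }; /* state after rcu_eqs_enter() */
    eqs_exit(&rdp);
    printf("nesting=%lld nmi_nesting=%lld\n",
           (long long)rdp.dynticks_nesting,
           (long long)rdp.dynticks_nmi_nesting);
    return 0;
}
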
do_idle () at kernel/sched/idle.c:264 | |
264 arch_cpu_idle_exit(); | |
0xffffffff8108eb70 in arch_cpu_idle_exit () at kernel/sched/idle.c:72 | |
72 void __weak arch_cpu_idle_prepare(void) { } | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
need_resched () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
do_idle () at kernel/sched/idle.c:239 | |
239 while (!need_resched()) { | |
241 rmb(); | |
243 if (cpu_is_offline(cpu)) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
do_idle () at kernel/sched/idle.c:243 | |
243 if (cpu_is_offline(cpu)) { | |
249 local_irq_disable(); | |
arch_local_irq_disable () at kernel/sched/idle.c:249 | |
249 local_irq_disable(); | |
native_irq_disable () at ./arch/x86/include/asm/irqflags.h:49 | |
49 asm volatile("cli": : :"memory"); | |
do_idle () at kernel/sched/idle.c:250 | |
250 arch_cpu_idle_enter(); | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:557 | |
557 tsc_verify_tsc_adjust(false); | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:49 | |
49 { | |
50 struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust); | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
tsc_verify_tsc_adjust (resume=false) at arch/x86/kernel/tsc_sync.c:53 | |
53 if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) | |
78 } | |
arch_cpu_idle_enter () at arch/x86/kernel/process.c:558 | |
558 local_touch_nmi(); | |
local_touch_nmi () at arch/x86/kernel/nmi.c:556 | |
556 __this_cpu_write(last_nmi_rip, 0); | |
557 } | |
do_idle () at kernel/sched/idle.c:258 | |
258 if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:538 | |
538 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask); | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
tick_check_broadcast_expired () at kernel/time/tick-broadcast.c:539 | |
539 } | |
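
The cpumask checks bottom out in test_bit(): index into an array of longs, then test one bit. The trace shows both forms, the open-coded C at bitops.h:314 for compile-time-constant bit numbers and the "bt" instruction at bitops.h:321 otherwise. The computation itself:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static int test_bit(unsigned nr, const unsigned long *addr)
{
    return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
    unsigned long cpu_online_mask[4] = { 0x1 }; /* only CPU 0 online */
    printf("cpu0 online: %d\n", test_bit(0, cpu_online_mask));
    printf("cpu65 online: %d\n", test_bit(65, cpu_online_mask));
    return 0;
}
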
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_get_device () at ./include/linux/cpuidle.h:155 | |
155 {return __this_cpu_read(cpuidle_devices); } | |
cpuidle_idle_call () at kernel/sched/idle.c:131 | |
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:313 | |
313 return __cpuidle_get_cpu_driver(dev->cpu); | |
__cpuidle_get_cpu_driver (cpu=<optimized out>) at drivers/cpuidle/driver.c:309 | |
309 { | |
cpuidle_get_cpu_driver (dev=0x0 <irq_stack_union>) at drivers/cpuidle/driver.c:314 | |
314 } | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
need_resched () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
cpuidle_idle_call () at kernel/sched/idle.c:138 | |
138 if (need_resched()) { | |
149 if (cpuidle_not_available(drv, dev)) { | |
cpuidle_not_available (drv=0x0 <irq_stack_union>, dev=0x0 <irq_stack_union>) at drivers/cpuidle/cpuidle.c:51 | |
51 return off || !initialized || !drv || !dev || !dev->enabled; | |
52 } | |
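
Note that dev is NULL here; GDB prints 0x0 <irq_stack_union> only because address zero happens to fall inside that per-CPU symbol. With no cpuidle device registered, cpuidle_not_available() is true and cpuidle_idle_call() falls back to default_idle_call() instead of a governor-selected C-state. A simplified sketch of the guard (the kernel version at cpuidle.c:51 also checks its off and initialized globals):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cpuidle_driver { int dummy; };
struct cpuidle_device { bool enabled; };

static bool cpuidle_not_available(const struct cpuidle_driver *drv,
                                  const struct cpuidle_device *dev)
{
    return !drv || !dev || !dev->enabled;
}

int main(void)
{
    if (cpuidle_not_available(NULL, NULL))
        puts("falling back to default_idle_call()"); /* this trace */
    return 0;
}
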
do_idle () at kernel/sched/idle.c:262 | |
262 cpuidle_idle_call(); | |
cpuidle_idle_call () at kernel/sched/idle.c:150 | |
150 tick_nohz_idle_stop_tick(); | |
tick_nohz_idle_stop_tick () at kernel/time/tick-sched.c:956 | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
956 { | |
957 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); | |
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:918 | |
918 int cpu = smp_processor_id(); | |
924 if (ts->timer_expires_base) | |
926 else if (can_stop_idle_tick(cpu, ts)) | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:859 | |
859 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |
868 if (unlikely(!cpu_online(cpu))) { | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
can_stop_idle_tick (cpu=0, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:868 | |
868 if (unlikely(!cpu_online(cpu))) { | |
879 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | |
882 if (need_resched()) | |
need_resched () at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
get_current () at ./arch/x86/include/asm/current.h:15 | |
15 return this_cpu_read_stable(current_task); | |
need_resched () at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
test_ti_thread_flag (flag=<optimized out>, ti=<optimized out>) at ./include/linux/sched.h:1743 | |
1743 return unlikely(tif_need_resched()); | |
constant_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:314 | |
314 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; | |
can_stop_idle_tick (cpu=<optimized out>, ts=<optimized out>, ts=<optimized out>) at kernel/time/tick-sched.c:882 | |
882 if (need_resched()) | |
885 if (unlikely(local_softirq_pending())) { | |
913 } | |
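
can_stop_idle_tick() is a series of vetoes checked in order: the CPU must be online, nohz mode must be active, no reschedule may be pending, and no softirqs may be outstanding. Condensed into stubs (in the kernel, a pending softirq here also warns, since ksoftirqd should already have drained it):

#include <stdbool.h>
#include <stdio.h>

static bool cpu_online_now(void)      { return true;  }
static bool nohz_mode_active(void)    { return true;  }
static bool need_resched_now(void)    { return false; }
static bool softirq_pending_now(void) { return false; }

static bool can_stop_idle_tick(void)
{
    if (!cpu_online_now())     return false;
    if (!nohz_mode_active())   return false;
    if (need_resched_now())    return false;
    if (softirq_pending_now()) return false;
    return true;
}

int main(void)
{
    printf("stop tick: %s\n", can_stop_idle_tick() ? "yes" : "no");
    return 0;
}
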
__tick_nohz_idle_stop_tick (ts=<optimized out>) at kernel/time/tick-sched.c:927 | |
927 expires = tick_nohz_next_event(ts, cpu); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:646 | |
646 { | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqbegin (sl=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
raw_read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_seqcount_begin (s=<optimized out>) at kernel/time/tick-sched.c:652 | |
652 seq = read_seqbegin(&jiffies_lock); | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
__read_seqcount_begin (s=<optimized out>) at ./include/linux/seqlock.h:114 | |
114 if (unlikely(ret & 1)) { | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:653 | |
653 basemono = last_jiffies_update; | |
654 basejiff = jiffies; | |
655 } while (read_seqretry(&jiffies_lock, seq)); | |
656 ts->last_jiffies = basejiff; | |
657 ts->timer_expires_base = basemono; | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
rcu_needs_cpu (basemono=16546416592687, nextevt=0xffffffff82403e40) at kernel/rcu/tree_plugin.h:1448 | |
1448 *nextevt = KTIME_MAX; | |
1449 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); | |
1450 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
irq_work_needs_cpu () at kernel/irq_work.c:128 | |
128 raised = this_cpu_ptr(&raised_list); | |
129 lazy = this_cpu_ptr(&lazy_list); | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:131 | |
131 if (llist_empty(raised) || arch_irq_work_has_interrupt()) | |
132 if (llist_empty(lazy)) | |
llist_empty (head=<optimized out>) at kernel/irq_work.c:132 | |
132 if (llist_empty(lazy)) | |
__read_once_size (size=<optimized out>, res=<optimized out>, p=<optimized out>) at ./include/linux/compiler.h:193 | |
193 __READ_ONCE_SIZE; | |
irq_work_needs_cpu () at kernel/irq_work.c:133 | |
133 return false; | |
132 if (llist_empty(lazy)) | |
139 } | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:669 | |
669 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
local_timer_softirq_pending () at kernel/time/tick-sched.c:642 | |
642 return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | |
tick_nohz_next_event (ts=0xffff88800781bf60, cpu=0) at kernel/time/tick-sched.c:670 | |
670 irq_work_needs_cpu() || local_timer_softirq_pending()) { | |
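
This is the short-circuit at tick-sched.c:669-670: if RCU still holds callbacks for this CPU, irq_work is queued, or a TIMER_SOFTIRQ is pending, the next event is simply the next jiffy and the far-future computation is skipped. The last test is a one-bit check; TIMER_SOFTIRQ's slot matches the kernel's enum, the pending value below is made up:

#include <stdio.h>

#define TIMER_SOFTIRQ 1           /* matches the kernel's enum slot */
#define BIT(n) (1UL << (n))

int main(void)
{
    unsigned long pending = BIT(TIMER_SOFTIRQ); /* pretend one is queued */
    if (pending & BIT(TIMER_SOFTIRQ))
        puts("timer softirq pending: next event is the next jiffy");
    return 0;
}
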
680 next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1519 | |
1519 { | |
1520 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1529 if (cpu_is_offline(smp_processor_id())) | |
cpumask_test_cpu (cpumask=<optimized out>, cpu=<optimized out>) at ./include/linux/cpumask.h:344 | |
344 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | |
variable_test_bit (addr=<optimized out>, nr=<optimized out>) at ./arch/x86/include/asm/bitops.h:321 | |
321 asm volatile(__ASM_SIZE(bt) " %2,%1" | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1529 | |
1529 if (cpu_is_offline(smp_processor_id())) | |
1532 raw_spin_lock(&base->lock); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
__raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
do_raw_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
queued_spin_lock (lock=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at kernel/locking/spinlock.c:144 | |
144 __raw_spin_lock(lock); | |
arch_atomic_try_cmpxchg (new=<optimized out>, old=<optimized out>, v=<optimized out>) at ./arch/x86/include/asm/atomic.h:200 | |
200 return try_cmpxchg(&v->counter, old, new); | |
_raw_spin_lock (lock=0xffff888007819540) at kernel/locking/spinlock.c:145 | |
145 } | |
get_next_timer_interrupt (basej=4311212808, basem=16546416592687) at kernel/time/timer.c:1533 | |
1533 nextevt = __next_timer_interrupt(base); | |
__next_timer_interrupt (base=0xffff888007819540) at kernel/time/timer.c:1418 | |
1418 { | |
1424 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1418 { | |
1422 next = base->clk + NEXT_TIMER_MAX_DELTA; | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1402 | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1425 | |
1425 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1405 | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
1402 unsigned pos, start = offset + clk; | |
1405 pos = find_next_bit(base->pending_map, end, start); | |
find_next_bit (addr=0xffff888007819568, size=64, offset=9) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1406 | |
1406 if (pos < end) | |
1409 pos = find_next_bit(base->pending_map, start, offset); | |
find_next_bit (addr=0xffff888007819568, size=9, offset=0) at lib/find_bit.c:74 | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:73 | |
73 { | |
74 return _find_next_bit(addr, NULL, size, offset, 0UL); | |
_find_next_bit (invert=<optimized out>, start=<optimized out>, nbits=<optimized out>, addr2=<optimized out>, addr1=<optimized out>) at lib/find_bit.c:40 | |
40 if (unlikely(start >= nbits)) | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
43 tmp = addr1[start / BITS_PER_LONG]; | |
49 tmp &= BITMAP_FIRST_WORD_MASK(start); | |
50 start = round_down(start, BITS_PER_LONG); | |
52 while (!tmp) { | |
53 start += BITS_PER_LONG; | |
54 if (start >= nbits) | |
next_pending_bucket (clk=<optimized out>, offset=<optimized out>, base=<optimized out>) at kernel/time/timer.c:1410 | |
1410 return pos < start ? pos + LVL_SIZE - start : -1; | |
__next_timer_interrupt (base=<optimized out>) at kernel/time/timer.c:1470 | |
1470 adj = clk & LVL_CLK_MASK ? 1 : 0; |