Adeos i-pipe patch backported from 3.10.18 to 3.10.17
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 18a9f5e..44ffc9c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -68,6 +68,11 @@ config ARM
	  Europe.  There is an ARM Linux project with a web page at
	  <http://www.arm.linux.org.uk/>.
+if IPIPE
+config IPIPE_WANT_ACTIVE_MM
+	def_bool y
+endif
+
 config ARM_HAS_SG_CHAIN
	bool
@@ -483,6 +488,7 @@ config ARCH_IXP4XX
	select NEED_MACH_IO_H
	select USB_EHCI_BIG_ENDIAN_MMIO
	select USB_EHCI_BIG_ENDIAN_DESC
+	select IPIPE_ARM_KUSER_TSC if IPIPE
	help
	  Support for Intel's IXP4XX (XScale) family of processors.
@@ -702,6 +708,7 @@ config ARCH_S3C24XX
	select MULTI_IRQ_HANDLER
	select NEED_MACH_GPIO_H
	select NEED_MACH_IO_H
+	select IPIPE_ARM_KUSER_TSC if IPIPE
	help
	  Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
	  and S3C2450 SoCs based systems, such as the Simtec Electronics BAST
@@ -1058,6 +1065,14 @@ config ARM_TIMER_SP804
	select CLKSRC_MMIO
	select CLKSRC_OF if OF
+if IPIPE
+config IPIPE_ARM_KUSER_TSC
+	bool
+	select GENERIC_TIME_VSYSCALL
+	select IPIPE_HAVE_HOSTRT if IPIPE
+	default y if ARCH_AT91 || ARM_TIMER_SP804 || ARCH_MXC || ARCH_OMAP || PLAT_PXA || PLAT_S3C24XX || ARCH_SA1100
+endif
+
 source arch/arm/mm/Kconfig
 config ARM_NR_BANKS
@@ -1594,6 +1609,8 @@ config ARCH_NR_GPIO
	  If unsure, leave the default value.
+source kernel/ipipe/Kconfig
+
 source kernel/Kconfig.preempt
 config HZ
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 24b0475..74ba1c7 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -1,10 +1,8 @@
-#define _LINUX_STRING_H_
-
 #include <linux/compiler.h>	/* for inline */
 #include <linux/types.h>	/* for size_t */
 #include <linux/stddef.h>	/* for NULL */
 #include <linux/linkage.h>
-#include <asm/string.h>
+#include <linux/string.h>
 extern unsigned long free_mem_ptr;
 extern unsigned long free_mem_end_ptr;
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 032a8d9..a26991a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1246,6 +1246,15 @@ memdump:	mov	r12, r0
		mov	pc, r10
 #endif
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+	.text
+	.align 0
+	.type mcount %function
+	.global mcount
+mcount:
+	mov pc, lr	@ just return
+#endif
+
		.ltorg
 #ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c
index 36e53ef..3651693 100644
--- a/arch/arm/boot/compressed/string.c
+++ b/arch/arm/boot/compressed/string.c
@@ -93,6 +93,23 @@ int strcmp(const char *cs, const char *ct)
	return res;
 }
+char *strstr(const char *s1, const char *s2)
+{
+	size_t l1, l2;
+
+	l2 = strlen(s2);
+	if (!l2)
+		return (char *)s1;
+	l1 = strlen(s1);
+	while (l1 >= l2) {
+		l1--;
+		if (!memcmp(s1, s2, l2))
+			return (char *)s1;
+		s1++;
+	}
+	return NULL;
+}
+
 void *memchr(const void *s, int c, size_t count)
 {
	const unsigned char *p = s;
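
The decompressor cannot link against the kernel's lib/string.c, so the patch gives it a local strstr() (the classic length-based scan). A standalone check of the semantics it implements — an empty needle matches at the haystack start, otherwise the first occurrence wins:

#include <assert.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	const char *hay = "console=ttyAMA0 quiet";

	assert(strstr(hay, "") == hay);           /* empty needle: haystack itself */
	assert(strstr(hay, "quiet") == hay + 16); /* first occurrence */
	assert(strstr(hay, "debug") == NULL);     /* no match: NULL */
	printf("strstr semantics ok\n");
	return 0;
}
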
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 001f491..d8b2b9f 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -26,6 +26,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/export.h>
+#include <linux/ipipe.h>
 #include <asm/mach/pci.h>
 #include <asm/hardware/it8152.h>
@@ -124,21 +125,21 @@ void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
	bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1);
	while (bits_pd) {
		i = __ffs(bits_pd);
-		generic_handle_irq(IT8152_PD_IRQ(i));
+		ipipe_handle_demuxed_irq(IT8152_PD_IRQ(i));
		bits_pd &= ~(1 << i);
	}
	bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1);
	while (bits_lp) {
		i = __ffs(bits_lp);
-		generic_handle_irq(IT8152_LP_IRQ(i));
+		ipipe_handle_demuxed_irq(IT8152_LP_IRQ(i));
		bits_lp &= ~(1 << i);
	}
	bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1);
	while (bits_ld) {
		i = __ffs(bits_ld);
-		generic_handle_irq(IT8152_LD_IRQ(i));
+		ipipe_handle_demuxed_irq(IT8152_LD_IRQ(i));
		bits_ld &= ~(1 << i);
	}
 }
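
All three IT8152 demux loops now hand cascaded IRQs to ipipe_handle_demuxed_irq() instead of generic_handle_irq(), so a cascaded interrupt enters the interrupt pipeline (and can be taken by a real-time domain) before Linux handles it. Roughly what that wrapper amounts to — a sketch after the generic i-pipe header, not quoted from this patch:

#ifdef CONFIG_IPIPE
static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq)
{
	ipipe_trace_irq_entry(cascade_irq);
	__ipipe_dispatch_irq(cascade_irq, IPIPE_IRQF_NOSYNC); /* pipeline first */
	ipipe_trace_irq_exit(cascade_irq);
}
#else
static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq)
{
	generic_handle_irq(cascade_irq); /* plain kernels: old behaviour */
}
#endif
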
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index ddc7407..df40367 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -28,11 +28,25 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
 #include <asm/sched_clock.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
+#ifdef CONFIG_IPIPE
+static struct __ipipe_tscinfo tsc_info = {
+	.type = IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN,
+	.u = {
+		{
+			.mask = 0xffffffff,
+		},
+	},
+};
+#endif /* CONFIG_IPIPE */
+
 static long __init sp804_get_clock_rate(struct clk *clk)
 {
	long rate;
@@ -72,6 +86,7 @@ static u32 sp804_read(void)
 }
 void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+						     unsigned long phys,
						     const char *name,
						     struct clk *clk,
						     int use_sched_clock)
@@ -106,12 +121,25 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
		sched_clock_base = base;
		setup_sched_clock(sp804_read, 32, rate);
	}
+
+#ifdef CONFIG_IPIPE
+	tsc_info.freq = rate;
+	tsc_info.counter_vaddr = (unsigned long)base + TIMER_VALUE;
+	tsc_info.u.counter_paddr = phys + TIMER_VALUE;
+	__ipipe_tsc_register(&tsc_info);
+#endif
 }
 static void __iomem *clkevt_base;
 static unsigned long clkevt_reload;
+static inline void sp804_timer_ack(void)
+{
+	/* clear the interrupt */
+	writel(1, clkevt_base + TIMER_INTCLR);
+}
+
 /*
  * IRQ handler for the timer
  */
@@ -119,8 +147,10 @@ static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
 {
	struct clock_event_device *evt = dev_id;
-	/* clear the interrupt */
-	writel(1, clkevt_base + TIMER_INTCLR);
+	if (!clockevent_ipipe_stolen(evt))
+		sp804_timer_ack();
+
+	__ipipe_tsc_update();
	evt->event_handler(evt);
@@ -165,11 +195,20 @@ static int sp804_set_next_event(unsigned long next,
	return 0;
 }
+#ifdef CONFIG_IPIPE
+static struct ipipe_timer sp804_itimer = {
+	.ack = sp804_timer_ack,
+};
+#endif /* CONFIG_IPIPE */
+
 static struct clock_event_device sp804_clockevent = {
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= sp804_set_mode,
	.set_next_event	= sp804_set_next_event,
	.rating		= 300,
+#ifdef CONFIG_IPIPE
+	.ipipe_timer	= &sp804_itimer,
+#endif /* CONFIG_IPIPE */
 };
 static struct irqaction sp804_timer_irq = {
@@ -204,6 +243,10 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
	writel(0, base + TIMER_CTRL);
+#ifdef CONFIG_IPIPE
+	sp804_itimer.irq = irq;
+#endif /* CONFIG_IPIPE */
+
	setup_irq(irq, &sp804_timer_irq);
	clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
 }
@@ -211,6 +254,7 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
 static void __init sp804_of_init(struct device_node *np)
 {
	static bool initialized = false;
+	struct resource res;
	void __iomem *base;
	int irq;
	u32 irq_num = 0;
@@ -247,13 +291,18 @@ static void __init sp804_of_init(struct device_node *np)
	if (irq <= 0)
		goto err;
+	if (of_address_to_resource(np, 0, &res))
+		res.start = 0;
+
	of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
	if (irq_num == 2) {
		__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
-		__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+		__sp804_clocksource_and_sched_clock_init(base, res.start,
+							 name, clk1, 1);
	} else {
		__sp804_clockevents_init(base, irq, clk1 , name);
		__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+							 res.start + TIMER_2_BASE,
							 name, clk2, 1);
	}
	initialized = true;
@@ -267,6 +316,7 @@ CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
 static void __init integrator_cp_of_init(struct device_node *np)
 {
	static int init_count = 0;
+	struct resource res;
	void __iomem *base;
	int irq;
	const char *name = of_get_property(np, "compatible", NULL);
@@ -281,8 +331,11 @@ static void __init integrator_cp_of_init(struct device_node *np)
	if (init_count == 2 || !of_device_is_available(np))
		goto err;
+	if (of_address_to_resource(np, 0, &res))
+		res.start = 0;
+
	if (!init_count)
-		sp804_clocksource_init(base, name);
+		sp804_clocksource_init(base, res.start, name);
	else {
		irq = irq_of_parse_and_map(np, 0);
		if (irq <= 0)
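
Two things happen to the SP804 here: the free-running TIMER_VALUE register is published to user space as a kuser tsc (hence the new phys argument threaded through the init paths), and the clock event device gains an ipipe_timer so the head domain can take the timer over; when it does (clockevent_ipipe_stolen()), the i-pipe .ack handler clears the interrupt instead of the Linux ISR. Below is a userspace model of how a 32-bit down-counter of this kind is widened into a monotonic 64-bit timebase — an assumption about what IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN means, not code from the patch:

#include <stdint.h>
#include <stdio.h>

static uint64_t tsc;       /* widened 64-bit timebase */
static uint32_t last_cnt;  /* previous raw VALUE reading */

static uint64_t tsc_update(uint32_t raw)
{
	/* the SP804 VALUE register counts down, so elapsed ticks are
	 * last - now; unsigned arithmetic absorbs 32-bit wrap-around */
	tsc += (uint32_t)(last_cnt - raw);
	last_cnt = raw;
	return tsc;
}

int main(void)
{
	last_cnt = 100;
	printf("%llu\n", (unsigned long long)tsc_update(40));         /* 60 ticks */
	printf("%llu\n", (unsigned long long)tsc_update(0xffffffd8)); /* wrapped through 0: +80, total 140 */
	return 0;
}
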
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db398..7879c3c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += poll.h
-generic-y += resource.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9ee..afa863c 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -88,6 +88,18 @@
	.macro enable_irq_notrace
	cpsie	i
	.endm
+
+	.macro disable_irq_cond
+#ifdef CONFIG_IPIPE
+	cpsid	i
+#endif /* CONFIG_IPIPE */
+	.endm
+
+	.macro enable_irq_cond
+#ifdef CONFIG_IPIPE
+	cpsie	i
+#endif /* CONFIG_IPIPE */
+	.endm
 #else
	.macro disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
@@ -96,10 +108,22 @@
	.macro enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
+
+	.macro disable_irq_cond
+#ifdef CONFIG_IPIPE
+	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
+#endif /* CONFIG_IPIPE */
+	.endm
+
+	.macro enable_irq_cond
+#ifdef CONFIG_IPIPE
+	msr	cpsr_c, #SVC_MODE
+#endif /* CONFIG_IPIPE */
+	.endm
 #endif
	.macro asm_trace_hardirqs_off
-#if defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
@@ -107,7 +131,7 @@
	.endm
	.macro asm_trace_hardirqs_on_cond, cond
-#if defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d..6402553 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -160,10 +160,10 @@ static inline int atomic_add_return(int i, atomic_t *v)
	unsigned long flags;
	int val;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	val = v->counter;
	v->counter = val += i;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return val;
 }
@@ -174,10 +174,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	unsigned long flags;
	int val;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	val = v->counter;
	v->counter = val -= i;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return val;
 }
@@ -188,11 +188,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
	int ret;
	unsigned long flags;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return ret;
 }
@@ -201,9 +201,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
	unsigned long flags;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	*addr &= ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 #endif /* __LINUX_ARM_ARCH__ */
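
On pre-ARMv6 cores the atomic fallbacks work by masking interrupts. Under the pipeline, raw_local_irq_save() only flips the root domain's virtual mask while hardware IRQs stay open, so a head-domain interrupt could still preempt the read-modify-write; switching to hard_local_irq_save() masks the CPU for real. A runnable toy model of the two mask levels — illustration only, the names merely mimic the patch's convention:

#include <stdio.h>

static int cpsr_i;        /* real CPU mask bit (cpsid/cpsie i) */
static int root_stalled;  /* virtual mask of the root (Linux) domain */

static unsigned long raw_local_irq_save_model(void)   /* virtual */
{
	unsigned long was = root_stalled;
	root_stalled = 1;  /* head-domain IRQs still fire */
	return was;
}

static unsigned long hard_local_irq_save_model(void)  /* real */
{
	unsigned long was = cpsr_i;
	cpsr_i = 1;        /* nothing can interleave now */
	return was;
}

int main(void)
{
	unsigned long f = raw_local_irq_save_model();
	printf("virtual mask set, hardware still open: cpsr_i=%d\n", cpsr_i);
	root_stalled = (int)f;

	f = hard_local_irq_save_model();
	printf("hard mask set: cpsr_i=%d\n", cpsr_i);
	cpsr_i = (int)f;
	return 0;
}
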
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e691ec9..d065a3e 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -41,9 +41,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	*p |= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
@@ -53,9 +53,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	*p &= ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
@@ -65,9 +65,9 @@ static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned lon
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	*p ^= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 static inline int
@@ -79,10 +79,10 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	res = *p;
	*p = res | mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return (res & mask) != 0;
 }
@@ -96,10 +96,10 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	res = *p;
	*p = res & ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return (res & mask) != 0;
 }
@@ -113,10 +113,10 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
	p += bit >> 5;
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
	res = *p;
	*p = res ^ mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
	return (res & mask) != 0;
 }
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c..87c0f79 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,7 @@
 #define _ASMARM_BUG_H
 #include <linux/linkage.h>
+#include <linux/kernel.h>	/* For BUILD_BUG_ON */
 #ifdef CONFIG_BUG
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8..060f737 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -11,11 +11,13 @@
 #define _ASMARM_CACHEFLUSH_H
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <asm/glue-cache.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
+#include <asm/fcse.h>
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -167,6 +169,27 @@ extern void dmac_flush_range(const void *, const void *);
 #endif
+#ifdef CONFIG_ARM_FCSE
+#define FCSE_CACHE_MASK (~(L1_CACHE_BYTES - 1))
+#define FCSE_CACHE_ALIGN(addr) (((addr) + ~FCSE_CACHE_MASK) & FCSE_CACHE_MASK)
+
+static inline void
+fcse_flush_cache_user_range(struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end)
+{
+	if (cache_is_vivt()
+	    && fcse_mm_in_cache(vma->vm_mm)) {
+		start = fcse_va_to_mva(vma->vm_mm, start & FCSE_CACHE_MASK);
+		end = fcse_va_to_mva(vma->vm_mm, FCSE_CACHE_ALIGN(end));
+		__cpuc_flush_user_range(start, end, vma->vm_flags);
+	}
+}
+#undef FCSE_CACHE_MASK
+#undef FCSE_CACHE_ALIGN
+#else /* ! CONFIG_ARM_FCSE */
+#define fcse_flush_cache_user_range(vma, start, end) do { } while (0)
+#endif /* ! CONFIG_ARM_FCSE */
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
@@ -174,9 +197,10 @@ extern void dmac_flush_range(const void *, const void *);
  */
 extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		fcse_flush_cache_user_range(vma, vaddr, vaddr + len);	\
+		memcpy(dst, src, len);					\
	} while (0)
 /*
@@ -223,8 +247,11 @@ static inline void __flush_icache_all(void)
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+	if (fcse_mm_in_cache(mm)) {
+		unsigned seq = fcse_flush_all_start();
		__cpuc_flush_user_all();
+		fcse_flush_all_done(seq, 1);
+	}
 }
 static inline void
@@ -232,9 +259,11 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 {
	struct mm_struct *mm = vma->vm_mm;
-	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-					vma->vm_flags);
+	if (!mm || fcse_mm_in_cache(mm)) {
+		start = fcse_va_to_mva(mm, start & PAGE_MASK);
+		end = fcse_va_to_mva(mm, PAGE_ALIGN(end));
+		__cpuc_flush_user_range(start, end, vma->vm_flags);
+	}
 }
 static inline void
@@ -242,8 +271,9 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 {
	struct mm_struct *mm = vma->vm_mm;
-	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
-		unsigned long addr = user_addr & PAGE_MASK;
+	if (!mm || fcse_mm_in_cache(mm)) {
+		unsigned long addr;
+		addr = fcse_va_to_mva(mm, user_addr) & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
 }
@@ -268,14 +298,22 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(start, end)				\
+	({								\
+		struct mm_struct *_mm = current->mm;			\
+		unsigned long _start, _end;				\
+		_start = fcse_va_to_mva(_mm, start) & PAGE_MASK;	\
+		_end = PAGE_ALIGN(fcse_va_to_mva(_mm, end));		\
+		__cpuc_coherent_user_range(_start, _end);		\
+	})
 /*
  * Perform necessary cache operations to ensure that data previously
  * stored within this range of addresses can be executed by the CPU.
  */
-#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
+#define flush_icache_range(s,e)						\
+	__cpuc_coherent_kern_range(fcse_va_to_mva(current->mm, (s)),	\
+				   fcse_va_to_mva(current->mm, (e)))
 /*
  * Perform necessary cache operations to ensure that the TLB will
@@ -316,7 +354,8 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
	extern void __flush_anon_page(struct vm_area_struct *vma,
			struct page *, unsigned long);
	if (PageAnon(page))
-		__flush_anon_page(vma, page, vmaddr);
+		__flush_anon_page(vma, page,
+				  fcse_va_to_mva(vma->vm_mm, vmaddr));
 }
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
@@ -345,9 +384,11 @@ extern void flush_kernel_dcache_page(struct page *);
  */
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
-	if (!cache_is_vipt_nonaliasing())
+	if (!cache_is_vipt_nonaliasing()) {
+		unsigned seq = fcse_flush_all_start();
		flush_cache_all();
-	else
+		fcse_flush_all_done(seq, 1);
+	} else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
@@ -357,8 +398,11 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
-	if (!cache_is_vipt_nonaliasing())
+	if (!cache_is_vipt_nonaliasing()) {
+		unsigned seq = fcse_flush_all_start();
		flush_cache_all();
+		fcse_flush_all_done(seq, 1);
+	}
 }
 /*
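
Every VIVT cache-maintenance path above is made FCSE-aware: user addresses are first relocated to modified virtual addresses with fcse_va_to_mva(), ranges are widened to cache-line boundaries, and whole-cache flushes are bracketed by fcse_flush_all_start()/fcse_flush_all_done() so the per-PID dirty bookkeeping stays truthful. A quick standalone check of the FCSE_CACHE_ALIGN round-up used in fcse_flush_cache_user_range() (assuming a 32-byte line, typical of the ARM9-class cores FCSE targets):

#include <assert.h>
#include <stdio.h>

#define L1_CACHE_BYTES      32UL  /* assumed line size */
#define FCSE_CACHE_MASK     (~(L1_CACHE_BYTES - 1))
#define FCSE_CACHE_ALIGN(a) (((a) + ~FCSE_CACHE_MASK) & FCSE_CACHE_MASK)

int main(void)
{
	assert((0x1005UL & FCSE_CACHE_MASK) == 0x1000UL); /* start rounds down */
	assert(FCSE_CACHE_ALIGN(0x1005UL) == 0x1020UL);   /* end rounds up */
	assert(FCSE_CACHE_ALIGN(0x1000UL) == 0x1000UL);   /* aligned stays put */
	printf("flush range covers whole cache lines\n");
	return 0;
}
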
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c1..a295e13 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -63,17 +63,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error SMP is not supported on this platform
 #endif
	case 1:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
		break;
	case 4:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
		break;
 #else
	case 1:
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 88d6181..b23d57c 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -11,7 +11,11 @@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
+#ifdef CONFIG_IPIPE
+	bne	__ipipe_grab_irq
+#else
	bne	asm_do_IRQ
+#endif
 #ifdef CONFIG_SMP
	/*
@@ -24,8 +28,12 @@
	ALT_UP_B(9997f)
	movne	r1, sp
	adrne	lr, BSYM(1b)
+#ifdef CONFIG_IPIPE
+	bne	__ipipe_grab_ipi
+#else
	bne	do_IPI
 #endif
+#endif
 9997:
	.endm
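
This is the central hook: the multi-irq entry macro branches to __ipipe_grab_irq()/__ipipe_grab_ipi() instead of asm_do_IRQ()/do_IPI(), so every hardware interrupt is seen by the pipeline before Linux. A deliberately crude userspace model of the routing decision __ipipe_grab_irq() makes — purely illustrative, the IRQ numbers and the head-domain test are made up:

#include <stdio.h>

enum domain { HEAD, ROOT };

/* stand-in for the pipeline's per-IRQ routing state */
static enum domain route(int irq)
{
	return irq == 30 ? HEAD : ROOT; /* pretend the co-kernel owns IRQ 30 */
}

static void grab_irq(int irq)
{
	if (route(irq) == HEAD)
		printf("irq %d: head domain handler runs immediately\n", irq);
	else
		printf("irq %d: logged, played to Linux (asm_do_IRQ) later\n", irq);
}

int main(void)
{
	grab_irq(30);
	grab_irq(47);
	return 0;
}
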
diff --git a/arch/arm/include/asm/fcse.h b/arch/arm/include/asm/fcse.h
new file mode 100644
index 0000000..af67648
--- /dev/null
+++ b/arch/arm/include/asm/fcse.h
@@ -0,0 +1,201 @@
+/*
+ * arch/arm/include/asm/fcse.h
+ *
+ * Helper header for using the ARM Fast Context Switch Extension with
+ * processors supporting it, lifted from the Fast Address Space
+ * Switching (FASS) patch for ARM Linux.
+ *
+ * Copyright (C) 2001, 2002 Adam Wiggins <[email protected]>
+ * Copyright (C) 2007 Sebastian Smolorz <[email protected]>
+ * Copyright (C) 2008 Richard Cochran
+ * Copyright (C) 2009-2011 Gilles Chanteperdrix <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_FCSE_H
+#define __ASM_ARM_FCSE_H
+
+#ifdef CONFIG_ARM_FCSE
+
+#include <linux/mm_types.h>	/* For struct mm_struct */
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+#include <asm/bitops.h>
+#include <asm/cachetype.h>
+
+#define FCSE_PID_SHIFT 25
+
+/* Size of PID relocation area */
+#define FCSE_PID_TASK_SIZE (1UL << FCSE_PID_SHIFT)
+
+/* Mask to get rid of PID from relocated address */
+#define FCSE_PID_MASK (FCSE_PID_TASK_SIZE - 1)
+
+#define FCSE_PID_INVALID (~0 << FCSE_PID_SHIFT)
+
+#define FCSE_NR_PIDS (TASK_SIZE / FCSE_PID_TASK_SIZE)
+#define FCSE_PID_MAX (FCSE_NR_PIDS - 1)
+
+#ifdef CONFIG_ARM_FCSE_DEBUG
+#define FCSE_BUG_ON(expr) BUG_ON(expr)
+#else /* !CONFIG_ARM_FCSE_DEBUG */
+#define FCSE_BUG_ON(expr) do { } while(0)
+#endif /* !CONFIG_ARM_FCSE_DEBUG */
+
+struct vm_unmapped_area_info;
+
+extern unsigned long fcse_pids_cache_dirty[];
+
+int fcse_pid_alloc(struct mm_struct *mm);
+void fcse_pid_free(struct mm_struct *mm);
+unsigned fcse_flush_all_start(void);
+void fcse_flush_all_done(unsigned seq, unsigned dirty);
+unsigned long
+fcse_check_mmap_inner(struct mm_struct *mm,
+		      struct vm_unmapped_area_info *info,
+		      unsigned long addr, unsigned long flags);
+
+/* Sets the CPU's PID Register */
+static inline void fcse_pid_set(unsigned long pid)
+{
+	__asm__ __volatile__ ("mcr p15, 0, %0, c13, c0, 0"
+			      : /* */: "r" (pid) : "cc", "memory");
+}
+
+static inline unsigned long fcse_pid_get(void)
+{
+	unsigned long pid;
+	__asm__ __volatile__ ("mrc p15, 0, %0, c13, c0, 0"
+			      : "=r"(pid) : /* */ : "cc", "memory");
+	return pid;
+}
+
+static inline unsigned long fcse_mva_to_va(unsigned long mva)
+{
+	unsigned long va;
+
+	if (!cache_is_vivt())
+		return mva;
+
+	va = fcse_pid_get() ^ mva;
+	return (va & 0xfe000000) ? mva : va;
+}
+
+static inline unsigned long
+fcse_va_to_mva(struct mm_struct *mm, unsigned long va)
+{
+	if (cache_is_vivt() && va < FCSE_PID_TASK_SIZE) {
+		return mm->context.fcse.pid | va;
+	}
+	return va;
+}
+
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT
+struct fcse_user {
+	struct mm_struct *mm;
+	unsigned count;
+};
+extern struct fcse_user fcse_pids_user[];
+extern struct mm_struct *fcse_large_process;
+int fcse_switch_mm_start_inner(struct mm_struct *next);
+void fcse_switch_mm_end_inner(struct mm_struct *next);
+void fcse_pid_reference(unsigned pid);
+
+static inline int fcse_switch_mm_start(struct mm_struct *next)
+{
+	if (!cache_is_vivt())
+		return 0;
+
+	return fcse_switch_mm_start_inner(next);
+}
+
+static inline void fcse_switch_mm_end(struct mm_struct *next)
+{
+	if (!cache_is_vivt())
+		return;
+
+	fcse_switch_mm_end_inner(next);
+}
+
+static inline int fcse_mm_in_cache(struct mm_struct *mm)
+{
+	unsigned fcse_pid = mm->context.fcse.pid >> FCSE_PID_SHIFT;
+	int res;
+	res = test_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty)
+		&& fcse_pids_user[fcse_pid].mm == mm;
+	return res;
+}
+#else /* CONFIG_ARM_FCSE_GUARANTEED */
+static inline int fcse_switch_mm_start(struct mm_struct *next)
+{
+	return 0;
+}
+
+static inline void fcse_switch_mm_end(struct mm_struct *next)
+{
+	unsigned fcse_pid;
+
+	if (!cache_is_vivt())
+		return;
+
+	fcse_pid = next->context.fcse.pid >> FCSE_PID_SHIFT;
+	set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty);
+	fcse_pid_set(next->context.fcse.pid);
+}
+
+static inline int fcse_mm_in_cache(struct mm_struct *mm)
+{
+	unsigned fcse_pid = mm->context.fcse.pid >> FCSE_PID_SHIFT;
+	return test_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty);
+}
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */
+
+static inline unsigned long
+fcse_check_mmap_addr(struct mm_struct *mm,
+		     unsigned long addr, unsigned long len,
+		     struct vm_unmapped_area_info *info, unsigned long flags)
+{
+	if ((addr & ~PAGE_MASK) == 0 && addr + len <= FCSE_TASK_SIZE)
+		return addr;
+
+	return fcse_check_mmap_inner(mm, info, addr, flags);
+}
+
+static inline void fcse_mark_dirty(struct mm_struct *mm)
+{
+	if (cache_is_vivt()) {
+		set_bit(FCSE_PID_MAX - (mm->context.fcse.pid >> FCSE_PID_SHIFT),
+			fcse_pids_cache_dirty);
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT
+		if (mm->context.fcse.large)
+			fcse_large_process = mm;
+#endif
+	}
+}
+
+#define fcse() (cache_is_vivt())
+#else /* ! CONFIG_ARM_FCSE */
+#define fcse_switch_mm_start(next) 1
+#define fcse_switch_mm_end(next) do { (void)(next); } while(0)
+#define fcse_mva_to_va(mva) (mva)
+#define fcse_va_to_mva(mm, x) ({ (void)(mm); (x); })
+#define fcse_mark_dirty(mm) do { (void)(mm); } while(0)
+#define fcse_flush_all_start() (0)
+#define fcse_flush_all_done(seq, dirty) do { (void)(seq); } while (0)
+#define fcse_mm_in_cache(mm) \
+	(cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+#define fcse_check_mmap_addr(mm, addr, len, info, flags) (addr)
+#define fcse() (0)
+#endif /* ! CONFIG_ARM_FCSE */
+
+#ifdef CONFIG_ARM_FCSE_MESSAGES
+void fcse_notify_segv(struct mm_struct *mm,
+		      unsigned long addr, struct pt_regs *regs);
+#else /* !FCSE_MESSAGES */
+#define fcse_notify_segv(mm, addr, regs) do { } while(0)
+#endif /* !FCSE_MESSAGES */
+
+#endif /* __ASM_ARM_FCSE_H */
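
fcse.h is the heart of the FCSE support: with a PID programmed into CP15 c13, the CPU relocates every virtual address below 32 MB (FCSE_PID_TASK_SIZE) to PID*32MB | VA, which lets small processes context-switch without flushing a VIVT cache. fcse_va_to_mva() performs that relocation in software wherever the kernel does cache maintenance on user addresses, and fcse_mva_to_va() XORs the current PID back out. A standalone model of the arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FCSE_PID_SHIFT     25
#define FCSE_PID_TASK_SIZE (1UL << FCSE_PID_SHIFT) /* 32 MB per PID slot */

static uint32_t va_to_mva(uint32_t pid_reg, uint32_t va)
{
	return va < FCSE_PID_TASK_SIZE ? (pid_reg | va) : va;
}

int main(void)
{
	uint32_t pid = 3UL << FCSE_PID_SHIFT; /* say this mm was allocated PID 3 */

	/* VA 0x8000 of process 3 lives at 3*32MB + 0x8000 in the cache/TLB */
	assert(va_to_mva(pid, 0x8000) == 0x06008000);
	/* addresses at or above 32 MB are not relocated */
	assert(va_to_mva(pid, 0x40000000) == 0x40000000);
	/* XORing the PID back recovers the original VA */
	assert((pid ^ va_to_mva(pid, 0x8000)) == 0x8000);
	printf("each PID owns a private 32 MB window\n");
	return 0;
}
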
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
index bb28af7..780ca50 100644
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -1,19 +1,22 @@
 struct clk;
 void __sp804_clocksource_and_sched_clock_init(void __iomem *,
+					      unsigned long,
					      const char *, struct clk *, int);
 void __sp804_clockevents_init(void __iomem *, unsigned int,
			      struct clk *, const char *);
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
+static inline void sp804_clocksource_init(void __iomem *base,
+					  unsigned long phys, const char *name)
 {
-	__sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
+	__sp804_clocksource_and_sched_clock_init(base, phys, name, NULL, 0);
 }
 static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
+							   unsigned long phys,
							   const char *name)
 {
-	__sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
+	__sp804_clocksource_and_sched_clock_init(base, phys, name, NULL, 1);
 }
 static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
diff --git a/arch/arm/include/asm/ipipe.h b/arch/arm/include/asm/ipipe.h
new file mode 100644
index 0000000..7c156bf
--- /dev/null
+++ b/arch/arm/include/asm/ipipe.h
@@ -0,0 +1,268 @@
+/* -*- linux-c -*-
+ * arch/arm/include/asm/ipipe.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ARM_IPIPE_H
+#define __ARM_IPIPE_H
+
+#ifdef CONFIG_IPIPE
+
+#define BROKEN_BUILTIN_RETURN_ADDRESS
+#undef __BUILTIN_RETURN_ADDRESS0
+#undef __BUILTIN_RETURN_ADDRESS1
+#ifdef CONFIG_FRAME_POINTER
+#define __BUILTIN_RETURN_ADDRESS0 arm_return_addr(0)
+#define __BUILTIN_RETURN_ADDRESS1 arm_return_addr(1)
+extern unsigned long arm_return_addr(int level);
+#else
+#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0))
+#define __BUILTIN_RETURN_ADDRESS1 (0)
+#endif
+
+#include <linux/ipipe_trace.h>
+
+#define IPIPE_CORE_RELEASE	1
+
+struct ipipe_domain;
+
+#define IPIPE_TSC_TYPE_NONE			0
+#define IPIPE_TSC_TYPE_FREERUNNING		1
+#define IPIPE_TSC_TYPE_DECREMENTER		2
+#define IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN	3
+#define IPIPE_TSC_TYPE_FREERUNNING_TWICE	4
+#define IPIPE_TSC_TYPE_FREERUNNING_ARCH	5
+
+/* tscinfo, exported to user-space */
+struct __ipipe_tscinfo {
+	unsigned type;
+	unsigned freq;
+	unsigned long counter_vaddr;
+	union {
+		struct {
+			unsigned long counter_paddr;
+			unsigned long long mask;
+		};
+		struct {
+			unsigned *counter; /* Hw counter physical address */
+			unsigned long long mask; /* Significant bits in the hw counter. */
+			unsigned long long *tsc; /* 64 bits tsc value. */
+		} fr;
+		struct {
+			unsigned *counter; /* Hw counter physical address */
+			unsigned long long mask; /* Significant bits in the hw counter. */
+			unsigned *last_cnt; /* Counter value when updating
+						tsc value. */
+			unsigned long long *tsc; /* 64 bits tsc value. */
+		} dec;
+	} u;
+};
+
+struct ipipe_arch_sysinfo {
+	struct __ipipe_tscinfo tsc;
+};
+
+
+/* arch specific stuff */
+extern char __ipipe_tsc_area[];
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info);
+
+#ifdef CONFIG_IPIPE_ARM_KUSER_TSC
+unsigned long long __ipipe_tsc_get(void) __attribute__((long_call));
+void __ipipe_tsc_register(struct __ipipe_tscinfo *info);
+void __ipipe_tsc_update(void);
+extern unsigned long __ipipe_kuser_tsc_freq;
+#define __ipipe_hrclock_freq __ipipe_kuser_tsc_freq
+#else /* ! generic tsc */
+unsigned long long __ipipe_mach_get_tsc(void);
+#define __ipipe_tsc_get() __ipipe_mach_get_tsc()
+#ifndef __ipipe_hrclock_freq
+#define __ipipe_hrclock_freq __ipipe_hrtimer_freq
+#endif /* !__ipipe_mach_hrclock_freq */
+#endif /* ! generic tsc */
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+extern void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
+
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+
+#define ipipe_mm_switch_protect(flags)		\
+	do {					\
+		(void)(flags);			\
+	} while(0)
+
+#define ipipe_mm_switch_unprotect(flags)	\
+	do {					\
+		(void)(flags);			\
+	} while(0)
+
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#define ipipe_mm_switch_protect(flags) \
+	flags = hard_cond_local_irq_save()
+
+#define ipipe_mm_switch_unprotect(flags) \
+	hard_cond_local_irq_restore(flags)
+
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#define ipipe_get_active_mm() (__this_cpu_read(ipipe_percpu.active_mm))
+
+#define ipipe_read_tsc(t) do { t = __ipipe_tsc_get(); } while(0)
+#define __ipipe_read_timebase() __ipipe_tsc_get()
+
+#define ipipe_tsc2ns(t) \
+({ \
+	unsigned long long delta = (t)*1000; \
+	do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+	(unsigned long)delta; \
+})
+#define ipipe_tsc2us(t) \
+({ \
+	unsigned long long delta = (t); \
+	do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+	(unsigned long)delta; \
+})
+
+static inline const char *ipipe_clock_name(void)
+{
+	return "ipipe_tsc";
+}
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_enable_irq(irq)		enable_irq(irq)
+#define __ipipe_disable_irq(irq)	disable_irq(irq)
+
+/* PIC muting */
+struct ipipe_mach_pic_muter {
+	void (*enable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+	void (*disable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+	void (*mute)(void);
+	void (*unmute)(void);
+};
+
+extern struct ipipe_mach_pic_muter ipipe_pic_muter;
+
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter);
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+static inline void ipipe_mute_pic(void)
+{
+	if (ipipe_pic_muter.mute)
+		ipipe_pic_muter.mute();
+}
+
+static inline void ipipe_unmute_pic(void)
+{
+	if (ipipe_pic_muter.unmute)
+		ipipe_pic_muter.unmute();
+}
+
+#define ipipe_notify_root_preemption() do { } while(0)
+
+#ifdef CONFIG_SMP
+void __ipipe_early_core_setup(void);
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
+void __ipipe_root_localtimer(unsigned int irq, void *cookie);
+void __ipipe_send_vnmi(void (*fn)(void *), cpumask_t cpumask, void *arg);
+void __ipipe_do_vnmi(unsigned int irq, void *cookie);
+void __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs);
+void __ipipe_ipis_alloc(void);
+void __ipipe_ipis_request(void);
+
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+	__ipipe_grab_ipi(irq, regs);
+}
+#else /* !CONFIG_SMP */
+#define __ipipe_early_core_setup()	do { } while(0)
+#define __ipipe_hook_critical_ipi(ipd)	do { } while(0)
+#endif /* !CONFIG_SMP */
+#ifndef __ipipe_mach_init_platform
+#define __ipipe_mach_init_platform()	do { } while(0)
+#endif
+
+void __ipipe_enable_pipeline(void);
+
+void __ipipe_do_critical_sync(unsigned irq, void *cookie);
+
+void __ipipe_grab_irq(int irq, struct pt_regs *regs);
+
+void __ipipe_exit_irq(struct pt_regs *regs);
+
+static inline void ipipe_handle_multi_irq(int irq, struct pt_regs *regs)
+{
+	__ipipe_grab_irq(irq, regs);
+}
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+	return ffs(ul) - 1;
+}
+
+#define __ipipe_syscall_watched_p(p, sc)	\
+	(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= __ARM_NR_BASE + 64)
+
+#define __ipipe_root_tick_p(regs) (!arch_irqs_disabled_flags(regs->ARM_cpsr))
+
+#else /* !CONFIG_IPIPE */
+
+#define __ipipe_tsc_update()	do { } while(0)
+
+#define hard_smp_processor_id()	smp_processor_id()
+
+#define ipipe_mm_switch_protect(flags) \
+	do { \
+		(void) (flags); \
+	} while(0)
+
+#define ipipe_mm_switch_unprotect(flags) \
+	do { \
+		(void) (flags); \
+	} while(0)
+
+static inline void ipipe_handle_multi_irq(int irq, struct pt_regs *regs)
+{
+	handle_IRQ(irq, regs);
+}
+
+#ifdef CONFIG_SMP
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+	handle_IPI(irq, regs);
+}
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_IPIPE */
+
+#if defined (CONFIG_IPIPE_DEBUG) && \
+	(defined(CONFIG_DEBUG_LL) || defined(CONFIG_SERIAL_8250_CONSOLE))
+void __ipipe_serial_debug(const char *fmt, ...);
+#else
+#define __ipipe_serial_debug(fmt, args...)	do { } while (0)
+#endif
+
+#endif /* !__ARM_IPIPE_H */
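
The conversion helpers near the end deserve a worked number: ipipe_tsc2us() divides the tick count by (__ipipe_hrclock_freq / 1000000 + 1); the +1 both avoids a zero divisor on sub-MHz clocks and rounds the truncated divisor up, so estimates err slightly low rather than overflowing high. In plain C:

#include <stdio.h>

int main(void)
{
	unsigned long long freq = 99900000ULL; /* say, a ~100 MHz timebase */
	unsigned long long ticks = 1500;

	unsigned long long div = freq / 1000000 + 1;           /* 99 + 1 = 100 */
	printf("%llu ticks ~= %llu us\n", ticks, ticks / div); /* ~15 us */
	return 0;
}
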
diff --git a/arch/arm/include/asm/ipipe_base.h b/arch/arm/include/asm/ipipe_base.h
new file mode 100644
index 0000000..4a50ccd
--- /dev/null
+++ b/arch/arm/include/asm/ipipe_base.h
@@ -0,0 +1,147 @@
+/* -*- linux-c -*-
+ * arch/arm/include/asm/ipipe_base.h
+ *
+ * Copyright (C) 2007 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_ARM_IPIPE_BASE_H
+#define __ASM_ARM_IPIPE_BASE_H
+
+#include <asm/irq.h>	/* For NR_IRQS */
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_ROOT_IRQS	1024
+
+#define IPIPE_NR_XIRQS		IPIPE_NR_ROOT_IRQS
+
+#ifdef CONFIG_SMP
+
+extern unsigned __ipipe_first_ipi;
+
+#define IPIPE_CRITICAL_IPI	__ipipe_first_ipi
+#define IPIPE_HRTIMER_IPI	(IPIPE_CRITICAL_IPI + 1)
+#define IPIPE_RESCHEDULE_IPI	(IPIPE_CRITICAL_IPI + 2)
+#define IPIPE_SERVICE_VNMI	(IPIPE_CRITICAL_IPI + 3)
+
+#define IPIPE_LAST_IPI		IPIPE_SERVICE_VNMI
+
+#define hard_smp_processor_id()						\
+	({								\
+		unsigned int cpunum;					\
+		__asm__ __volatile__ ("\n"				\
+			"1:	mrc p15, 0, %0, c0, c0, 5\n"		\
+			"	.pushsection \".alt.smp.init\", \"a\"\n" \
+			"	.long	1b\n"				\
+			"	mov	%0, #0\n"			\
+			"	.popsection"				\
+			: "=r" (cpunum));				\
+		cpunum &= 0xFF;						\
+	})
+
+extern u32 __cpu_logical_map[];
+#define ipipe_processor_id()  (__cpu_logical_map[hard_smp_processor_id()])
+
+#define IPIPE_ARCH_HAVE_VIRQ_IPI
+
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id()	(0)
+#endif /* !CONFIG_IPIPE */
+
+/* ARM traps */
+#define IPIPE_TRAP_ACCESS	0	/* Data or instruction access exception */
+#define IPIPE_TRAP_SECTION	1	/* Section fault */
+#define IPIPE_TRAP_DABT		2	/* Generic data abort */
+#define IPIPE_TRAP_UNKNOWN	3	/* Unknown exception */
+#define IPIPE_TRAP_BREAK	4	/* Instruction breakpoint */
+#define IPIPE_TRAP_FPU		5	/* Floating point exception */
+#define IPIPE_TRAP_VFP		6	/* VFP floating point exception */
+#define IPIPE_TRAP_UNDEFINSTR	7	/* Undefined instruction */
+#define IPIPE_TRAP_ALIGNMENT	8	/* Unaligned access exception */
+#define IPIPE_TRAP_MAYDAY	9	/* Internal recovery trap */
+#define IPIPE_NR_FAULTS		10
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
+void ipipe_stall_root(void);
+
+unsigned long ipipe_test_and_stall_root(void);
+
+unsigned long ipipe_test_root(void);
+
+#else /* !CONFIG_SMP */
+
+#include <asm/irqflags.h>
+
+#if __GNUC__ >= 4
+/* Alias to ipipe_root_cpudom_var(status) */
+extern unsigned long __ipipe_root_status;
+#else
+extern unsigned long *const __ipipe_root_status_addr;
+#define __ipipe_root_status	(*__ipipe_root_status_addr)
+#endif
+
+static inline void ipipe_stall_root(void)
+{
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	__ipipe_root_status |= 1;
+	hard_local_irq_restore(flags);
+}
+
+static inline unsigned ipipe_test_root(void)
+{
+	return __ipipe_root_status & 1;
+}
+
+static inline unsigned ipipe_test_and_stall_root(void)
+{
+	unsigned long flags, res;
+
+	flags = hard_local_irq_save();
+	res = __ipipe_root_status;
+	__ipipe_root_status = res | 1;
+	hard_local_irq_restore(flags);
+
+	return res & 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_IPIPE_LEGACY
+#define __IPIPE_FEATURE_PREEMPTIBLE_SWITCH	1
+#define __IPIPE_FEATURE_SYSINFO_V2		1
+
+#ifdef CONFIG_VFP
+#define __IPIPE_FEATURE_VFP_SAFE	1
+#endif
+
+#ifdef CONFIG_IPIPE_ARM_KUSER_TSC
+#define __IPIPE_FEATURE_KUSER_TSC	1
+#endif
+#endif /* CONFIG_IPIPE_LEGACY */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* __ASM_ARM_IPIPE_BASE_H */
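
The UP versions of ipipe_stall_root()/ipipe_test_and_stall_root() above implement the root domain's virtual interrupt mask: bit 0 of __ipipe_root_status plays the role of PSR_I_BIT (arch_local_irq_save() in ipipe_hwirq.h shifts it left by 7 to look like one), and the hard_local_irq_save() bracket protects the read-modify-write from head-domain preemption. Minus that bracket, the logic reduces to this sketch:

#include <assert.h>
#include <stdio.h>

static unsigned long root_status; /* bit 0 == root domain stalled */

static unsigned test_and_stall_root(void)
{
	unsigned was = root_status & 1;
	root_status |= 1;
	return was;
}

int main(void)
{
	assert(test_and_stall_root() == 0); /* outermost caller stalls */
	assert(test_and_stall_root() == 1); /* nested caller sees the stall */
	root_status &= ~1UL; /* unstall; the real code would now play
				back any root IRQs logged meanwhile */
	printf("stall bit behaves like a virtual PSR_I_BIT\n");
	return 0;
}
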
diff --git a/arch/arm/include/asm/ipipe_hwirq.h b/arch/arm/include/asm/ipipe_hwirq.h
new file mode 100644
index 0000000..6b864aa
--- /dev/null
+++ b/arch/arm/include/asm/ipipe_hwirq.h
@@ -0,0 +1,269 @@
+/* -*- linux-c -*-
+ * arch/arm/include/asm/ipipe_hwirq.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ASM_ARM_IPIPE_HWIRQ_H
+#define _ASM_ARM_IPIPE_HWIRQ_H
+
+#define hard_local_irq_restore_notrace(x)				\
+	__asm__ __volatile__(						\
+	"msr	cpsr_c, %0		@ hard_local_irq_restore\n"	\
+	:								\
+	: "r" (x)							\
+	: "memory", "cc")
+
+static inline void hard_local_irq_disable_notrace(void)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	__asm__("cpsid i	@ __cli" : : : "memory", "cc");
+#else /* linux arch <= 5 */
+	unsigned long temp;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ hard_local_irq_disable\n"
+		"orr	%0, %0, #128\n"
+		"msr	cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+#endif /* linux arch <= 5 */
+}
+
+static inline void hard_local_irq_enable_notrace(void)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	__asm__("cpsie i	@ __sti" : : : "memory", "cc");
+#else /* linux arch <= 5 */
+	unsigned long temp;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ hard_local_irq_enable\n"
+		"bic	%0, %0, #128\n"
+		"msr	cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+#endif /* linux arch <= 5 */
+}
+
+static inline void hard_local_fiq_disable_notrace(void)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	__asm__("cpsid f	@ __clf" : : : "memory", "cc");
+#else /* linux arch <= 5 */
+	unsigned long temp;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ clf\n"
+		"orr	%0, %0, #64\n"
+		"msr	cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+#endif /* linux arch <= 5 */
+}
+
+static inline void hard_local_fiq_enable_notrace(void)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	__asm__("cpsie f	@ __stf" : : : "memory", "cc");
+#else /* linux arch <= 5 */
+	unsigned long temp;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ stf\n"
+		"bic	%0, %0, #64\n"
+		"msr	cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+#endif /* linux arch <= 5 */
+}
+
+static inline unsigned long hard_local_irq_save_notrace(void)
+{
+	unsigned long res;
+#if __LINUX_ARM_ARCH__ >= 6
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ hard_local_irq_save\n"
+		"cpsid	i"
+		: "=r" (res) : : "memory", "cc");
+#else /* linux arch <= 5 */
+	unsigned long temp;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ hard_local_irq_save\n"
+		"orr	%1, %0, #128\n"
+		"msr	cpsr_c, %1"
+		: "=r" (res), "=r" (temp)
+		:
+		: "memory", "cc");
+#endif /* linux arch <= 5 */
+	return res;
+}
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return (int)((flags) & PSR_I_BIT);
+}
+
+static inline unsigned long hard_local_save_flags(void)
+{
+	unsigned long flags;
+	__asm__ __volatile__(
+		"mrs	%0, cpsr	@ hard_local_save_flags"
+		: "=r" (flags) : : "memory", "cc");
+	return flags;
+}
+
+#define hard_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags)
+
+static inline int hard_irqs_disabled(void)
+{
+	return hard_irqs_disabled_flags(hard_local_save_flags());
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+static inline void hard_local_irq_disable(void)
+{
+	if (!hard_irqs_disabled()) {
+		hard_local_irq_disable_notrace();
+		ipipe_trace_begin(0x80000000);
+	}
+}
+
+static inline void hard_local_irq_enable(void)
+{
+	if (hard_irqs_disabled()) {
+		ipipe_trace_end(0x80000000);
+		hard_local_irq_enable_notrace();
+	}
+}
+
+static inline unsigned long hard_local_irq_save(void)
+{
+	unsigned long flags;
+
+	flags = hard_local_irq_save_notrace();
+	if (!arch_irqs_disabled_flags(flags))
+		ipipe_trace_begin(0x80000001);
+
+	return flags;
+}
+
+static inline void hard_local_irq_restore(unsigned long x)
+{
+	if (!arch_irqs_disabled_flags(x))
+		ipipe_trace_end(0x80000001);
+
+	hard_local_irq_restore_notrace(x);
+}
+
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define hard_local_irq_disable		hard_local_irq_disable_notrace
+#define hard_local_irq_enable		hard_local_irq_enable_notrace
+#define hard_local_irq_save		hard_local_irq_save_notrace
+#define hard_local_irq_restore		hard_local_irq_restore_notrace
+
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define arch_local_irq_disable()	\
+	({				\
+		ipipe_stall_root();	\
+		barrier();		\
+	})
+
+#define arch_local_irq_enable()		\
+	do {				\
+		barrier();		\
+		ipipe_unstall_root();	\
+	} while (0)
+
+#define local_fiq_enable() ipipe_unstall_root()
+
+#define local_fiq_disable() ipipe_stall_root()
+
+#define arch_local_irq_restore(flags)			\
+	do {						\
+		if (!arch_irqs_disabled_flags(flags))	\
+			arch_local_irq_enable();	\
+	} while (0)
+
+#define arch_local_irq_save()					\
+	({							\
+		unsigned long _flags;				\
+		_flags = ipipe_test_and_stall_root() << 7;	\
+		barrier();					\
+		_flags;						\
+	})
+
+#define arch_local_save_flags()				\
+	({						\
+		unsigned long _flags;			\
+		_flags = ipipe_test_root() << 7;	\
+		barrier();				\
+		_flags;					\
+	})
+
+#define arch_irqs_disabled()		ipipe_test_root()
+#define hard_irq_disable()		hard_local_irq_disable()
+
+static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
+{
+	/* Merge virtual and real interrupt mask bits into a single
+	   32bit word. */
+	return (real & ~(1L << 8)) | ((virt != 0) << 8);
+}
+
+static inline int arch_demangle_irq_bits(unsigned long *x)
+{
+	int virt = (*x & (1 << 8)) != 0;
+	*x &= ~(1L << 8);
+	return virt;
+}
+
+#else /* !CONFIG_IPIPE */
+
+#define hard_local_irq_save()		arch_local_irq_save()
+#define hard_local_irq_restore(x)	arch_local_irq_restore(x)
+#define hard_local_irq_enable()		arch_local_irq_enable()
+#define hard_local_irq_disable()	arch_local_irq_disable()
+#define hard_irqs_disabled()		irqs_disabled()
+
+#define hard_cond_local_irq_enable()		do { } while(0)
+#define hard_cond_local_irq_disable()		do { } while(0)
+#define hard_cond_local_irq_save()		0
+#define hard_cond_local_irq_restore(flags)	do { (void)(flags); } while(0)
+
+#endif /* !CONFIG_IPIPE */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+#define hard_smp_local_irq_save()		hard_local_irq_save()
+#define hard_smp_local_irq_restore(flags)	hard_local_irq_restore(flags)
+#else /* !CONFIG_SMP */
+#define hard_smp_local_irq_save()		0
+#define hard_smp_local_irq_restore(flags)	do { (void)(flags); } while(0)
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_ARM_IPIPE_HWIRQ_H */
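
Finally, arch_mangle_irq_bits()/arch_demangle_irq_bits() pack the virtual stall state into bit 8 of a saved flags word, one bit above the real PSR_I_BIT, so both the hardware and pipeline mask states can ride through a single "flags" value. A round-trip check of that packing:

#include <assert.h>
#include <stdio.h>

#define PSR_I_BIT (1UL << 7)

static unsigned long mangle(int virt, unsigned long real)
{
	return (real & ~(1UL << 8)) | ((unsigned long)(virt != 0) << 8);
}

static int demangle(unsigned long *x)
{
	int virt = (*x & (1UL << 8)) != 0;
	*x &= ~(1UL << 8);
	return virt;
}

int main(void)
{
	unsigned long flags = mangle(1, PSR_I_BIT); /* stalled + hard-masked */

	assert(demangle(&flags) == 1); /* virtual state recovered */
	assert(flags == PSR_I_BIT);    /* real bits left intact */
	printf("virtual state round-trips through bit 8\n");
	return 0;
}
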
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h | |
index 53c15de..28fc521 100644 | |
--- a/arch/arm/include/asm/irq.h | |
+++ b/arch/arm/include/asm/irq.h | |
@@ -6,9 +6,14 @@ | |
#ifndef CONFIG_SPARSE_IRQ | |
#include <mach/irqs.h> | |
#else | |
+#if !defined(CONFIG_IPIPE) || defined(CONFIG_IRQ_DOMAIN) | |
#define NR_IRQS NR_IRQS_LEGACY | |
+#else | |
+#define NR_IRQS 512 | |
+#endif | |
#endif | |
+ | |
#ifndef irq_canonicalize | |
#define irq_canonicalize(i) (i) | |
#endif | |
@@ -38,4 +43,3 @@ extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); | |
#endif | |
#endif | |
- | |
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h | |
index 1e6cca5..c7470ff 100644 | |
--- a/arch/arm/include/asm/irqflags.h | |
+++ b/arch/arm/include/asm/irqflags.h | |
@@ -5,6 +5,10 @@ | |
#include <asm/ptrace.h> | |
+#include <asm/ipipe_hwirq.h> | |
+ | |
+#ifndef CONFIG_IPIPE | |
+ | |
/* | |
* CPU interrupt mask handling. | |
*/ | |
@@ -151,5 +155,6 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) | |
return flags & PSR_I_BIT; | |
} | |
+#endif /* CONFIG_IPIPE */ | |
#endif | |
#endif | |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h | |
index 57870ab..b1200d8 100644 | |
--- a/arch/arm/include/asm/memory.h | |
+++ b/arch/arm/include/asm/memory.h | |
@@ -37,7 +37,12 @@ | |
*/ | |
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) | |
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) | |
+#ifndef CONFIG_ARM_FCSE | |
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) | |
+#else /* CONFIG_ARM_FCSE */ | |
+#define TASK_UNMAPPED_BASE UL(0x00800000) | |
+#endif /* CONFIG_ARM_FCSE */ | |
+#define FCSE_TASK_SIZE UL(0x02000000) | |
/* | |
* The maximum size of a 26-bit user space task. | |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h | |
index 6f18da0..817744a 100644 | |
--- a/arch/arm/include/asm/mmu.h | |
+++ b/arch/arm/include/asm/mmu.h | |
@@ -9,6 +9,17 @@ typedef struct { | |
#else | |
int switch_pending; | |
#endif | |
+#ifdef CONFIG_ARM_FCSE | |
+ struct { | |
+ unsigned long pid; | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ unsigned shared_dirty_pages; | |
+ unsigned large : 1; | |
+ unsigned high_pages; | |
+ unsigned long highest_pid; | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ } fcse; | |
+#endif /* CONFIG_ARM_FCSE */ | |
unsigned int vmalloc_seq; | |
unsigned long sigpage; | |
} mm_context_t; | |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h | |
index e0b10f1..c467074 100644 | |
--- a/arch/arm/include/asm/mmu_context.h | |
+++ b/arch/arm/include/asm/mmu_context.h | |
@@ -19,12 +19,14 @@ | |
#include <asm/cachetype.h> | |
#include <asm/proc-fns.h> | |
#include <asm-generic/mm_hooks.h> | |
+#include <asm/fcse.h> | |
void __check_vmalloc_seq(struct mm_struct *mm); | |
#ifdef CONFIG_CPU_HAS_ASID | |
-void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); | |
+int check_and_switch_context(struct mm_struct *mm, | |
+ struct task_struct *tsk, bool may_defer); | |
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) | |
#ifdef CONFIG_ARM_ERRATA_798181 | |
@@ -41,13 +43,14 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, | |
#ifdef CONFIG_MMU | |
-static inline void check_and_switch_context(struct mm_struct *mm, | |
- struct task_struct *tsk) | |
+static inline int | |
+check_and_switch_context(struct mm_struct *mm, | |
+ struct task_struct *tsk, bool may_defer) | |
{ | |
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) | |
__check_vmalloc_seq(mm); | |
- if (irqs_disabled()) | |
+ if (may_defer && irqs_disabled()) { | |
/* | |
* cpu_switch_mm() needs to flush the VIVT caches. To avoid | |
* high interrupt latencies, defer the call and continue | |
@@ -56,16 +59,29 @@ static inline void check_and_switch_context(struct mm_struct *mm, | |
* finish_arch_post_lock_switch() call. | |
*/ | |
mm->context.switch_pending = 1; | |
- else | |
- cpu_switch_mm(mm->pgd, mm); | |
+ return -EAGAIN; | |
+ } else { | |
+ cpu_switch_mm(mm->pgd, mm, fcse_switch_mm_start(mm)); | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+extern void deferred_switch_mm(struct mm_struct *mm); | |
+#else /* !I-pipe */ | |
+static inline void deferred_switch_mm(struct mm_struct *next) | |
+{ | |
+ cpu_switch_mm(next->pgd, next, fcse_switch_mm_start(next)); | |
+ fcse_switch_mm_end(next); | |
} | |
+#endif /* !I-pipe */ | |
#define finish_arch_post_lock_switch \ | |
finish_arch_post_lock_switch | |
static inline void finish_arch_post_lock_switch(void) | |
{ | |
struct mm_struct *mm = current->mm; | |
- | |
if (mm && mm->context.switch_pending) { | |
/* | |
* Preemption must be disabled during cpu_switch_mm() as we | |
@@ -73,23 +89,60 @@ static inline void finish_arch_post_lock_switch(void) | |
* switch_pending again in case we were preempted and the | |
* switch to this mm was already done. | |
*/ | |
+ unsigned long flags; | |
preempt_disable(); | |
+ ipipe_mm_switch_protect(flags); | |
if (mm->context.switch_pending) { | |
mm->context.switch_pending = 0; | |
- cpu_switch_mm(mm->pgd, mm); | |
+ deferred_switch_mm(current->mm); | |
} | |
+ ipipe_mm_switch_unprotect(flags); | |
preempt_enable_no_resched(); | |
} | |
} | |
- | |
#endif /* CONFIG_MMU */ | |
-#define init_new_context(tsk,mm) 0 | |
+static inline int | |
+init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
+{ | |
+#ifdef CONFIG_ARM_FCSE | |
+ int fcse_pid; | |
-#endif /* CONFIG_CPU_HAS_ASID */ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ if (!mm->context.fcse.large) { | |
+ fcse_pid = fcse_pid_alloc(mm); | |
+ mm->context.fcse.pid = fcse_pid << FCSE_PID_SHIFT; | |
+ } else { | |
+ /* We are normally forking a process with a virtual address | |
+ space larger than 32 MB, so its pid should be 0. */ | |
+ FCSE_BUG_ON(mm->context.fcse.pid); | |
+ fcse_pid_reference(0); | |
+ } | |
+ /* If we are forking, set_pte_at will restore the correct high pages | |
+ count, and shared writable pages are write-protected again. */ | |
+ mm->context.fcse.high_pages = 0; | |
+ mm->context.fcse.highest_pid = 0; | |
+ mm->context.fcse.shared_dirty_pages = 0; | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ fcse_pid = fcse_pid_alloc(mm); | |
+ if (fcse_pid < 0) { | |
+ /* | |
+ * Set mm pid to FCSE_PID_INVALID, as even when | |
+ * init_new_context fails, destroy_context is called. | |
+ */ | |
+ mm->context.fcse.pid = FCSE_PID_INVALID; | |
+ return fcse_pid; | |
+ } | |
+ mm->context.fcse.pid = fcse_pid << FCSE_PID_SHIFT; | |
+ | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ FCSE_BUG_ON(fcse_mm_in_cache(mm)); | |
+#endif /* CONFIG_ARM_FCSE */ | |
+ | |
+ return 0; | |
+} | |
-#define destroy_context(mm) do { } while(0) | |
-#define activate_mm(prev,next) switch_mm(prev, next, NULL) | |
+#endif /* !CONFIG_CPU_HAS_ASID */ | |
/* | |
* This is called when "tsk" is about to enter lazy TLB mode. | |
@@ -111,12 +164,12 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |
* calling the CPU specific function when the mm hasn't | |
* actually changed. | |
*/ | |
-static inline void | |
-switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
- struct task_struct *tsk) | |
+static inline int | |
+__do_switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk, bool may_defer) | |
{ | |
#ifdef CONFIG_MMU | |
- unsigned int cpu = smp_processor_id(); | |
+ const unsigned int cpu = ipipe_processor_id(); | |
#ifdef CONFIG_SMP | |
/* check for possible thread migration */ | |
@@ -125,13 +178,82 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
__flush_icache_all(); | |
#endif | |
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { | |
- check_and_switch_context(next, tsk); | |
- if (cache_is_vivt()) | |
+ int rc = check_and_switch_context(next, tsk, may_defer); | |
+#ifdef CONFIG_IPIPE | |
+ if (rc < 0) { | |
+ cpumask_clear_cpu(cpu, mm_cpumask(next)); | |
+ return rc; | |
+ } | |
+#ifdef CONFIG_ARM_FCSE | |
+ if (tsk) | |
+ set_tsk_thread_flag(tsk, TIF_SWITCHED); | |
+#endif /* CONFIG_ARM_FCSE */ | |
+#else /* !CONFIG_IPIPE */ | |
+ (void)rc; | |
+#endif /* CONFIG_IPIPE */ | |
+ if (cache_is_vivt() && prev) | |
cpumask_clear_cpu(cpu, mm_cpumask(prev)); | |
- } | |
-#endif | |
+ } else | |
+ fcse_mark_dirty(next); | |
+#endif /* CONFIG_MMU */ | |
+ return 0; | |
+} | |
+ | |
+#if defined(CONFIG_IPIPE) && defined(CONFIG_MMU) | |
+extern void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk); | |
+#else /* !I-pipe || !MMU */ | |
+#define __switch_mm_inner(prev, next, tsk) __do_switch_mm(prev, next, tsk, true) | |
+#endif /* !I-pipe || !MMU */ | |
+ | |
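+/* | |
+ * mm switch entry point for callers which may not defer the switch: | |
+ * may_defer is false, so the switch happens at once instead of being | |
+ * postponed to finish_arch_post_lock_switch(). | |
+ */ | |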
+static inline void | |
+ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk) | |
+{ | |
+ __do_switch_mm(prev, next, tsk, false); | |
+ fcse_switch_mm_end(next); | |
+} | |
+ | |
+static inline void | |
+__switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk) | |
+{ | |
+ __switch_mm_inner(prev, next, tsk); | |
+} | |
+ | |
+static inline void | |
+switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk) | |
+{ | |
+#ifdef CONFIG_MMU | |
+ unsigned long flags; | |
+ ipipe_mm_switch_protect(flags); | |
+ __switch_mm(prev, next, tsk); | |
+ ipipe_mm_switch_unprotect(flags); | |
+#endif /* CONFIG_MMU */ | |
} | |
#define deactivate_mm(tsk,mm) do { } while (0) | |
+#ifndef CONFIG_ARM_FCSE_BEST_EFFORT | |
+#define activate_mm(prev,next) __switch_mm(prev, next, NULL) | |
+#else /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#define activate_mm(prev,next) \ | |
+ ({ \ | |
+ __switch_mm(prev, next, NULL); \ | |
+ FCSE_BUG_ON(current->mm == next && !fcse_mm_in_cache(next)); \ | |
+ }) | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+static inline void destroy_context(struct mm_struct *mm) | |
+{ | |
+#ifdef CONFIG_ARM_FCSE | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ FCSE_BUG_ON(mm->context.fcse.shared_dirty_pages); | |
+ FCSE_BUG_ON(mm->context.fcse.high_pages); | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ if (mm->context.fcse.pid != FCSE_PID_INVALID) | |
+ fcse_pid_free(mm); | |
+#endif /* CONFIG_ARM_FCSE */ | |
+} | |
+ | |
#endif | |
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h | |
index 209e650..faa7e7f 100644 | |
--- a/arch/arm/include/asm/percpu.h | |
+++ b/arch/arm/include/asm/percpu.h | |
@@ -20,7 +20,7 @@ | |
* Same as asm-generic/percpu.h, except that we store the per cpu offset | |
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 | |
*/ | |
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) | |
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) && !defined(CONFIG_IPIPE_TRACE) | |
static inline void set_my_cpu_offset(unsigned long off) | |
{ | |
/* Set TPIDRPRW */ | |
@@ -43,6 +43,10 @@ static inline unsigned long __my_cpu_offset(void) | |
} | |
#define __my_cpu_offset __my_cpu_offset() | |
#else | |
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE) | |
+#define __my_cpu_offset (per_cpu_offset(ipipe_processor_id())) | |
+#endif /* SMP && IPIPE */ | |
+ | |
#define set_my_cpu_offset(x) do {} while(0) | |
#endif /* CONFIG_SMP */ | |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h | |
index 9bcd262..da15b3e 100644 | |
--- a/arch/arm/include/asm/pgtable.h | |
+++ b/arch/arm/include/asm/pgtable.h | |
@@ -45,6 +45,8 @@ | |
#define LIBRARY_TEXT_START 0x0c000000 | |
#ifndef __ASSEMBLY__ | |
+#include <asm/fcse.h> | |
+ | |
extern void __pte_error(const char *file, int line, pte_t); | |
extern void __pmd_error(const char *file, int line, pmd_t); | |
extern void __pgd_error(const char *file, int line, pgd_t); | |
@@ -160,6 +162,46 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |
#define __S111 __PAGE_SHARED_EXEC | |
#ifndef __ASSEMBLY__ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
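+/* | |
+ * In best-effort mode, each mm tracks how many writable shared | |
+ * (cacheable, dirty) pages it maps and how many pages live above | |
+ * FCSE_TASK_SIZE; the two macros below keep those counters in sync | |
+ * whenever a PTE is installed or removed. | |
+ */ | |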
+#define fcse_account_page_removal(mm, addr, val) do { \ | |
+ struct mm_struct *_mm = (mm); \ | |
+ unsigned long _addr = (addr); \ | |
+ unsigned long _val = (val); \ | |
+ if (pte_present(_val) && ((_val) & L_PTE_SHARED)) \ | |
+ --_mm->context.fcse.shared_dirty_pages; \ | |
+ if (pte_present(_val) && _addr < TASK_SIZE) { \ | |
+ if (_addr >= FCSE_TASK_SIZE \ | |
+ && 0 == --_mm->context.fcse.high_pages) \ | |
+ _mm->context.fcse.highest_pid = 0; \ | |
+ } \ | |
+} while (0) | |
+ | |
+#define fcse_account_page_addition(mm, addr, val) ({ \ | |
+ struct mm_struct *_mm = (mm); \ | |
+ unsigned long _addr = (addr); \ | |
+ unsigned long _val = (val); \ | |
+ if (pte_present(_val) && (_val & L_PTE_SHARED)) { \ | |
+ if ((_val & (PTE_CACHEABLE | L_PTE_RDONLY | L_PTE_DIRTY)) \ | |
+ != (PTE_CACHEABLE | L_PTE_DIRTY)) \ | |
+ _val &= ~L_PTE_SHARED; \ | |
+ else \ | |
+ ++_mm->context.fcse.shared_dirty_pages; \ | |
+ } \ | |
+ if (pte_present(_val) \ | |
+ && _addr < TASK_SIZE && _addr >= FCSE_TASK_SIZE) { \ | |
+ unsigned long pid = _addr / FCSE_TASK_SIZE; \ | |
+ ++_mm->context.fcse.high_pages; \ | |
+ BUG_ON(_mm->context.fcse.large == 0); \ | |
+ if (pid > _mm->context.fcse.highest_pid) \ | |
+ _mm->context.fcse.highest_pid = pid; \ | |
+ } \ | |
+ _val; \ | |
+}) | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED || !CONFIG_ARM_FCSE */ | |
+#define fcse_account_page_removal(mm, addr, val) do { } while (0) | |
+#define fcse_account_page_addition(mm, addr, val) (val) | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED || !CONFIG_ARM_FCSE */ | |
+ | |
/* | |
* ZERO_PAGE is a global shared page that is always zero: used | |
* for zero-mapped memory areas etc.. | |
@@ -173,10 +215,14 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |
/* to find an entry in a page-table-directory */ | |
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) | |
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | |
+#define pgd_offset(mm, addr) \ | |
+ ({ \ | |
+ struct mm_struct *_mm = (mm); \ | |
+ (_mm->pgd + pgd_index(fcse_va_to_mva(_mm, (addr)))); \ | |
+ }) | |
/* to find an entry in a kernel page-table-directory */ | |
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | |
+#define pgd_offset_k(addr) (init_mm.pgd + pgd_index(addr)) | |
#define pmd_none(pmd) (!pmd_val(pmd)) | |
#define pmd_present(pmd) (pmd_val(pmd)) | |
@@ -209,7 +255,10 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) | |
#define pte_page(pte) pfn_to_page(pte_pfn(pte)) | |
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) | |
-#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) | |
+#define pte_clear(mm,addr,ptep) do { \ | |
+ fcse_account_page_removal(mm, addr, pte_val(*ptep)); \ | |
+ set_pte_ext(ptep, __pte(0), 0); \ | |
+} while (0) | |
#define pte_none(pte) (!pte_val(pte)) | |
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) | |
@@ -230,10 +279,14 @@ extern void __sync_icache_dcache(pte_t pteval); | |
#endif | |
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |
- pte_t *ptep, pte_t pteval) | |
+ pte_t *ptep, pte_t pteval) | |
{ | |
unsigned long ext = 0; | |
+ fcse_account_page_removal(mm, addr, pte_val(*ptep)); | |
+ pte_val(pteval) = | |
+ fcse_account_page_addition(mm, addr, pte_val(pteval)); | |
+ | |
if (addr < TASK_SIZE && pte_present_user(pteval)) { | |
__sync_icache_dcache(pteval); | |
ext |= PTE_EXT_NG; | |
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h | |
index f3628fb..6ac4f33 100644 | |
--- a/arch/arm/include/asm/proc-fns.h | |
+++ b/arch/arm/include/asm/proc-fns.h | |
@@ -60,7 +60,12 @@ extern struct processor { | |
/* | |
* Set the page table | |
*/ | |
+#ifndef CONFIG_ARM_FCSE_BEST_EFFORT | |
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); | |
+#else /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ void (*switch_mm)(unsigned long pgd_phys, | |
+ struct mm_struct *mm, unsigned flush); | |
+#endif /* !CONFIG_ARM_FCSE_BEST_EFFORT */ | |
/* | |
* Set a possibly extended PTE. Non-extended PTEs should | |
* ignore 'ext'. | |
@@ -82,7 +87,12 @@ extern void cpu_proc_init(void); | |
extern void cpu_proc_fin(void); | |
extern int cpu_do_idle(void); | |
extern void cpu_dcache_clean_area(void *, int); | |
+#ifndef CONFIG_ARM_FCSE_BEST_EFFORT | |
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | |
+#else /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+extern void cpu_do_switch_mm(unsigned long pgd_phys, | |
+ struct mm_struct *mm, unsigned flush); | |
+#endif /* !CONFIG_ARM_FCSE_BEST_EFFORT */ | |
#ifdef CONFIG_ARM_LPAE | |
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); | |
#else | |
@@ -113,7 +123,16 @@ extern void cpu_resume(void); | |
#ifdef CONFIG_MMU | |
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) | |
+#ifndef CONFIG_ARM_FCSE_BEST_EFFORT | |
+#define cpu_switch_mm(pgd,mm,fcse_switch) \ | |
+ ({ \ | |
+ (void)(fcse_switch); \ | |
+ cpu_do_switch_mm(virt_to_phys(pgd), (mm)); \ | |
+ }) | |
+#else /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#define cpu_switch_mm(pgd,mm,fcse_switch) \ | |
+ cpu_do_switch_mm(virt_to_phys(pgd), (mm), (fcse_switch)) | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
#ifdef CONFIG_ARM_LPAE | |
#define cpu_get_pgd() \ | |
@@ -130,7 +149,7 @@ extern void cpu_resume(void); | |
#define cpu_get_pgd() \ | |
({ \ | |
unsigned long pg; \ | |
- __asm__("mrc p15, 0, %0, c2, c0, 0" \ | |
+ __asm__ __volatile__ ("mrc p15, 0, %0, c2, c0, 0" \ | |
: "=r" (pg) : : "cc"); \ | |
pg &= ~0x3fff; \ | |
(pgd_t *)phys_to_virt(pg); \ | |
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h | |
index 413f387..bc1c6c0 100644 | |
--- a/arch/arm/include/asm/processor.h | |
+++ b/arch/arm/include/asm/processor.h | |
@@ -24,9 +24,14 @@ | |
#include <asm/types.h> | |
#ifdef __KERNEL__ | |
+#ifndef CONFIG_ARM_FCSE | |
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ | |
TASK_SIZE : TASK_SIZE_26) | |
#define STACK_TOP_MAX TASK_SIZE | |
+#else /* CONFIG_ARM_FCSE */ | |
+#define STACK_TOP FCSE_TASK_SIZE | |
+#define STACK_TOP_MAX FCSE_TASK_SIZE | |
+#endif /* CONFIG_ARM_FCSE */ | |
#endif | |
struct debug_info { | |
diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h | |
new file mode 100644 | |
index 0000000..6579eec | |
--- /dev/null | |
+++ b/arch/arm/include/asm/resource.h | |
@@ -0,0 +1,16 @@ | |
+#ifndef _ARM_RESOURCE_H | |
+#define _ARM_RESOURCE_H | |
+ | |
+/* | |
+ * When FCSE is enabled, reduce the default stack size to 1MB and the | |
+ * maximum to 16MB, since the whole address space is only 32MB. | |
+ */ | |
+#ifdef CONFIG_ARM_FCSE | |
+#define _STK_LIM (1024*1024) | |
+ | |
+#define _STK_LIM_MAX (16*1024*1024) | |
+#endif /* CONFIG_ARM_FCSE */ | |
+ | |
+#include <asm-generic/resource.h> | |
+ | |
+#endif | |
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h | |
index fa09e6b..2da5ffe 100644 | |
--- a/arch/arm/include/asm/switch_to.h | |
+++ b/arch/arm/include/asm/switch_to.h | |
@@ -10,9 +10,18 @@ | |
*/ | |
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
#define switch_to(prev,next,last) \ | |
do { \ | |
+ hard_cond_local_irq_disable(); \ | |
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ | |
+ hard_cond_local_irq_enable(); \ | |
} while (0) | |
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+#define switch_to(prev,next,last) \ | |
+do { \ | |
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ | |
+} while (0) | |
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
#endif /* __ASM_ARM_SWITCH_TO_H */ | |
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h | |
index f00b569..8dda78c 100644 | |
--- a/arch/arm/include/asm/thread_info.h | |
+++ b/arch/arm/include/asm/thread_info.h | |
@@ -26,6 +26,7 @@ struct exec_domain; | |
#include <asm/types.h> | |
#include <asm/domain.h> | |
+#include <ipipe/thread_info.h> | |
typedef unsigned long mm_segment_t; | |
@@ -68,6 +69,8 @@ struct thread_info { | |
unsigned long thumbee_state; /* ThumbEE Handler Base register */ | |
#endif | |
struct restart_block restart_block; | |
+ | |
+ struct ipipe_threadinfo ipipe_data; | |
}; | |
#define INIT_THREAD_INFO(tsk) \ | |
@@ -156,6 +159,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |
#define TIF_USING_IWMMXT 17 | |
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | |
#define TIF_RESTORE_SIGMASK 20 | |
+#ifdef CONFIG_IPIPE | |
+#define TIF_MMSWITCH_INT 22 | |
+#ifdef CONFIG_ARM_FCSE | |
+#define TIF_SWITCHED 23 | |
+#endif /* CONFIG_ARM_FCSE */ | |
+#endif /* CONFIG_IPIPE */ | |
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | |
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | |
@@ -165,6 +174,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | |
#define _TIF_SECCOMP (1 << TIF_SECCOMP) | |
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) | |
+#ifdef CONFIG_IPIPE | |
+#define _TIF_MMSWITCH_INT (1 << TIF_MMSWITCH_INT) | |
+#ifdef CONFIG_ARM_FCSE | |
+#define _TIF_SWITCHED (1 << TIF_SWITCHED) | |
+#endif /* CONFIG_ARM_FCSE */ | |
+#endif /* CONFIG_IPIPE */ | |
/* Checks for any syscall work in entry-common.S */ | |
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h | |
index a3625d1..dfa92dc 100644 | |
--- a/arch/arm/include/asm/tlbflush.h | |
+++ b/arch/arm/include/asm/tlbflush.h | |
@@ -202,6 +202,7 @@ | |
#ifndef __ASSEMBLY__ | |
#include <linux/sched.h> | |
+#include <asm/fcse.h> | |
struct cpu_tlb_fns { | |
void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); | |
@@ -375,7 +376,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |
const int zero = 0; | |
const unsigned int __tlb_flag = __cpu_tlb_flags; | |
- uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); | |
+ uaddr = (fcse_va_to_mva(vma->vm_mm, uaddr) & PAGE_MASK) | |
+ | ASID(vma->vm_mm); | |
if (tlb_flag(TLB_WB)) | |
dsb(); | |
@@ -498,7 +500,15 @@ static inline void clean_pmd_entry(void *pmd) | |
/* | |
* Convert calls to our calling convention. | |
*/ | |
-#define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) | |
+#define local_flush_tlb_range(vma, start, end) \ | |
+ ({ \ | |
+ struct mm_struct *_mm = (vma)->vm_mm; \ | |
+ unsigned long _start, _end; \ | |
+ _start = fcse_va_to_mva(_mm, start); \ | |
+ _end = fcse_va_to_mva(_mm, end); \ | |
+ __cpu_flush_user_tlb_range(_start, _end, vma); \ | |
+ }) | |
+ | |
#define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) | |
#ifndef CONFIG_SMP | |
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h | |
index 7e1f760..fc2ddcb 100644 | |
--- a/arch/arm/include/asm/uaccess.h | |
+++ b/arch/arm/include/asm/uaccess.h | |
@@ -13,6 +13,7 @@ | |
*/ | |
#include <linux/string.h> | |
#include <linux/thread_info.h> | |
+#include <linux/ipipe.h> | |
#include <asm/errno.h> | |
#include <asm/memory.h> | |
#include <asm/domain.h> | |
@@ -143,7 +144,7 @@ extern int __get_user_4(void *); | |
#define get_user(x,p) \ | |
({ \ | |
- might_fault(); \ | |
+ __ipipe_uaccess_might_fault(); \ | |
__get_user_check(x,p); \ | |
}) | |
@@ -188,7 +189,7 @@ extern int __put_user_8(void *, unsigned long long); | |
#define put_user(x,p) \ | |
({ \ | |
- might_fault(); \ | |
+ __ipipe_uaccess_might_fault(); \ | |
__put_user_check(x,p); \ | |
}) | |
@@ -245,7 +246,7 @@ do { \ | |
unsigned long __gu_addr = (unsigned long)(ptr); \ | |
unsigned long __gu_val; \ | |
__chk_user_ptr(ptr); \ | |
- might_fault(); \ | |
+ __ipipe_uaccess_might_fault(); \ | |
switch (sizeof(*(ptr))) { \ | |
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ | |
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ | |
@@ -327,7 +328,7 @@ do { \ | |
unsigned long __pu_addr = (unsigned long)(ptr); \ | |
__typeof__(*(ptr)) __pu_val = (x); \ | |
__chk_user_ptr(ptr); \ | |
- might_fault(); \ | |
+ __ipipe_uaccess_might_fault(); \ | |
switch (sizeof(*(ptr))) { \ | |
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ | |
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ | |
@@ -416,7 +417,6 @@ do { \ | |
: "r" (x), "i" (-EFAULT) \ | |
: "cc") | |
- | |
#ifdef CONFIG_MMU | |
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); | |
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); | |
diff --git a/arch/arm/include/uapi/asm/mman.h b/arch/arm/include/uapi/asm/mman.h | |
index 41f99c5..a1a116f 100644 | |
--- a/arch/arm/include/uapi/asm/mman.h | |
+++ b/arch/arm/include/uapi/asm/mman.h | |
@@ -1,3 +1,5 @@ | |
+#define MAP_BRK 0x80000 | |
+ | |
#include <asm-generic/mman.h> | |
#define arch_mmap_check(addr, len, flags) \ | |
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile | |
index 5f3338e..612aff0 100644 | |
--- a/arch/arm/kernel/Makefile | |
+++ b/arch/arm/kernel/Makefile | |
@@ -80,6 +80,8 @@ endif | |
head-y := head$(MMUEXT).o | |
obj-$(CONFIG_DEBUG_LL) += debug.o | |
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | |
+obj-$(CONFIG_IPIPE) += ipipe.o | |
+obj-$(CONFIG_IPIPE_ARM_KUSER_TSC) += ipipe_tsc.o ipipe_tsc_asm.o | |
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o | |
obj-$(CONFIG_ARM_PSCI) += psci.o | |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S | |
index d43c7e5..b2ff606 100644 | |
--- a/arch/arm/kernel/entry-armv.S | |
+++ b/arch/arm/kernel/entry-armv.S | |
@@ -4,6 +4,7 @@ | |
* Copyright (C) 1996,1997,1998 Russell King. | |
* ARM700 fix by Matthew Godbolt ([email protected]) | |
* nommu support by Hyok S. Choi ([email protected]) | |
+ * Copyright (C) 2005 Stelian Pop. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License version 2 as | |
@@ -45,6 +46,10 @@ | |
arch_irq_handler_default | |
#endif | |
9997: | |
+#ifdef CONFIG_IPIPE | |
+ bl __ipipe_check_root_interruptible | |
+ cmp r0, #1 | |
+#endif /* CONFIG_IPIPE */ | |
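+ @ r0 == 1 (Z set after the cmp) means the root domain was | |
+ @ interrupted and is interruptible; otherwise the bne following | |
+ @ irq_handler takes the fast exit path over non-root domains. | |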
.endm | |
.macro pabt_helper | |
@@ -185,6 +190,14 @@ ENDPROC(__und_invalid) | |
#ifdef CONFIG_TRACE_IRQFLAGS | |
bl trace_hardirqs_off | |
#endif | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ mov r0, #1 /* IPIPE_TRACE_BEGIN */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [sp, #S_PC] | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+ ldmia r7, {r2 - r6} | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
.endm | |
.align 5 | |
@@ -200,6 +213,9 @@ ENDPROC(__dabt_svc) | |
__irq_svc: | |
svc_entry | |
irq_handler | |
+#ifdef CONFIG_IPIPE | |
+ bne __ipipe_fast_svc_irq_exit | |
+#endif | |
#ifdef CONFIG_PREEMPT | |
get_thread_info tsk | |
@@ -211,6 +227,9 @@ __irq_svc: | |
blne svc_preempt | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+__ipipe_fast_svc_irq_exit: | |
+#endif | |
svc_exit r5, irq = 1 @ return from exception | |
UNWIND(.fnend ) | |
ENDPROC(__irq_svc) | |
@@ -220,12 +239,16 @@ ENDPROC(__irq_svc) | |
#ifdef CONFIG_PREEMPT | |
svc_preempt: | |
mov r8, lr | |
+#ifndef CONFIG_IPIPE | |
1: bl preempt_schedule_irq @ irq en/disable is done inside | |
+#else /* CONFIG_IPIPE */ | |
+1: bl __ipipe_preempt_schedule_irq @ irq en/disable is done inside | |
+#endif /* CONFIG_IPIPE */ | |
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS | |
tst r0, #_TIF_NEED_RESCHED | |
moveq pc, r8 @ go again | |
b 1b | |
-#endif | |
+#endif /* CONFIG_PREEMPT */ | |
__und_fault: | |
@ Correct the PC such that it is pointing at the instruction | |
@@ -250,6 +273,14 @@ __und_svc: | |
#else | |
svc_entry | |
#endif | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ mov r0, #7 @ r0 = IPIPE_TRAP_UNDEFINSTR | |
+ mov r1, sp @ r1 = ®s | |
+ bl __ipipe_notify_trap @ branch to trap handler | |
+ cmp r0, #0 | |
+ bne __und_svc_finish | |
+#endif /* CONFIG_IPIPE */ | |
@ | |
@ call emulation code, which returns using r9 if it has emulated | |
@ the instruction, or the more conventional lr if we are to treat | |
@@ -319,6 +350,15 @@ ENDPROC(__pabt_svc) | |
sub sp, sp, #S_FRAME_SIZE | |
ARM( stmib sp, {r1 - r12} ) | |
THUMB( stmia sp, {r0 - r12} ) | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ mov r4, r0 | |
+ mov r0, #1 /* IPIPE_TRACE_BEGIN */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [r4, #4] /* lr_<exception> */ | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+ mov r0, r4 | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
ldmia r0, {r3 - r5} | |
add r0, sp, #S_PC @ here for interlock avoidance | |
@@ -386,6 +426,9 @@ __irq_usr: | |
usr_entry | |
kuser_cmpxchg_check | |
irq_handler | |
+#ifdef CONFIG_IPIPE | |
+ bne __ipipe_ret_to_user_irqs_disabled | |
+#endif /* CONFIG_IPIPE */ | |
get_thread_info tsk | |
mov why, #0 | |
b ret_to_user_from_irq | |
@@ -398,6 +441,14 @@ ENDPROC(__irq_usr) | |
__und_usr: | |
usr_entry | |
+#ifdef CONFIG_IPIPE | |
+ mov r0, #7 @ r0 = IPIPE_TRAP_UNDEFINSTR | |
+ mov r1, sp @ r1 = ®s | |
+ bl __ipipe_notify_trap @ branch to trap handler | |
+ cmp r0, #0 | |
+ bne ret_from_exception | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
mov r2, r4 | |
mov r3, r5 | |
@@ -669,6 +720,12 @@ __pabt_usr: | |
ENTRY(ret_from_exception) | |
UNWIND(.fnstart ) | |
UNWIND(.cantunwind ) | |
+#ifdef CONFIG_IPIPE | |
+ disable_irq | |
+ bl __ipipe_check_root | |
+ cmp r0, #1 | |
+ bne __ipipe_ret_to_user_irqs_disabled @ Fast exit path over non-root domains | |
+#endif /* CONFIG_IPIPE */ | |
get_thread_info tsk | |
mov why, #0 | |
b ret_to_user | |
@@ -706,7 +763,11 @@ ENTRY(__switch_to) | |
add r4, r2, #TI_CPU_SAVE | |
ldr r0, =thread_notify_head | |
mov r1, #THREAD_NOTIFY_SWITCH | |
+#ifndef CONFIG_IPIPE | |
bl atomic_notifier_call_chain | |
+#else /* CONFIG_IPIPE */ | |
+ bl __ipipe_switch_to_notifier_call_chain | |
+#endif /* CONFIG_IPIPE */ | |
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | |
str r7, [r8] | |
#endif | |
@@ -741,6 +802,50 @@ ENDPROC(__switch_to) | |
#endif | |
.endm | |
+#ifdef CONFIG_IPIPE | |
+/* | |
+ I-pipe tsc area: this is where we store data shared with user-space | |
+ for tsc emulation. If CONFIG_IPIPE_ARM_KUSER_TSC is enabled, | |
+ __ipipe_kuser_get_tsc will be overwritten with the real TSC | |
+ emulation code. | |
+*/ | |
+ .globl __ipipe_tsc_area | |
+ .equ __ipipe_tsc_area, CONFIG_VECTORS_BASE + 0x1000 + __ipipe_tsc_area_start - __kuser_helper_end | |
+ | |
+#ifdef CONFIG_IPIPE_ARM_KUSER_TSC | |
+ .globl __ipipe_tsc_addr | |
+ .equ __ipipe_tsc_addr, CONFIG_VECTORS_BASE + 0x1000 + .LCcntr_addr - __kuser_helper_end | |
+ | |
+ .globl __ipipe_tsc_get | |
+ .equ __ipipe_tsc_get, CONFIG_VECTORS_BASE + 0x1000 + __ipipe_kuser_get_tsc - __kuser_helper_end | |
+#endif | |
+ | |
+ .align 5 | |
+ .globl __ipipe_tsc_area_start | |
+__ipipe_tsc_area_start: | |
+ .rep 3 | |
+ .word 0 | |
+ .endr | |
+ | |
+#ifdef CONFIG_IPIPE_ARM_KUSER_TSC | |
+ .rep 4 | |
+ .word 0 | |
+ .endr | |
+.LCcntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+__ipipe_kuser_get_tsc: | |
+ nop | |
+ mov r0, #0 | |
+ mov r1, #0 | |
+ usr_ret lr | |
+ .rep 20 | |
+ .word 0 | |
+ .endr | |
+#endif | |
+#endif | |
+ | |
.macro kuser_pad, sym, size | |
.if (. - \sym) & 3 | |
.rept 4 - (. - \sym) & 3 | |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S | |
index bc5bc0a..41400d8 100644 | |
--- a/arch/arm/kernel/entry-common.S | |
+++ b/arch/arm/kernel/entry-common.S | |
@@ -2,6 +2,7 @@ | |
* linux/arch/arm/kernel/entry-common.S | |
* | |
* Copyright (C) 2000 Russell King | |
+ * Copyright (C) 2005 Stelian Pop. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License version 2 as | |
@@ -42,6 +43,15 @@ ret_fast_syscall: | |
ct_user_enter | |
restore_user_regs fast = 1, offset = S_OFF | |
+ | |
+#ifdef CONFIG_IPIPE | |
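+@ Fast exit used when a syscall or exception was handled entirely by | |
+@ a non-root domain: user registers are restored without going | |
+@ through the usual work_pending/signal checks. | |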
+__ipipe_ret_to_user: | |
+ disable_irq @ disable interrupts | |
+ENTRY(__ipipe_ret_to_user_irqs_disabled) | |
+ slow_restore_user_regs | |
+ENDPROC(__ipipe_ret_to_user_irqs_disabled) | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
UNWIND(.fnend ) | |
/* | |
@@ -71,12 +81,7 @@ ENTRY(ret_to_user_from_irq) | |
bne work_pending | |
no_work_pending: | |
asm_trace_hardirqs_on | |
- | |
- /* perform architecture specific actions before user return */ | |
- arch_ret_to_user r1, lr | |
- ct_user_enter save = 0 | |
- | |
- restore_user_regs fast = 0, offset = 0 | |
+ slow_restore_user_regs | |
ENDPROC(ret_to_user_from_irq) | |
ENDPROC(ret_to_user) | |
@@ -84,6 +89,7 @@ ENDPROC(ret_to_user) | |
* This is how we return from a fork. | |
*/ | |
ENTRY(ret_from_fork) | |
+ enable_irq_cond | |
bl schedule_tail | |
cmp r5, #0 | |
movne r0, r4 | |
@@ -361,6 +367,16 @@ ENTRY(vector_swi) | |
str r8, [sp, #S_PSR] @ Save CPSR | |
str r0, [sp, #S_OLD_R0] @ Save OLD_R0 | |
zero_fp | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ mov r4, lr | |
+ mov r0, #1 /* IPIPE_TRACE_BEGIN */ | |
+ mov r3, #0x90000000 | |
+ sub r2, lr, #4 /* calling PC */ | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+ mov lr, r4 | |
+ ldm sp, {r0 - r4} | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
/* | |
* Get the system call number. | |
@@ -407,7 +423,9 @@ ENTRY(vector_swi) | |
enable_irq | |
ct_user_exit | |
+#ifndef CONFIG_IPIPE | |
get_thread_info tsk | |
+#endif /* !CONFIG_IPIPE */ | |
adr tbl, sys_call_table @ load syscall table pointer | |
#if defined(CONFIG_OABI_COMPAT) | |
@@ -425,6 +443,17 @@ ENTRY(vector_swi) | |
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+ mov r1, sp | |
+ mov r0, scno | |
+ bl __ipipe_syscall_root | |
+ cmp r0, #0 | |
+ blt __ipipe_ret_to_user | |
+ get_thread_info tsk | |
+ bgt ret_slow_syscall | |
+ ldmia sp, { r0 - r3 } | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
local_restart: | |
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing | |
stmdb sp!, {r4, r5} @ push fifth and sixth args | |
@@ -440,7 +469,7 @@ local_restart: | |
2: mov why, #0 @ no longer a real syscall | |
cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) | |
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back | |
- bcs arm_syscall | |
+ bcs arm_syscall | |
b sys_ni_syscall @ not private func | |
ENDPROC(vector_swi) | |
@@ -477,6 +506,9 @@ __sys_trace_return: | |
__cr_alignment: | |
.word cr_alignment | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+ .word __ipipe_syscall_root | |
+#endif | |
.ltorg | |
/* | |
@@ -606,3 +638,28 @@ ENTRY(sys_oabi_call_table) | |
#endif | |
+ | |
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_IPIPE_TRACE) | |
+ | |
+ .text | |
+ .align 0 | |
+ .type arm_return_addr %function | |
+ .global arm_return_addr | |
+ | |
+arm_return_addr: | |
+ mov ip, r0 | |
+ mov r0, fp | |
+3: | |
+ cmp r0, #0 | |
+ beq 1f @ frame list hit end, bail | |
+ cmp ip, #0 | |
+ beq 2f @ reached desired frame | |
+ ldr r0, [r0, #-12] @ else continue, get next fp | |
+ sub ip, ip, #1 | |
+ b 3b | |
+2: | |
+ ldr r0, [r0, #-4] @ get target return address | |
+1: | |
+ mov pc, lr | |
+ | |
+#endif | |
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S | |
index 160f337..8d77bff 100644 | |
--- a/arch/arm/kernel/entry-header.S | |
+++ b/arch/arm/kernel/entry-header.S | |
@@ -22,7 +22,7 @@ | |
@ | |
#define S_OFF 8 | |
-/* | |
+/* | |
* The SWI code relies on the fact that R0 is at the bottom of the stack | |
* (due to slow/fast restore user regs). | |
*/ | |
@@ -77,6 +77,9 @@ | |
.macro svc_exit, rpsr, irq = 0 | |
.if \irq != 0 | |
@ IRQs already off | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ bl __ipipe_bugon_irqs_enabled | |
+#endif | |
#ifdef CONFIG_TRACE_IRQFLAGS | |
@ The parent context IRQs must have been enabled to get here in | |
@ the first place, so there's no point checking the PSR I bit. | |
@@ -92,6 +95,14 @@ | |
blne trace_hardirqs_off | |
#endif | |
.endif | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ mov r0, #2 /* IPIPE_TRACE_END */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [sp, #S_PC] | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
msr spsr_cxsf, \rpsr | |
#if defined(CONFIG_CPU_V6) | |
ldr r0, [sp] | |
@@ -106,6 +117,22 @@ | |
.endm | |
.macro restore_user_regs, fast = 0, offset = 0 | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ bl __ipipe_bugon_irqs_enabled | |
+#endif | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ .if \fast | |
+ mov r4, r0 | |
+ .endif | |
+ mov r0, #2 /* IPIPE_TRACE_END */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [sp, #\offset + S_PC] | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+ .if \fast | |
+ mov r0, r4 | |
+ .endif | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr | |
ldr lr, [sp, #\offset + S_PC]! @ get pc | |
msr spsr_cxsf, r1 @ save in spsr_svc | |
@@ -140,6 +167,9 @@ | |
.macro svc_exit, rpsr, irq = 0 | |
.if \irq != 0 | |
@ IRQs already off | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ bl __ipipe_bugon_irqs_enabled | |
+#endif | |
#ifdef CONFIG_TRACE_IRQFLAGS | |
@ The parent context IRQs must have been enabled to get here in | |
@ the first place, so there's no point checking the PSR I bit. | |
@@ -155,6 +185,14 @@ | |
blne trace_hardirqs_off | |
#endif | |
.endif | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ mov r0, #2 /* IPIPE_TRACE_END */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [sp, #S_PC] | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
ldr lr, [sp, #S_SP] @ top of the stack | |
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc | |
clrex @ clear the exclusive monitor | |
@@ -166,6 +204,22 @@ | |
.endm | |
.macro restore_user_regs, fast = 0, offset = 0 | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ bl __ipipe_bugon_irqs_enabled | |
+#endif | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ .if \fast | |
+ mov r4, r0 | |
+ .endif | |
+ mov r0, #2 /* IPIPE_TRACE_END */ | |
+ mov r3, #0x90000000 | |
+ ldr r2, [sp, #\offset + S_PC] | |
+ mov r1, pc | |
+ bl ipipe_trace_asm | |
+ .if \fast | |
+ mov r0, r4 | |
+ .endif | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
clrex @ clear the exclusive monitor | |
mov r2, sp | |
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr | |
@@ -225,6 +279,13 @@ | |
#endif | |
.endm | |
+ .macro slow_restore_user_regs | |
+ /* perform architecture specific actions before user return */ | |
+ arch_ret_to_user r1, lr | |
+ ct_user_enter save = 0 | |
+ restore_user_regs fast = 0, offset = 0 | |
+ .endm | |
+ | |
/* | |
* These are the registers used in the syscall handler, and allow us to | |
* have in theory up to 7 arguments to a function - r0 to r6. | |
diff --git a/arch/arm/kernel/ipipe.c b/arch/arm/kernel/ipipe.c | |
new file mode 100644 | |
index 0000000..7a5417d | |
--- /dev/null | |
+++ b/arch/arm/kernel/ipipe.c | |
@@ -0,0 +1,611 @@ | |
+/* -*- linux-c -*- | |
+ * linux/arch/arm/kernel/ipipe.c | |
+ * | |
+ * Copyright (C) 2002-2005 Philippe Gerum. | |
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/arm port over 2.4). | |
+ * Copyright (C) 2005 Heikki Lindholm (PowerPC 970 fixes). | |
+ * Copyright (C) 2005 Stelian Pop. | |
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix. | |
+ * Copyright (C) 2010 Philippe Gerum (SMP port). | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ * | |
+ * Architecture-dependent I-PIPE support for ARM. | |
+ */ | |
+ | |
+#include <linux/kernel.h> | |
+#include <linux/smp.h> | |
+#include <linux/sched.h> | |
+#include <linux/slab.h> | |
+#include <linux/bitops.h> | |
+#include <linux/interrupt.h> | |
+#include <linux/module.h> | |
+#include <linux/errno.h> | |
+#include <linux/kallsyms.h> | |
+#include <linux/kprobes.h> | |
+#include <linux/ipipe_trace.h> | |
+#include <linux/irq.h> | |
+#include <linux/irqnr.h> | |
+#include <linux/prefetch.h> | |
+#include <linux/cpu.h> | |
+#include <linux/ipipe_domain.h> | |
+#include <linux/ipipe_tickdev.h> | |
+#include <asm/system.h> | |
+#include <asm/atomic.h> | |
+#include <asm/hardirq.h> | |
+#include <asm/io.h> | |
+#include <asm/unistd.h> | |
+#include <asm/mach/irq.h> | |
+#include <asm/mmu_context.h> | |
+#include <asm/exception.h> | |
+ | |
+static void __ipipe_do_IRQ(unsigned irq, void *cookie); | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+void (*__ipipe_mach_hrtimer_debug)(unsigned irq); | |
+#endif | |
+ | |
+#ifdef CONFIG_SMP | |
+ | |
+struct __ipipe_vnmidata { | |
+ void (*fn)(void *); | |
+ void *arg; | |
+ cpumask_t cpumask; | |
+}; | |
+ | |
+static struct __ipipe_vnmislot { | |
+ ipipe_spinlock_t lock; | |
+ struct __ipipe_vnmidata *data; | |
+ ipipe_rwlock_t data_lock; | |
+} __ipipe_vnmi __cacheline_aligned_in_smp = { | |
+ .lock = IPIPE_SPIN_LOCK_UNLOCKED, | |
+ .data = NULL, | |
+ .data_lock = IPIPE_RW_LOCK_UNLOCKED, | |
+}; | |
+ | |
+void __ipipe_early_core_setup(void) | |
+{ | |
+ __ipipe_mach_init_platform(); | |
+} | |
+ | |
+void ipipe_stall_root(void) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ ipipe_root_only(); | |
+ flags = hard_smp_local_irq_save(); | |
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status); | |
+ hard_smp_local_irq_restore(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_stall_root); | |
+ | |
+unsigned long ipipe_test_and_stall_root(void) | |
+{ | |
+ unsigned long flags; | |
+ int x; | |
+ | |
+ ipipe_root_only(); | |
+ flags = hard_smp_local_irq_save(); | |
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ return x; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_test_and_stall_root); | |
+ | |
+unsigned long ipipe_test_root(void) | |
+{ | |
+ unsigned long flags; | |
+ int x; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ x = test_bit(IPIPE_STALL_FLAG, &__ipipe_root_status); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ return x; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_test_root); | |
+ | |
+void __ipipe_do_vnmi(unsigned int irq, void *cookie) | |
+{ | |
+ int cpu = ipipe_processor_id(); | |
+ struct __ipipe_vnmidata *data; | |
+ | |
+ read_lock(&__ipipe_vnmi.data_lock); | |
+ | |
+ data = __ipipe_vnmi.data; | |
+ if (likely(data && cpumask_test_cpu(cpu, &data->cpumask))) { | |
+ data->fn(data->arg); | |
+ cpu_clear(cpu, data->cpumask); | |
+ } | |
+ | |
+ read_unlock(&__ipipe_vnmi.data_lock); | |
+} | |
+ | |
+static inline void | |
+hook_internal_ipi(struct ipipe_domain *ipd, int virq, | |
+ void (*handler)(unsigned int irq, void *cookie)) | |
+{ | |
+ ipd->irqs[virq].ackfn = NULL; | |
+ ipd->irqs[virq].handler = handler; | |
+ ipd->irqs[virq].cookie = NULL; | |
+ /* Immediately handle in the current domain but *never* pass */ | |
+ ipd->irqs[virq].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK; | |
+} | |
+ | |
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd) | |
+{ | |
+ __ipipe_ipis_alloc(); | |
+ hook_internal_ipi(ipd, IPIPE_CRITICAL_IPI, __ipipe_do_critical_sync); | |
+ hook_internal_ipi(ipd, IPIPE_SERVICE_VNMI, __ipipe_do_vnmi); | |
+} | |
+ | |
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask) | |
+{ | |
+ if (WARN_ON_ONCE(irq_get_chip(irq)->irq_set_affinity == NULL)) | |
+ return; | |
+ | |
+ cpus_and(cpumask, cpumask, *cpu_online_mask); | |
+ if (WARN_ON_ONCE(cpus_empty(cpumask))) | |
+ return; | |
+ | |
+ irq_get_chip(irq)->irq_set_affinity(irq_get_irq_data(irq), &cpumask, true); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_set_irq_affinity); | |
+ | |
+void __ipipe_send_vnmi(void (*fn)(void *), cpumask_t cpumask, void *arg) | |
+{ | |
+ struct __ipipe_vnmidata data; | |
+ unsigned long flags; | |
+ int cpu; | |
+ | |
+ data.fn = fn; | |
+ data.arg = arg; | |
+ data.cpumask = cpumask; | |
+ | |
+ while (!spin_trylock_irqsave(&__ipipe_vnmi.lock, flags)) { | |
+ if (hard_irqs_disabled()) | |
+ __ipipe_do_vnmi(IPIPE_SERVICE_VNMI, NULL); | |
+ cpu_relax(); | |
+ } | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ cpu_clear(cpu, data.cpumask); | |
+ if (cpus_empty(data.cpumask)) { | |
+ spin_unlock_irqrestore(&__ipipe_vnmi.lock, flags); | |
+ return; | |
+ } | |
+ | |
+ write_lock(&__ipipe_vnmi.data_lock); | |
+ __ipipe_vnmi.data = &data; | |
+ write_unlock(&__ipipe_vnmi.data_lock); | |
+ | |
+ ipipe_send_ipi(IPIPE_SERVICE_VNMI, data.cpumask); | |
+ while (!cpus_empty(data.cpumask)) | |
+ cpu_relax(); | |
+ | |
+ write_lock(&__ipipe_vnmi.data_lock); | |
+ __ipipe_vnmi.data = NULL; | |
+ write_unlock(&__ipipe_vnmi.data_lock); | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_vnmi.lock, flags); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_send_vnmi); | |
+#endif /* CONFIG_SMP */ | |
+ | |
+/* | |
+ * ipipe_raise_irq() -- Push the interrupt to the front of the pipeline, | |
+ * just as if it had actually been received from a hw source. Also | |
+ * works for virtual interrupts. | |
+ */ | |
+void ipipe_raise_irq(unsigned irq) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ __ipipe_dispatch_irq(irq, IPIPE_IRQF_NOACK); | |
+ hard_local_irq_restore(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_raise_irq); | |
+ | |
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info) | |
+{ | |
+ info->sys_nr_cpus = num_online_cpus(); | |
+ info->sys_cpu_freq = __ipipe_hrclock_freq; | |
+ info->sys_hrtimer_irq = per_cpu(ipipe_percpu.hrtimer_irq, 0); | |
+ info->sys_hrtimer_freq = __ipipe_hrtimer_freq; | |
+ info->sys_hrclock_freq = __ipipe_hrclock_freq; | |
+ __ipipe_mach_get_tscinfo(&info->arch.tsc); | |
+ | |
+ return 0; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_get_sysinfo); | |
+ | |
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ desc->ipipe_ack(irq, desc); | |
+} | |
+ | |
+struct ipipe_mach_pic_muter ipipe_pic_muter; | |
+EXPORT_SYMBOL_GPL(ipipe_pic_muter); | |
+ | |
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter) | |
+{ | |
+ ipipe_pic_muter = *muter; | |
+} | |
+ | |
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ /* With sparse IRQs, some irqs may not have a descriptor */ | |
+ if (irq_to_desc(irq) == NULL) | |
+ return; | |
+ | |
+ if (ipipe_pic_muter.enable_irqdesc) | |
+ ipipe_pic_muter.enable_irqdesc(ipd, irq); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_enable_irqdesc); | |
+ | |
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ if (ipipe_pic_muter.disable_irqdesc) | |
+ ipipe_pic_muter.disable_irqdesc(ipd, irq); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_disable_irqdesc); | |
+ | |
+/* | |
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw | |
+ * interrupts are off, and secondary CPUs are still lost in space. | |
+ */ | |
+void __ipipe_enable_pipeline(void) | |
+{ | |
+ unsigned long flags; | |
+ unsigned int irq; | |
+ | |
+#ifdef CONFIG_CPU_ARM926T | |
+ /* | |
+ * We do not want "wfi" to be called on ARM926EJ-S based | |
+ * processors, as this causes Linux to disable the I-cache | |
+ * when idle. | |
+ */ | |
+ extern void cpu_arm926_proc_init(void); | |
+ if (likely(cpu_proc_init == &cpu_arm926_proc_init)) { | |
+ printk("I-pipe: ARM926EJ-S detected, disabling wfi instruction" | |
+ " in idle loop\n"); | |
+ cpu_idle_poll_ctrl(true); | |
+ } | |
+#endif | |
+ flags = ipipe_critical_enter(NULL); | |
+ | |
+ /* virtualize all interrupts from the root domain. */ | |
+ for (irq = 0; irq < IPIPE_NR_ROOT_IRQS; irq++) | |
+ ipipe_request_irq(ipipe_root_domain, | |
+ irq, | |
+ (ipipe_irq_handler_t)__ipipe_do_IRQ, | |
+ NULL, __ipipe_ack_irq); | |
+ | |
+#ifdef CONFIG_SMP | |
+ __ipipe_ipis_request(); | |
+#endif /* CONFIG_SMP */ | |
+ | |
+ ipipe_critical_exit(flags); | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+unsigned asmlinkage __ipipe_bugon_irqs_enabled(unsigned x) | |
+{ | |
+ BUG_ON(!hard_irqs_disabled()); | |
+ return x; /* Preserve r0 */ | |
+} | |
+#endif | |
+ | |
+asmlinkage int __ipipe_check_root(void) | |
+{ | |
+ return __ipipe_root_p; | |
+} | |
+ | |
+asmlinkage int __ipipe_check_root_interruptible(void) | |
+{ | |
+ return __ipipe_root_p && !irqs_disabled(); | |
+} | |
+ | |
+__kprobes int | |
+__ipipe_switch_to_notifier_call_chain(struct atomic_notifier_head *nh, | |
+ unsigned long val, void *v) | |
+{ | |
+ unsigned long flags; | |
+ int ret; | |
+ | |
+ local_irq_save(flags); | |
+ ret = atomic_notifier_call_chain(nh, val, v); | |
+ __ipipe_restore_root_nosync(flags); | |
+ | |
+ return ret; | |
+} | |
+ | |
+asmlinkage int __ipipe_syscall_root(unsigned long scno, struct pt_regs *regs) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long orig_r7; | |
+ int ret = 0; | |
+ | |
+ WARN_ON_ONCE(hard_irqs_disabled()); | |
+ | |
+ /* | |
+ * We use r7 to pass the syscall number to the other domains. | |
+ */ | |
+ orig_r7 = regs->ARM_r7; | |
+ regs->ARM_r7 = __NR_SYSCALL_BASE + scno; | |
+ | |
+ /* | |
+ * This routine either returns: | |
+ * 0 -- if the syscall is to be passed to Linux; | |
+ * <0 -- if the syscall should not be passed to Linux, and no | |
+ * tail work should be performed; | |
+ * >0 -- if the syscall should not be passed to Linux but the | |
+ * tail work has to be performed (for handling signals etc). | |
+ */ | |
+ | |
+ if (!__ipipe_syscall_watched_p(current, regs->ARM_r7)) | |
+ goto out; | |
+ | |
+ ret = __ipipe_notify_syscall(regs); | |
+ | |
+ hard_local_irq_disable(); | |
+ | |
+ /* | |
+ * This is the end of the syscall path, so we may | |
+ * safely assume a valid Linux task stack here. | |
+ */ | |
+ if (current->ipipe.flags & PF_MAYDAY) { | |
+ current->ipipe.flags &= ~PF_MAYDAY; | |
+ __ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs); | |
+ } | |
+ | |
+ if (!__ipipe_root_p) | |
+ ret = -1; | |
+ else { | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (__ipipe_ipending_p(p)) | |
+ __ipipe_sync_stage(); | |
+ } | |
+ | |
+ hard_local_irq_enable(); | |
+out: | |
+ regs->ARM_r7 = orig_r7; | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ BUG_ON(ret > 0 && current_thread_info()->restart_block.fn != | |
+ do_no_restart_syscall); | |
+#endif | |
+ return ret; | |
+} | |
+ | |
+void __ipipe_exit_irq(struct pt_regs *regs) | |
+{ | |
+ if (user_mode(regs) && | |
+ (current->ipipe.flags & PF_MAYDAY) != 0) { | |
+ /* | |
+ * Testing for user_mode(regs) eliminates foreign stack | |
+ * contexts, including from careless domains which did | |
+ * not set the foreign stack bit (foreign stacks are | |
+ * always kernel-based). | |
+ */ | |
+ current->ipipe.flags &= ~PF_MAYDAY; | |
+ __ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs); | |
+ } | |
+} | |
+ | |
+/* hw irqs off */ | |
+asmlinkage void __exception __ipipe_grab_irq(int irq, struct pt_regs *regs) | |
+{ | |
+ struct ipipe_percpu_data *p = __ipipe_this_cpu_ptr(&ipipe_percpu); | |
+ | |
+ ipipe_trace_irq_entry(irq); | |
+ | |
+ if (p->hrtimer_irq == -1) | |
+ goto copy_regs; | |
+ | |
+ if (irq == p->hrtimer_irq) { | |
+ /* | |
+ * Given our deferred dispatching model for regular IRQs, we | |
+ * only record CPU regs for the last timer interrupt, so that | |
+ * the timer handler charges CPU times properly. It is assumed | |
+ * that other interrupt handlers don't actually care for such | |
+ * information. | |
+ */ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ if (__ipipe_mach_hrtimer_debug) | |
+ __ipipe_mach_hrtimer_debug(irq); | |
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */ | |
+ copy_regs: | |
+ p->tick_regs.ARM_cpsr = | |
+ (p->curr == &p->root | |
+ ? regs->ARM_cpsr | |
+ : regs->ARM_cpsr | PSR_I_BIT); | |
+ p->tick_regs.ARM_pc = regs->ARM_pc; | |
+ } | |
+ | |
+ __ipipe_dispatch_irq(irq, 0); | |
+ | |
+ ipipe_trace_irq_exit(irq); | |
+ | |
+ __ipipe_exit_irq(regs); | |
+} | |
+ | |
+static void __ipipe_do_IRQ(unsigned irq, void *cookie) | |
+{ | |
+ handle_IRQ(irq, __this_cpu_ptr(&ipipe_percpu.tick_regs)); | |
+} | |
+ | |
+#ifdef CONFIG_MMU | |
+void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next, | |
+ struct task_struct *tsk) | |
+{ | |
+#ifdef CONFIG_IPIPE_WANT_ACTIVE_MM | |
+ struct mm_struct ** const active_mm = | |
+ __this_cpu_ptr(&ipipe_percpu.active_mm); | |
+#endif /* CONFIG_IPIPE_WANT_ACTIVE_MM */ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ struct thread_info *const tip = current_thread_info(); | |
+ prev = *active_mm; | |
+ clear_bit(TIF_MMSWITCH_INT, &tip->flags); | |
+ barrier(); | |
+ *active_mm = NULL; | |
+ barrier(); | |
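+ /* | |
+ * Retry loop: if an interrupt from the head domain triggers | |
+ * another mm switch while we are switching (TIF_MMSWITCH_INT | |
+ * gets set), the mm context is unknown again and the switch | |
+ * must be redone. | |
+ */ | |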
+ for (;;) { | |
+ unsigned long flags; | |
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+ | |
+ int rc __maybe_unused = __do_switch_mm(prev, next, tsk, true); | |
+ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ /* | |
+ * Reading thread_info flags and setting active_mm | |
+ * must be done atomically. | |
+ */ | |
+ flags = hard_local_irq_save(); | |
+ if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) { | |
+ if (rc < 0) | |
+ *active_mm = prev; | |
+ else { | |
+ *active_mm = next; | |
+ fcse_switch_mm_end(next); | |
+ } | |
+ hard_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ hard_local_irq_restore(flags); | |
+ | |
+ if (rc < 0) | |
+ /* | |
+ * We were interrupted by head domain, which | |
+ * may have changed the mm context, mm context | |
+ * is now unknown, but will be switched in | |
+ * deferred_switch_mm | |
+ */ | |
+ return; | |
+ | |
+ prev = NULL; | |
+ } | |
+#elif defined(CONFIG_IPIPE_WANT_ACTIVE_MM) | |
+ if (rc < 0) | |
+ *active_mm = prev; | |
+ else { | |
+ *active_mm = next; | |
+ fcse_switch_mm_end(next); | |
+ } | |
+#endif /* IPIPE_WANT_ACTIVE_MM */ | |
+} | |
+ | |
+#ifdef finish_arch_post_lock_switch | |
+void deferred_switch_mm(struct mm_struct *next) | |
+{ | |
+#ifdef CONFIG_IPIPE_WANT_ACTIVE_MM | |
+ struct mm_struct ** const active_mm = | |
+ __this_cpu_ptr(&ipipe_percpu.active_mm); | |
+ struct mm_struct *prev = *active_mm; | |
+#endif /* CONFIG_IPIPE_WANT_ACTIVE_MM */ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ struct thread_info *const tip = current_thread_info(); | |
+ clear_bit(TIF_MMSWITCH_INT, &tip->flags); | |
+ barrier(); | |
+ *active_mm = NULL; | |
+ barrier(); | |
+ for (;;) { | |
+ unsigned long flags; | |
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+ | |
+ __do_switch_mm(prev, next, NULL, false); | |
+ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ /* | |
+ * Reading thread_info flags and setting active_mm | |
+ * must be done atomically. | |
+ */ | |
+ flags = hard_local_irq_save(); | |
+ if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) { | |
+ *active_mm = next; | |
+ fcse_switch_mm_end(next); | |
+ hard_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ hard_local_irq_restore(flags); | |
+ prev = NULL; | |
+ } | |
+#elif defined(CONFIG_IPIPE_WANT_ACTIVE_MM) | |
+ *active_mm = next; | |
+ fcse_switch_mm_end(next); | |
+#endif /* CONFIG_IPIPE_WANT_ACTIVE_MM */ | |
+} | |
+#endif | |
+#endif /* CONFIG_MMU */ | |
+ | |
+#if defined(CONFIG_IPIPE_DEBUG) && defined(CONFIG_DEBUG_LL) | |
+void printascii(const char *s); | |
+ | |
+static IPIPE_DEFINE_SPINLOCK(serial_debug_lock); | |
+ | |
+void __ipipe_serial_debug(const char *fmt, ...) | |
+{ | |
+ unsigned long flags; | |
+ char buf[128]; | |
+ va_list ap; | |
+ int n; | |
+ | |
+ va_start(ap, fmt); | |
+ n = vsnprintf(buf, sizeof(buf) - 2, fmt, ap); | |
+ va_end(ap); | |
+ | |
+ if (n > 0 && buf[n - 1] == '\n') { | |
+ buf[n] = '\r'; | |
+ buf[n+1] = '\0'; | |
+ } | |
+ | |
+ spin_lock_irqsave(&serial_debug_lock, flags); | |
+ printascii(buf); | |
+ spin_unlock_irqrestore(&serial_debug_lock, flags); | |
+} | |
+ | |
+#ifndef CONFIG_SERIAL_8250_CONSOLE | |
+EXPORT_SYMBOL_GPL(__ipipe_serial_debug); | |
+#endif | |
+ | |
+#endif | |
+ | |
+EXPORT_SYMBOL_GPL(do_munmap); | |
+EXPORT_SYMBOL_GPL(show_stack); | |
+EXPORT_SYMBOL_GPL(init_mm); | |
+#ifndef MULTI_CPU | |
+EXPORT_SYMBOL_GPL(cpu_do_switch_mm); | |
+#endif | |
+EXPORT_SYMBOL_GPL(__check_vmalloc_seq); | |
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
+EXPORT_SYMBOL_GPL(tasklist_lock); | |
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */ | |
+ | |
+#ifndef CONFIG_SPARSE_IRQ | |
+EXPORT_SYMBOL_GPL(irq_desc); | |
+#endif | |
+ | |
+#ifdef CONFIG_CPU_HAS_ASID | |
+EXPORT_SYMBOL_GPL(check_and_switch_context); | |
+#endif /* CONFIG_CPU_HAS_ASID */ | |
+ | |
+#ifdef CONFIG_SMP | |
+EXPORT_SYMBOL_GPL(__cpu_logical_map); | |
+#endif /* CONFIG_SMP */ | |
+ | |
+EXPORT_SYMBOL_GPL(cpu_architecture); | |
diff --git a/arch/arm/kernel/ipipe_tsc.c b/arch/arm/kernel/ipipe_tsc.c | |
new file mode 100644 | |
index 0000000..fa2d382 | |
--- /dev/null | |
+++ b/arch/arm/kernel/ipipe_tsc.c | |
@@ -0,0 +1,179 @@ | |
+#include <linux/kernel.h> | |
+#include <linux/module.h> | |
+#include <linux/clocksource.h> | |
+#include <linux/sched.h> | |
+#include <linux/ipipe_tickdev.h> | |
+ | |
+#include <linux/ipipe.h> | |
+ | |
+#include <asm/cacheflush.h> | |
+#include <asm/traps.h> | |
+ | |
+typedef unsigned long long __ipipe_tsc_t(void); | |
+ | |
+extern __ipipe_tsc_t __ipipe_freerunning_64, | |
+ __ipipe_freerunning_32, | |
+ __ipipe_freerunning_countdown_32, | |
+ __ipipe_freerunning_16, | |
+ __ipipe_freerunning_countdown_16, | |
+ __ipipe_decrementer_16, | |
+ __ipipe_freerunning_twice_16, | |
+ __ipipe_freerunning_arch; | |
+extern unsigned long __ipipe_tsc_addr; | |
+ | |
+static struct __ipipe_tscinfo tsc_info; | |
+ | |
+static struct clocksource clksrc = { | |
+ .name = "ipipe_tsc", | |
+ .rating = 0x7fffffff, | |
+ .read = (typeof(clksrc.read))__ipipe_tsc_get, | |
+ .mask = CLOCKSOURCE_MASK(64), | |
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS, | |
+}; | |
+ | |
+struct ipipe_tsc_value_t { | |
+ unsigned long long last_tsc; | |
+ unsigned last_cnt; | |
+}; | |
+ | |
+unsigned long __ipipe_kuser_tsc_freq; | |
+ | |
+struct ipipe_tsc_value_t *ipipe_tsc_value; | |
+ | |
+void __ipipe_tsc_register(struct __ipipe_tscinfo *info) | |
+{ | |
+ struct ipipe_tsc_value_t *vector_tsc_value; | |
+ unsigned long *tsc_addr; | |
+ __ipipe_tsc_t *implem; | |
+ unsigned long flags; | |
+ int registered; | |
+ char *tsc_area; | |
+ | |
+#if !defined(CONFIG_CPU_USE_DOMAINS) | |
+ extern char __ipipe_tsc_area_start[], __kuser_helper_end[]; | |
+ | |
+ tsc_area = (char *)vectors_page + 0x1000 | |
+ + (__ipipe_tsc_area_start - __kuser_helper_end); | |
+ tsc_addr = (unsigned long *) | |
+ (tsc_area + ((char *)&__ipipe_tsc_addr - __ipipe_tsc_area)); | |
+#else | |
+ tsc_area = __ipipe_tsc_area; | |
+ tsc_addr = &__ipipe_tsc_addr; | |
+#endif | |
+ registered = ipipe_tsc_value != NULL; | |
+ ipipe_tsc_value = (struct ipipe_tsc_value_t *)tsc_area; | |
+ vector_tsc_value = (struct ipipe_tsc_value_t *)__ipipe_tsc_area; | |
+ | |
+ switch(info->type) { | |
+ case IPIPE_TSC_TYPE_FREERUNNING: | |
+ switch(info->u.mask) { | |
+ case 0xffff: | |
+ implem = &__ipipe_freerunning_16; | |
+ break; | |
+ case 0xffffffff: | |
+ implem = &__ipipe_freerunning_32; | |
+ break; | |
+ case 0xffffffffffffffffULL: | |
+ implem = &__ipipe_freerunning_64; | |
+ break; | |
+ default: | |
+ goto unimplemented; | |
+ } | |
+ break; | |
+ | |
+ case IPIPE_TSC_TYPE_DECREMENTER: | |
+ if (info->u.mask != 0xffff) | |
+ goto unimplemented; | |
+ implem = &__ipipe_decrementer_16; | |
+ break; | |
+ | |
+ case IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN: | |
+ switch(info->u.mask) { | |
+ case 0xffff: | |
+ implem = &__ipipe_freerunning_countdown_16; | |
+ break; | |
+ case 0xffffffff: | |
+ implem = &__ipipe_freerunning_countdown_32; | |
+ break; | |
+ default: | |
+ goto unimplemented; | |
+ } | |
+ break; | |
+ | |
+ case IPIPE_TSC_TYPE_FREERUNNING_TWICE: | |
+ if (info->u.mask != 0xffff) | |
+ goto unimplemented; | |
+ implem = &__ipipe_freerunning_twice_16; | |
+ break; | |
+ | |
+ case IPIPE_TSC_TYPE_FREERUNNING_ARCH: | |
+ implem = &__ipipe_freerunning_arch; | |
+ break; | |
+ | |
+ default: | |
+ unimplemented: | |
+ printk("I-pipel: Unimplemented tsc configuration, " | |
+ "type: %d, mask: 0x%08Lx\n", info->type, info->u.mask); | |
+ BUG(); | |
+ } | |
+ | |
+ tsc_info = *info; | |
+ *tsc_addr = tsc_info.counter_vaddr; | |
+ if (tsc_info.type == IPIPE_TSC_TYPE_DECREMENTER) { | |
+ tsc_info.u.dec.last_cnt = &vector_tsc_value->last_cnt; | |
+ tsc_info.u.dec.tsc = &vector_tsc_value->last_tsc; | |
+ } else | |
+ tsc_info.u.fr.tsc = &vector_tsc_value->last_tsc; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ ipipe_tsc_value->last_tsc = 0; | |
+ memcpy(tsc_area + 0x20, implem, 0x60); | |
+ flush_icache_range((unsigned long)(tsc_area), | |
+ (unsigned long)(tsc_area + 0x80)); | |
+ hard_local_irq_restore(flags); | |
+ | |
+ printk(KERN_INFO "I-pipe, %u.%03u MHz clocksource\n", | |
+ tsc_info.freq / 1000000, (tsc_info.freq % 1000000) / 1000); | |
+ if (!registered) | |
+ clocksource_register_hz(&clksrc, tsc_info.freq); | |
+ else | |
+ __clocksource_updatefreq_hz(&clksrc, tsc_info.freq); | |
+ | |
+ __ipipe_kuser_tsc_freq = tsc_info.freq; | |
+} | |
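+ | |
+/* | |
+ * Illustrative only: a platform owning a 32-bit free-running counter | |
+ * would typically register it as below. The frequency, base address | |
+ * and variable names are assumptions, not taken from this patch: | |
+ * | |
+ *	static struct __ipipe_tscinfo example_tsc = { | |
+ *		.type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ *		.freq = 24000000, | |
+ *		.counter_vaddr = (unsigned long)example_base, | |
+ *		.u = { { .mask = 0xffffffff, }, }, | |
+ *	}; | |
+ *	__ipipe_tsc_register(&example_tsc); | |
+ */ | |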
+ | |
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info) | |
+{ | |
+ *info = tsc_info; | |
+} | |
+ | |
+void __ipipe_tsc_update(void) | |
+{ | |
+ if (ipipe_tsc_value == NULL) | |
+ return; | |
+ | |
+ if (tsc_info.type == IPIPE_TSC_TYPE_DECREMENTER) { | |
+ unsigned cnt = *(unsigned *)tsc_info.counter_vaddr; | |
+ int offset = ipipe_tsc_value->last_cnt - cnt; | |
+ if (offset < 0) | |
+ offset += tsc_info.u.dec.mask + 1; | |
+ ipipe_tsc_value->last_tsc += offset; | |
+ ipipe_tsc_value->last_cnt = cnt; | |
+ return; | |
+ } | |
+ | |
+ /* Update last_tsc to stay compatible with the legacy user-space | |
+ 32-bit free-running counter implementation: publishing the | |
+ current value minus one keeps the stored low word strictly | |
+ behind the hardware counter, so the kuser helpers can treat | |
+ equality as a full wrap. */ | |
+ ipipe_tsc_value->last_tsc = __ipipe_tsc_get() - 1; | |
+} | |
+EXPORT_SYMBOL(__ipipe_tsc_get); | |
+ | |
+void update_vsyscall(struct timekeeper *tk) | |
+{ | |
+ if (tk->clock == &clksrc) | |
+ ipipe_update_hostrt(tk); | |
+} | |
+ | |
+void update_vsyscall_tz(void) | |
+{ | |
+} | |
diff --git a/arch/arm/kernel/ipipe_tsc_asm.S b/arch/arm/kernel/ipipe_tsc_asm.S | |
new file mode 100644 | |
index 0000000..0f92e38 | |
--- /dev/null | |
+++ b/arch/arm/kernel/ipipe_tsc_asm.S | |
@@ -0,0 +1,297 @@ | |
+#include <asm/assembler.h> | |
+#include <asm/asm-offsets.h> | |
+#include <asm/glue.h> | |
+ | |
+ THUMB( .arm ) | |
+ | |
+ .macro usr_ret, reg | |
+#ifdef CONFIG_ARM_THUMB | |
+ bx \reg | |
+#else | |
+ mov pc, \reg | |
+#endif | |
+ .endm | |
+ | |
+ .macro usr_reteq, reg | |
+#ifdef CONFIG_ARM_THUMB | |
+ bxeq \reg | |
+#else | |
+ moveq pc, \reg | |
+#endif | |
+ .endm | |
+ | |
+ .macro myldrd, rd1, rd2, rtmp, label | |
+#if __LINUX_ARM_ARCH__ < 5 | |
+ adr \rtmp, \label | |
+ ldm \rtmp, { \rd1, \rd2 } | |
+#else | |
+ ldrd \rd1, \label | |
+#endif | |
+ .endm | |
+ | |
+/* | |
+ We use the same mechanism as the Linux kuser helpers to store | |
+ variables and functions related to TSC emulation, so that they | |
+ can also be used from user-space. | |
+ | |
+ __ipipe_tsc_register copies the proper implementation to the | |
+ vectors page. We repeat the data area before each entry point so | |
+ that PC-relative operations are computed correctly. | |
+*/ | |
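+ | |
+/* | |
+ Hedged sketch, not part of this patch: once a helper below has been | |
+ copied to the vectors page, user-space could read the 64-bit TSC | |
+ roughly as follows; the entry address is a made-up example and must | |
+ match wherever the kernel actually placed the copy: | |
+ | |
+	typedef unsigned long long (*__ipipe_rdtsc_fn)(void); | |
+	__ipipe_rdtsc_fn rdtsc = (__ipipe_rdtsc_fn)0xffff0f80; | |
+	unsigned long long now = rdtsc(); | |
+*/ | |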
+ | |
+ .align 5 | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfr64_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_64 | |
+__ipipe_freerunning_64: | |
+ ldr r0, .LCfr64_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+ mov r2, r0 | |
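+/* | |
+ A 64-bit MMIO counter cannot be read atomically with 32-bit loads: | |
+ sample one half, read the other, then re-read the first and retry | |
+ until both readings of it agree. | |
+*/ | |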
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r1, [r2, #4] | |
+1: ldr r0, [r2] | |
+ ldr r3, [r2, #4] | |
+ cmp r3, r1 | |
+ usr_reteq lr | |
+ mov r1, r3 | |
+ b 1b | |
+#else /* Big endian */ | |
+ ldr r0, [r2] | |
+1: ldr r1, [r2, #4] | |
+ ldr r3, [r2] | |
+ cmp r3, r0 | |
+ usr_reteq lr | |
+ mov r0, r3 | |
+ b 1b | |
+#endif /* Big endian */ | |
+ | |
+ .align 5 | |
+.LCfr32_last_tsc: | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfr32_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_32 | |
+__ipipe_freerunning_32: | |
+ ldr r0, .LCfr32_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+ myldrd r2, r3, r1, .LCfr32_last_tsc | |
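+/* | |
+ Extend the 32-bit hardware counter to 64 bits: if the current | |
+ reading has fallen to or below the last published low word, the | |
+ counter wrapped, and the carry from cmp bumps the high word via adc. | |
+*/ | |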
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r0, [r0] | |
+ cmp r2, r0 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+ ldr r1, [r0] | |
+ cmp r3, r1 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+.LCfrcd32_last_tsc: | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfrcd32_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_countdown_32 | |
+__ipipe_freerunning_countdown_32: | |
+ ldr r0, .LCfrcd32_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+ myldrd r2, r3, r1, .LCfrcd32_last_tsc | |
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r0, [r0] | |
+ mvn r0, r0 | |
+ cmp r2, r0 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+ ldr r1, [r0] | |
+ mvn r1, r1 | |
+ cmp r3, r1 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+.LCfr16_last_tsc: | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfr16_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_16 | |
+__ipipe_freerunning_16: | |
+ ldr r0, .LCfr16_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+1: myldrd r2, r3, r1, .LCfr16_last_tsc | |
+ ldrh ip, [r0] | |
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r1, .LCfr16_last_tsc | |
+ cmp r1, r2 | |
+ mov r1, r2, lsr #16 | |
+ bne 1b | |
+ orr r0, ip, r1, lsl #16 | |
+ cmp r2, r0 | |
+ addhis r0, r0, #0x10000 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+ ldr r1, .LCfr16_last_tsc + 4 | |
+ cmp r1, r3 | |
+ mov r1, r3, lsr #16 | |
+ bne 1b | |
+ orr r1, ip, r1, lsl #16 | |
+ cmp r3, r1 | |
+ addhis r1, r1, #0x10000 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+.LCfrcd16_last_tsc: | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfrcd16_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_countdown_16 | |
+__ipipe_freerunning_countdown_16: | |
+ ldr r0, .LCfrcd16_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+1: myldrd r2, r3, r1, .LCfrcd16_last_tsc | |
+ ldrh ip, [r0] | |
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r1, .LCfrcd16_last_tsc | |
+ rsb ip, ip, #0x10000 | |
+ cmp r1, r2 | |
+ mov r1, r2, lsr #16 | |
+ bne 1b | |
+ orr r0, ip, r1, lsl #16 | |
+ cmp r2, r0 | |
+ addhis r0, r0, #0x10000 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+ ldr r1, .LCfrcd16_last_tsc + 4 | |
+ rsb ip, ip, #0x10000 | |
+ cmp r1, r3 | |
+ mov r1, r3, lsr #16 | |
+ bne 1b | |
+ orr r1, ip, r1, lsl #16 | |
+ cmp r3, r1 | |
+ addhis r1, r1, #0x10000 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+.LCfrt16_last_tsc: | |
+ .rep 7 | |
+ .word 0 | |
+ .endr | |
+.LCfrt16_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_twice_16 | |
+__ipipe_freerunning_twice_16: | |
+ ldr r0, .LCfrt16_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+1: myldrd r2, r3, r1, .LCfrt16_last_tsc | |
+2: ldrh ip, [r0] | |
+ ldrh r1, [r0] | |
+ cmp r1, ip | |
+ bne 2b | |
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+ ldr r1, .LCfrt16_last_tsc | |
+ cmp r1, r2 | |
+ mov r1, r2, lsr #16 | |
+ bne 1b | |
+ orr r0, ip, r1, lsl #16 | |
+ cmp r2, r0 | |
+ addhis r0, r0, #0x10000 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+ ldr r1, .LCfrt16_last_tsc + 4 | |
+ cmp r1, r3 | |
+ mov r1, r3, lsr #16 | |
+ bne 1b | |
+ orr r1, ip, r1, lsl #16 | |
+ cmp r3, r1 | |
+ addhis r1, r1, #0x10000 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+.LCdec16_last_tsc: | |
+ .rep 2 | |
+ .word 0 | |
+ .endr | |
+.LCdec16_last_cnt: | |
+ .rep 5 | |
+ .word 0 | |
+ .endr | |
+.LCdec16_cntr_addr: | |
+ .word 0 | |
+ | |
+ .align 5 | |
+ .globl __ipipe_decrementer_16 | |
+__ipipe_decrementer_16: | |
+ ldr r0, .LCdec16_cntr_addr | |
+/* User-space entry-point: r0 is the hardware counter virtual address */ | |
+#ifndef CONFIG_CPU_BIG_ENDIAN | |
+/* Little endian */ | |
+1: ldr r1, .LCdec16_last_tsc | |
+ ldrh ip, [r0] | |
+ ldr r2, .LCdec16_last_cnt | |
+ subs ip, r2, ip | |
+ addcc ip, ip, #0x10000 | |
+ myldrd r2, r3, r3, .LCdec16_last_tsc | |
+ cmp r1, r2 | |
+ bne 1b | |
+ adds r0, ip, r2 | |
+ adc r1, r3, #0 | |
+#else /* Big endian */ | |
+1: ldr r1, .LCdec16_last_tsc + 4 | |
+ ldrh ip, [r0] | |
+ ldr r2, .LCdec16_last_cnt | |
+ subs ip, r2, ip | |
+ addcc ip, ip, #0x10000 | |
+ myldrd r2, r3, r3, .LCdec16_last_tsc | |
+ cmp r1, r3 | |
+ bne 1b | |
+ adds r1, ip, r3 | |
+ adc r0, r2, #0 | |
+#endif /* Big endian */ | |
+ usr_ret lr | |
+ | |
+ .align 5 | |
+ .globl __ipipe_freerunning_arch | |
+__ipipe_freerunning_arch: | |
+ nop | |
+#ifdef CONFIG_ARM_ARCH_TIMER | |
+ mrrc p15, 0, r0, r1, c14 | |
+#else | |
+ mov r0, #0 | |
+ mov r1, #0 | |
+#endif | |
+ usr_ret lr | |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c | |
index ab1fe3b..adcf510 100644 | |
--- a/arch/arm/kernel/process.c | |
+++ b/arch/arm/kernel/process.c | |
@@ -125,18 +125,54 @@ EXPORT_SYMBOL(pm_power_off); | |
void (*arm_pm_restart)(char str, const char *cmd) = null_restart; | |
EXPORT_SYMBOL_GPL(arm_pm_restart); | |
-/* | |
- * This is our default idle handler. | |
- */ | |
- | |
void (*arm_pm_idle)(void); | |
-static void default_idle(void) | |
+#ifdef CONFIG_IPIPE | |
+static void __ipipe_halt_root(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ | |
+ /* Emulate idle entry sequence over the root domain. */ | |
+ | |
+ hard_local_irq_disable(); | |
+ | |
+ p = ipipe_this_cpu_root_context(); | |
+ | |
+ trace_hardirqs_on(); | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ | |
+ if (unlikely(__ipipe_ipending_p(p))) { | |
+ __ipipe_sync_stage(); | |
+ hard_local_irq_enable(); | |
+ } else { | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+ ipipe_trace_end(0x8000000E); | |
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | |
+ hard_local_irq_enable(); | |
+ if (arm_pm_idle) | |
+ arm_pm_idle(); | |
+ else | |
+ cpu_do_idle(); | |
+ } | |
+} | |
+#else /* !CONFIG_IPIPE */ | |
+static void __ipipe_halt_root(void) | |
{ | |
if (arm_pm_idle) | |
arm_pm_idle(); | |
else | |
cpu_do_idle(); | |
+} | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+/* | |
+ * This is our default idle handler. | |
+ */ | |
+static void default_idle(void) | |
+{ | |
+ if (!need_resched()) | |
+ __ipipe_halt_root(); | |
+ | |
local_irq_enable(); | |
} | |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c | |
index 03deeff..c06c5d6 100644 | |
--- a/arch/arm/kernel/ptrace.c | |
+++ b/arch/arm/kernel/ptrace.c | |
@@ -214,6 +214,10 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) | |
static int break_trap(struct pt_regs *regs, unsigned int instr) | |
{ | |
+ | |
+ if (__ipipe_report_trap(IPIPE_TRAP_BREAK, regs)) | |
+ return 0; | |
+ | |
ptrace_break(current, regs); | |
return 0; | |
} | |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c | |
index b4b1d39..6fb5b9c 100644 | |
--- a/arch/arm/kernel/setup.c | |
+++ b/arch/arm/kernel/setup.c | |
@@ -444,16 +444,27 @@ void notrace cpu_init(void) | |
: "r14"); | |
} | |
+#if NR_CPUS > 16 | |
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; | |
+#else | |
+u32 __cpu_logical_map[16] = { [0 ... 15] = MPIDR_INVALID }; | |
+#endif | |
void __init smp_setup_processor_id(void) | |
{ | |
int i; | |
u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; | |
u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | |
+ u32 max = cpu + 1 > nr_cpu_ids ? cpu + 1 : nr_cpu_ids; | |
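+ /* | |
+ * max (above) makes sure the loop below also covers a boot CPU | |
+ * whose affinity-level-0 id is not below nr_cpu_ids. | |
+ */ | |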
+ | |
+#ifdef CONFIG_IPIPE | |
+ /* printk over the I-pipe needs per-CPU data */ | |
+ set_my_cpu_offset(per_cpu_offset(0)); | |
+#endif | |
+ BUG_ON(max > ARRAY_SIZE(__cpu_logical_map)); | |
cpu_logical_map(0) = cpu; | |
- for (i = 1; i < nr_cpu_ids; ++i) | |
+ for (i = 1; i < max; ++i) | |
cpu_logical_map(i) = i == cpu ? 0 : i; | |
printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); | |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c | |
index 5a42c12..a8515cd 100644 | |
--- a/arch/arm/kernel/signal.c | |
+++ b/arch/arm/kernel/signal.c | |
@@ -585,11 +585,13 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |
{ | |
do { | |
if (likely(thread_flags & _TIF_NEED_RESCHED)) { | |
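+ /* | |
+ * Stall the root domain virtually (schedule() expects | |
+ * interrupts off) while conditionally re-enabling hardware | |
+ * IRQs, so that higher priority I-pipe domains keep running. | |
+ */ | |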
+ local_irq_disable(); | |
+ hard_cond_local_irq_enable(); | |
schedule(); | |
} else { | |
if (unlikely(!user_mode(regs))) | |
return 0; | |
- local_irq_enable(); | |
+ hard_local_irq_enable(); | |
if (thread_flags & _TIF_SIGPENDING) { | |
int restart = do_signal(regs, syscall); | |
if (unlikely(restart)) { | |
@@ -606,7 +608,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |
tracehook_notify_resume(regs); | |
} | |
} | |
- local_irq_disable(); | |
+ hard_local_irq_disable(); | |
thread_flags = current_thread_info()->flags; | |
} while (thread_flags & _TIF_WORK_MASK); | |
return 0; | |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c | |
index 5919eb4..0269db0 100644 | |
--- a/arch/arm/kernel/smp.c | |
+++ b/arch/arm/kernel/smp.c | |
@@ -66,8 +66,24 @@ enum ipi_msg_type { | |
IPI_CALL_FUNC, | |
IPI_CALL_FUNC_SINGLE, | |
IPI_CPU_STOP, | |
+ IPI_CPU_DUMP, | |
+#ifdef CONFIG_IPIPE | |
+ IPI_IPIPE_FIRST, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
+#ifdef CONFIG_IPIPE | |
+#define noipipe_irq_enter() \ | |
+ do { \ | |
+ } while(0) | |
+#define noipipe_irq_exit() \ | |
+ do { \ | |
+ } while(0) | |
+#else /* !CONFIG_IPIPE */ | |
+#define noipipe_irq_enter() irq_enter() | |
+#define noipipe_irq_exit() irq_exit() | |
+#endif /* !CONFIG_IPIPE */ | |
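+ | |
+/* | |
+ * With CONFIG_IPIPE, IPIs reach handle_IPI() through the pipeline, | |
+ * which has already done the root domain IRQ accounting, so the | |
+ * usual irq_enter()/irq_exit() bracketing is skipped there. | |
+ */ | |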
+ | |
static DECLARE_COMPLETION(cpu_running); | |
static struct smp_operations smp_ops; | |
@@ -318,10 +334,17 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |
* The identity mapping is uncached (strongly ordered), so | |
* switch away from it before attempting any exclusive accesses. | |
*/ | |
- cpu_switch_mm(mm->pgd, mm); | |
+ cpu_switch_mm(mm->pgd, mm, 1); | |
local_flush_bp_all(); | |
enter_lazy_tlb(mm, current); | |
local_flush_tlb_all(); | |
+#ifdef CONFIG_IPIPE | |
+ /* | |
+ * With CONFIG_IPIPE debug_smp_processor_id requires access | |
+ * to percpu data. | |
+ */ | |
+ set_my_cpu_offset(per_cpu_offset(ipipe_processor_id())); | |
+#endif | |
/* | |
* All kernel threads share the same mm context; grab a | |
@@ -497,6 +520,95 @@ u64 smp_irq_stat_cpu(unsigned int cpu) | |
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); | |
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | |
+ | |
+static inline void ipi_timer(void) | |
+{ | |
+#ifdef CONFIG_IPIPE | |
+#ifndef CONFIG_IPIPE_ARM_KUSER_TSC | |
+ __ipipe_mach_update_tsc(); | |
+#else /* CONFIG_IPIPE_ARM_KUSER_TSC */ | |
+ __ipipe_tsc_update(); | |
+#endif /* CONFIG_IPIPE_ARM_KUSER_TSC */ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+ tick_receive_broadcast(); | |
+} | |
+ | |
+#endif | |
+ | |
+#ifdef CONFIG_IPIPE | |
+#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE | |
+ | |
+unsigned __ipipe_first_ipi; | |
+EXPORT_SYMBOL_GPL(__ipipe_first_ipi); | |
+ | |
+static void __ipipe_do_IPI(unsigned virq, void *cookie) | |
+{ | |
+ enum ipi_msg_type msg = virq - IPIPE_IPI_BASE; | |
+ handle_IPI(msg, __this_cpu_ptr(&ipipe_percpu.tick_regs)); | |
+} | |
+ | |
+void __ipipe_ipis_alloc(void) | |
+{ | |
+ unsigned virq, _virq; | |
+ unsigned ipi_nr; | |
+ | |
+ if (__ipipe_first_ipi) | |
+ return; | |
+ | |
+ /* __ipipe_first_ipi is 0 here */ | |
+ ipi_nr = IPI_IPIPE_FIRST + IPIPE_LAST_IPI + 1; | |
+ | |
+ for (virq = IPIPE_IPI_BASE; virq < IPIPE_IPI_BASE + ipi_nr; virq++) { | |
+ _virq = ipipe_alloc_virq(); | |
+ if (virq != _virq) | |
+ panic("I-pipe: cannot reserve virq #%d (got #%d)\n", | |
+ virq, _virq); | |
+ | |
+ if (virq - IPIPE_IPI_BASE == IPI_IPIPE_FIRST) | |
+ __ipipe_first_ipi = virq; | |
+ } | |
+} | |
+ | |
+void __ipipe_ipis_request(void) | |
+{ | |
+ unsigned virq; | |
+ | |
+ for (virq = IPIPE_IPI_BASE; virq < __ipipe_first_ipi; virq++) | |
+ ipipe_request_irq(ipipe_root_domain, | |
+ virq, | |
+ (ipipe_irq_handler_t)__ipipe_do_IPI, | |
+ NULL, NULL); | |
+} | |
+void ipipe_send_ipi(unsigned ipi, cpumask_t cpumask) | |
+{ | |
+ enum ipi_msg_type msg = ipi - IPIPE_IPI_BASE; | |
+ smp_cross_call(&cpumask, msg); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_send_ipi); | |
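+ | |
+/* | |
+ * Illustrative only: a co-kernel could send the first I-pipe IPI to | |
+ * a single remote CPU roughly like this (cpu being a hypothetical | |
+ * target): | |
+ * | |
+ *	ipipe_send_ipi(IPIPE_IPI_BASE + IPI_IPIPE_FIRST, | |
+ *		       *cpumask_of(cpu)); | |
+ */ | |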
+ | |
+ /* hw IRQs off */ | |
+asmlinkage void __exception __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs) | |
+{ | |
+ int virq = IPIPE_IPI_BASE + svc; | |
+ | |
+ /* | |
+ * Virtual NMIs ignore the root domain's stall bit. When | |
+ * caught over high priority domains, virtual NMIs are | |
+ * pipelined the usual way, like normal interrupts. | |
+ */ | |
+ if (virq == IPIPE_SERVICE_VNMI && __ipipe_root_p) | |
+ __ipipe_do_vnmi(IPIPE_SERVICE_VNMI, NULL); | |
+ else | |
+ __ipipe_dispatch_irq(virq, IPIPE_IRQF_NOACK); | |
+ | |
+ __ipipe_exit_irq(regs); | |
+} | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | |
void tick_broadcast(const struct cpumask *mask) | |
{ | |
smp_cross_call(mask, IPI_TIMER); | |
@@ -610,9 +722,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | |
case IPI_TIMER: | |
- irq_enter(); | |
- tick_receive_broadcast(); | |
- irq_exit(); | |
+ noipipe_irq_enter(); | |
+ ipi_timer(); | |
+ noipipe_irq_exit(); | |
break; | |
#endif | |
@@ -621,21 +733,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |
break; | |
case IPI_CALL_FUNC: | |
- irq_enter(); | |
+ noipipe_irq_enter(); | |
generic_smp_call_function_interrupt(); | |
- irq_exit(); | |
+ noipipe_irq_exit(); | |
break; | |
case IPI_CALL_FUNC_SINGLE: | |
- irq_enter(); | |
+ noipipe_irq_enter(); | |
generic_smp_call_function_single_interrupt(); | |
- irq_exit(); | |
+ noipipe_irq_exit(); | |
break; | |
case IPI_CPU_STOP: | |
- irq_enter(); | |
+ noipipe_irq_enter(); | |
ipi_cpu_stop(cpu); | |
- irq_exit(); | |
+ noipipe_irq_exit(); | |
break; | |
default: | |
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c | |
index f6fd1d4..0725d7d 100644 | |
--- a/arch/arm/kernel/smp_twd.c | |
+++ b/arch/arm/kernel/smp_twd.c | |
@@ -21,13 +21,18 @@ | |
#include <linux/io.h> | |
#include <linux/of_irq.h> | |
#include <linux/of_address.h> | |
+#include <linux/ipipe_tickdev.h> | |
+#include <linux/irqchip/arm-gic.h> | |
#include <asm/smp_plat.h> | |
#include <asm/smp_twd.h> | |
#include <asm/localtimer.h> | |
+#include <asm/cputype.h> | |
+#include <asm/ipipe.h> | |
/* set up by the platform code */ | |
static void __iomem *twd_base; | |
static struct clk *twd_clk; | |
static unsigned long twd_timer_rate; | |
@@ -36,6 +41,71 @@ static DEFINE_PER_CPU(bool, percpu_setup_called); | |
static struct clock_event_device __percpu **twd_evt; | |
static int twd_ppi; | |
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP) | |
+static DEFINE_PER_CPU(struct ipipe_timer, twd_itimer); | |
+ | |
+void __iomem *gt_base; | |
+ | |
+static void twd_ack(void) | |
+{ | |
+ writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT); | |
+} | |
+ | |
+static struct __ipipe_tscinfo tsc_info; | |
+ | |
+static void twd_get_clock(struct device_node *np); | |
+static void __cpuinit twd_calibrate_rate(void); | |
+ | |
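+/* | |
+ * Map and start the Cortex-A9 MPCore global timer and register it as | |
+ * the I-pipe TSC. Callers pass the TWD base minus 0x400, which is | |
+ * where the global timer sits in the SCU register block; the 0x8 | |
+ * offset written below is its control register, the counter at 0x0. | |
+ */ | |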
+static void __cpuinit gt_setup(unsigned long base_paddr, unsigned bits) | |
+{ | |
+ if ((read_cpuid_id() & 0xf00000) == 0) | |
+ return; | |
+ | |
+ gt_base = ioremap(base_paddr, SZ_256); | |
+ BUG_ON(!gt_base); | |
+ | |
+ /* Start global timer */ | |
+ __raw_writel(1, gt_base + 0x8); | |
+ | |
+ tsc_info.type = IPIPE_TSC_TYPE_FREERUNNING; | |
+ tsc_info.freq = twd_timer_rate; | |
+ tsc_info.counter_vaddr = (unsigned long)gt_base; | |
+ tsc_info.u.counter_paddr = base_paddr; | |
+ | |
+ switch(bits) { | |
+ case 64: | |
+ tsc_info.u.mask = 0xffffffffffffffffULL; | |
+ break; | |
+ case 32: | |
+ tsc_info.u.mask = 0xffffffff; | |
+ break; | |
+ default: | |
+ /* Only 32-bit and 64-bit counters are supported */ | |
+ BUG(); | |
+ } | |
+ | |
+ __ipipe_tsc_register(&tsc_info); | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ | |
+static DEFINE_PER_CPU(int, irqs); | |
+ | |
+void twd_hrtimer_debug(unsigned int irq) /* hw interrupt off */ | |
+{ | |
+ int cpu = ipipe_processor_id(); | |
+ | |
+ if ((++per_cpu(irqs, cpu) % HZ) == 0) { | |
+#if 0 | |
+ __ipipe_serial_debug("%c", 'A' + cpu); | |
+#else | |
+ do { } while (0); | |
+#endif | |
+ } | |
+} | |
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */ | |
+#endif /* CONFIG_IPIPE && CONFIG_SMP */ | |
+ | |
static void twd_set_mode(enum clock_event_mode mode, | |
struct clock_event_device *clk) | |
{ | |
@@ -230,7 +300,12 @@ static irqreturn_t twd_handler(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = *(struct clock_event_device **)dev_id; | |
+ if (clockevent_ipipe_stolen(evt)) | |
+ goto handle; | |
+ | |
if (twd_timer_ack()) { | |
+ handle: | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
return IRQ_HANDLED; | |
} | |
@@ -298,6 +373,16 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk) | |
clk->set_next_event = twd_set_next_event; | |
clk->irq = twd_ppi; | |
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP) | |
+ printk(KERN_INFO "I-pipe, %lu.%03lu MHz timer\n", | |
+ twd_timer_rate / 1000000, | |
+ (twd_timer_rate % 1000000) / 1000); | |
+ clk->ipipe_timer = __this_cpu_ptr(&twd_itimer); | |
+ clk->ipipe_timer->irq = clk->irq; | |
+ clk->ipipe_timer->ack = twd_ack; | |
+ clk->ipipe_timer->min_delay_ticks = 0xf; | |
+#endif | |
+ | |
this_cpu_clk = __this_cpu_ptr(twd_evt); | |
*this_cpu_clk = clk; | |
@@ -335,6 +420,13 @@ static int __init twd_local_timer_common_register(struct device_node *np) | |
twd_get_clock(np); | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+ if (twd_timer_rate == 0) | |
+ twd_calibrate_rate(); | |
+ | |
+ __ipipe_mach_hrtimer_debug = &twd_hrtimer_debug; | |
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */ | |
+ | |
return 0; | |
out_irq: | |
@@ -349,6 +441,8 @@ out_free: | |
int __init twd_local_timer_register(struct twd_local_timer *tlt) | |
{ | |
+ int rc; | |
+ | |
if (twd_base || twd_evt) | |
return -EBUSY; | |
@@ -358,7 +452,14 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt) | |
if (!twd_base) | |
return -ENOMEM; | |
- return twd_local_timer_common_register(NULL); | |
+ | |
+ rc = twd_local_timer_common_register(NULL); | |
+#ifdef CONFIG_IPIPE | |
+ if (rc == 0) | |
+ gt_setup(tlt->res[0].start - 0x400, 32); | |
+#endif | |
+ | |
+ return rc; | |
} | |
#ifdef CONFIG_OF | |
@@ -381,7 +482,18 @@ static void __init twd_local_timer_of_register(struct device_node *np) | |
goto out; | |
} | |
+ | |
err = twd_local_timer_common_register(np); | |
+#ifdef CONFIG_IPIPE | |
+ if (err == 0) { | |
+ struct resource res; | |
+ | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ | |
+ gt_setup(res.start - 0x400, 32); | |
+ } | |
+#endif /* CONFIG_IPIPE */ | |
out: | |
WARN(err, "twd_local_timer_of_register failed (%d)\n", err); | |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c | |
index c59c97e..280735a 100644 | |
--- a/arch/arm/kernel/suspend.c | |
+++ b/arch/arm/kernel/suspend.c | |
@@ -67,7 +67,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | |
*/ | |
ret = __cpu_suspend(arg, fn); | |
if (ret == 0) { | |
- cpu_switch_mm(mm->pgd, mm); | |
+ cpu_switch_mm(mm->pgd, mm, 1); | |
local_flush_bp_all(); | |
local_flush_tlb_all(); | |
} | |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c | |
index 6b9567e..f5d15a9 100644 | |
--- a/arch/arm/kernel/traps.c | |
+++ b/arch/arm/kernel/traps.c | |
@@ -25,6 +25,7 @@ | |
#include <linux/delay.h> | |
#include <linux/init.h> | |
#include <linux/sched.h> | |
+#include <linux/ipipe.h> | |
#include <linux/atomic.h> | |
#include <asm/cacheflush.h> | |
@@ -458,6 +459,14 @@ asmlinkage void do_unexp_fiq (struct pt_regs *regs) | |
*/ | |
asmlinkage void bad_mode(struct pt_regs *regs, int reason) | |
{ | |
+ if (__ipipe_report_trap(IPIPE_TRAP_UNKNOWN, regs)) | |
+ return; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
console_verbose(); | |
printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]); | |
@@ -465,6 +474,11 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason) | |
die("Oops - bad mode", regs, 0); | |
local_irq_disable(); | |
panic("bad mode"); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ hard_local_irq_disable(); | |
+ __ipipe_root_status &= ~IPIPE_STALL_FLAG; | |
+#endif | |
} | |
static int bad_syscall(int n, struct pt_regs *regs) | |
@@ -801,10 +815,20 @@ void __init trap_init(void) | |
#ifdef CONFIG_KUSER_HELPERS | |
static void __init kuser_init(void *vectors) | |
{ | |
+#ifndef CONFIG_IPIPE | |
extern char __kuser_helper_start[], __kuser_helper_end[]; | |
int kuser_sz = __kuser_helper_end - __kuser_helper_start; | |
- | |
- memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | |
+#else /* !CONFIG_IPIPE */ | |
+ extern char __ipipe_tsc_area_start[], __kuser_helper_end[]; | |
+ int kuser_sz = __kuser_helper_end - __ipipe_tsc_area_start; | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+#ifndef CONFIG_IPIPE | |
+ memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | |
+#else /* CONFIG_IPIPE */ | |
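+ /* | |
+ * The enlarged copy (TSC area plus kuser helpers) must still fit | |
+ * in the vectors page; the first 0x200 bytes are assumed to stay | |
+ * reserved for the exception vectors. | |
+ */ | |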
+ BUG_ON(0x1000 - kuser_sz < 0x200); | |
+ memcpy((void *)vectors + 0x1000 - kuser_sz, __ipipe_tsc_area_start, kuser_sz); | |
+#endif /* CONFIG_IPIPE */ | |
/* | |
* vectors + 0xfe0 = __kuser_get_tls | |
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig | |
index 0280238..7f75be9 100644 | |
--- a/arch/arm/mach-at91/Kconfig | |
+++ b/arch/arm/mach-at91/Kconfig | |
@@ -206,6 +206,17 @@ config AT91_TIMER_HZ | |
system clock (of at least several MHz), rounding is less of a | |
problem so it can be safer to use a decimal value like 100. | |
+comment "Adeos I-pipe Options" | |
+ | |
+config IPIPE_AT91_TC | |
+ depends on IPIPE | |
+ int "AT91 TC used as time base by Adeos I-pipe" | |
+ default 0 | |
+ help | |
+ When the Adeos interrupt pipeline is enabled, TC0 is used as the | |
+ time base by default, but you can use TC1 or TC2 instead by | |
+ setting this variable to 1 or 2. This should only be needed to | |
+ avoid conflicts with other drivers. | |
+ | |
endmenu | |
endif | |
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile | |
index 788562d..cf125f1 100644 | |
--- a/arch/arm/mach-at91/Makefile | |
+++ b/arch/arm/mach-at91/Makefile | |
@@ -109,3 +109,5 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o | |
ifeq ($(CONFIG_PM_DEBUG),y) | |
CFLAGS_pm.o += -DDEBUG | |
endif | |
+ | |
+obj-$(CONFIG_IPIPE) += at91_ipipe.o | |
diff --git a/arch/arm/mach-at91/at91_ipipe.c b/arch/arm/mach-at91/at91_ipipe.c | |
new file mode 100644 | |
index 0000000..01058dc | |
--- /dev/null | |
+++ b/arch/arm/mach-at91/at91_ipipe.c | |
@@ -0,0 +1,260 @@ | |
+/* | |
+ * linux/arch/arm/mach-at91/at91_ipipe.c | |
+ * | |
+ * Copyright (C) 2007 Gilles Chanteperdrix <[email protected]> | |
+ * | |
+ * Adaptation to AT91SAM926x: | |
+ * Copyright (C) 2007 Gregory CLEMENT, Adeneo | |
+ * | |
+ * Adaptation to AT91SAM9G45: | |
+ * Copyright (C) 2011 Gregory CLEMENT, Free Electrons | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License version 2 as | |
+ * published by the Free Software Foundation. | |
+ */ | |
+ | |
+#include <linux/interrupt.h> | |
+#include <linux/irq.h> | |
+#include <linux/kernel.h> | |
+#include <linux/clockchips.h> | |
+#include <linux/clk.h> | |
+#include <linux/stringify.h> | |
+#include <linux/err.h> | |
+#include <linux/console.h> | |
+#include <linux/module.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/export.h> | |
+#include <linux/ipipe_tickdev.h> | |
+ | |
+#include <asm/io.h> | |
+#include <asm/mach/time.h> | |
+ | |
+#include <mach/hardware.h> | |
+#include "at91_tc.h" | |
+#include "at91_ipipe.h" | |
+#include "clock.h" | |
+ | |
+#if defined(CONFIG_ARCH_AT91RM9200) | |
+#define AT91_ID_TC0 AT91RM9200_ID_TC0 | |
+#define AT91_ID_TC1 AT91RM9200_ID_TC1 | |
+#define AT91_ID_TC2 AT91RM9200_ID_TC2 | |
+#elif defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20) | |
+#define AT91_ID_TC0 AT91SAM9260_ID_TC0 | |
+#define AT91_ID_TC1 AT91SAM9260_ID_TC1 | |
+#define AT91_ID_TC2 AT91SAM9260_ID_TC2 | |
+#elif defined(CONFIG_ARCH_AT91SAM9261) | |
+#define AT91_ID_TC0 AT91SAM9261_ID_TC0 | |
+#define AT91_ID_TC1 AT91SAM9261_ID_TC1 | |
+#define AT91_ID_TC2 AT91SAM9261_ID_TC2 | |
+#elif defined(CONFIG_ARCH_AT91SAM9263) | |
+#define AT91_ID_TC0 AT91SAM9263_ID_TCB | |
+#define AT91_ID_TC1 AT91SAM9263_ID_TCB | |
+#define AT91_ID_TC2 AT91SAM9263_ID_TCB | |
+#elif defined(CONFIG_ARCH_AT91SAM9RL) | |
+#define AT91_ID_TC0 AT91SAM9RL_ID_TC0 | |
+#define AT91_ID_TC1 AT91SAM9RL_ID_TC1 | |
+#define AT91_ID_TC2 AT91SAM9RL_ID_TC2 | |
+#elif defined(CONFIG_ARCH_AT91X40) | |
+#define AT91_ID_TC0 AT91X40_ID_TC0 | |
+#define AT91_ID_TC1 AT91X40_ID_TC1 | |
+#define AT91_ID_TC2 AT91X40_ID_TC2 | |
+#elif defined(CONFIG_ARCH_AT91SAM9G45) | |
+#define AT91_ID_TC0 AT91SAM9G45_ID_TCB | |
+#define AT91_ID_TC1 AT91SAM9G45_ID_TCB | |
+#define AT91_ID_TC2 AT91SAM9G45_ID_TCB | |
+#else | |
+#error "AT91 processor unsupported by Adeos" | |
+#endif | |
+ | |
+#if (CONFIG_IPIPE_AT91_TC==0) | |
+# define KERNEL_TIMER_IRQ_NUM AT91_ID_TC0 | |
+#elif (CONFIG_IPIPE_AT91_TC==1) | |
+# define KERNEL_TIMER_IRQ_NUM AT91_ID_TC1 | |
+#elif (CONFIG_IPIPE_AT91_TC==2) | |
+# define KERNEL_TIMER_IRQ_NUM AT91_ID_TC2 | |
+#else | |
+#error IPIPE_AT91_TC must be 0, 1 or 2. | |
+#endif | |
+ | |
+#define TCNXCNS(timer,v) ((v) << ((timer)<<1)) | |
+#define AT91_TC_REG_MASK (0xffff) | |
+#define AT91_TC_BASE ((unsigned long)AT91_VA_BASE_TCB0) | |
+ | |
+static unsigned max_delta_ticks; | |
+ | |
+static inline unsigned int at91_tc_read(unsigned int reg_offset) | |
+{ | |
+ unsigned long addr = (AT91_TC_BASE + 0x40 * CONFIG_IPIPE_AT91_TC); | |
+ | |
+ return __raw_readl((void __iomem *)(addr + reg_offset)); | |
+} | |
+ | |
+static inline void at91_tc_write(unsigned int reg_offset, unsigned long value) | |
+{ | |
+ unsigned long addr = (AT91_TC_BASE + 0x40 * CONFIG_IPIPE_AT91_TC); | |
+ | |
+ __raw_writel(value, (void __iomem *)(addr + reg_offset)); | |
+} | |
+ | |
+#define read_CV() at91_tc_read(AT91_TC_CV) | |
+#define read_RC() at91_tc_read(AT91_TC_RC) | |
+#define write_RC(value) at91_tc_write(AT91_TC_RC, value) | |
+ | |
+/* | |
+ * Reprogram the timer | |
+ */ | |
+static int at91_tc_set(unsigned long evt, void *timer); | |
+ | |
+/* | |
+ * Acknowledge the timer interrupt by reading the status register. | |
+ */ | |
+static void at91_tc_ack(void) | |
+{ | |
+ at91_tc_read(AT91_TC_SR); | |
+} | |
+ | |
+static void at91_tc_request(struct ipipe_timer *timer, int steal) | |
+{ | |
+ /* Enable CPCS interrupt. */ | |
+ at91_tc_write(AT91_TC_IER, AT91_TC_CPCS); | |
+} | |
+ | |
+static void at91_tc_release(struct ipipe_timer *timer) | |
+{ | |
+ /* Disable all interrupts. */ | |
+ at91_tc_write(AT91_TC_IDR, ~0ul); | |
+} | |
+ | |
+static struct ipipe_timer at91_itimer = { | |
+ .irq = NR_IRQS_LEGACY + KERNEL_TIMER_IRQ_NUM, | |
+ .request = at91_tc_request, | |
+ .set = at91_tc_set, | |
+ .ack = at91_tc_ack, | |
+ .release = at91_tc_release, | |
+ | |
+ .name = "at91_tc" __stringify(CONFIG_IPIPE_AT91_TC), | |
+ .rating = 250, | |
+}; | |
+ | |
+static int at91_tc_set(unsigned long evt, void *timer) | |
+{ | |
+ unsigned short next_tick; | |
+ | |
+ if (evt > max_delta_ticks) | |
+ evt = max_delta_ticks; | |
+ | |
+ __ipipe_tsc_update(); | |
+ | |
+ next_tick = read_CV() + evt; | |
+ write_RC(next_tick); | |
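+ /* | |
+ * For short delays the counter may already have raced past the | |
+ * compare value just programmed; if so, raise the minimum delay | |
+ * and return -ETIME so the caller retries. | |
+ */ | |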
+ if (evt >= AT91_TC_REG_MASK / 2 | |
+ || (short)(next_tick - read_CV()) > 0) | |
+ return 0; | |
+ | |
+ at91_itimer.min_delay_ticks = evt; | |
+ return -ETIME; | |
+} | |
+ | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .counter_vaddr = (AT91_TC_BASE + | |
+ 0x40 * CONFIG_IPIPE_AT91_TC + AT91_TC_CV), | |
+ .u = { | |
+ { | |
+ .counter_paddr = (AT91_BASE_TCB0 + | |
+ 0x40 * CONFIG_IPIPE_AT91_TC + | |
+ AT91_TC_CV), | |
+ .mask = AT91_TC_REG_MASK, | |
+ }, | |
+ }, | |
+}; | |
+ | |
+void at91_ipipe_init(struct clock_event_device *host_timer) | |
+{ | |
+ unsigned char tc_divisors[] = { 2, 8, 32, 128, 0, }; | |
+ unsigned master_freq, divisor = 0, divided_freq = 0; | |
+ unsigned long long wrap_ns; | |
+ int tc_timer_clock; | |
+ unsigned short v; | |
+ struct clk *clk; | |
+ | |
+#ifdef CONFIG_ARCH_AT91SAM9263 | |
+ clk = clk_get(NULL, "tcb_clk"); | |
+#elif defined(CONFIG_ARCH_AT91SAM9G45) | |
+ clk = clk_get(NULL, "tcb0_clk"); | |
+#else /* not AT91SAM9263 or AT91SAM9G45*/ | |
+ clk = clk_get(NULL, "tc"__stringify(CONFIG_IPIPE_AT91_TC) "_clk"); | |
+#endif | |
+ clk_enable(clk); | |
+ | |
+ /* Disable the channel */ | |
+ at91_tc_write(AT91_TC_CCR, AT91_TC_CLKDIS); | |
+ | |
+ /* Disable all interrupts. */ | |
+ at91_tc_write(AT91_TC_IDR, ~0ul); | |
+ | |
+ master_freq = clk_get_rate(clk_get(NULL, "mck")); | |
+ /* Find the first frequency above 1 MHz */ | |
+ for (tc_timer_clock = ARRAY_SIZE(tc_divisors) - 1; | |
+ tc_timer_clock >= 0; tc_timer_clock--) { | |
+ divisor = tc_divisors[tc_timer_clock]; | |
+ divided_freq = (divisor | |
+ ? master_freq / divisor : AT91_SLOW_CLOCK); | |
+ if (divided_freq > 1000000) | |
+ break; | |
+ } | |
+ | |
+ wrap_ns = (unsigned long long) (AT91_TC_REG_MASK + 1) * NSEC_PER_SEC; | |
+ do_div(wrap_ns, divided_freq); | |
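+ | |
+ /* | |
+ * Worked example (figures assumed): with mck = 100 MHz the loop | |
+ * above picks divisor 32, i.e. a 3.125 MHz timer clock, and the | |
+ * 16-bit counter then wraps after 65536 / 3125000 s ~= 21 ms. | |
+ */ | |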
+ | |
+ if (divided_freq < 1000000) | |
+ printk(KERN_INFO "AT91 I-pipe warning: could not find a" | |
+ " frequency greater than 1MHz\n"); | |
+ | |
+ printk(KERN_INFO "AT91 I-pipe timer: div: %u, freq: %u.%06u MHz, wrap: " | |
+ "%u.%06u ms\n", divisor, | |
+ divided_freq / 1000000, divided_freq % 1000000, | |
+ (unsigned) wrap_ns / 1000000, (unsigned) wrap_ns % 1000000); | |
+ | |
+ /* Subtract a 1 ms margin: once an interrupt occurs, update_tsc | |
+ must be called within 1 ms. update_tsc is called from the timer | |
+ ack path when no higher domain handles the timer, and through | |
+ set_dec when a higher domain does. */ | |
+ wrap_ns -= 1000000; | |
+ /* Set up the interrupt. */ | |
+ | |
+ if (host_timer && host_timer->features & CLOCK_EVT_FEAT_ONESHOT | |
+ && host_timer->max_delta_ns > wrap_ns) | |
+ host_timer->max_delta_ns = wrap_ns; | |
+ | |
+ /* No Sync. */ | |
+ at91_tc_write(AT91_TC_BCR, 0); | |
+ | |
+ /* program NO signal on XCN */ | |
+ v = __raw_readl((void __iomem *) (AT91_VA_BASE_TCB0 + AT91_TC_BMR)); | |
+ v &= ~TCNXCNS(CONFIG_IPIPE_AT91_TC, 3); | |
+ v |= TCNXCNS(CONFIG_IPIPE_AT91_TC, 1); /* AT91_TC_TCNXCNS_NONE */ | |
+ __raw_writel(v, (void __iomem *) (AT91_VA_BASE_TCB0 + AT91_TC_BMR)); | |
+ | |
+ /* Use the clock selected as input clock. */ | |
+ at91_tc_write(AT91_TC_CMR, tc_timer_clock); | |
+ | |
+ /* Load the TC register C. */ | |
+ write_RC(0xffff); | |
+ | |
+ /* Enable the channel. */ | |
+ at91_tc_write(AT91_TC_CCR, AT91_TC_CLKEN | AT91_TC_SWTRG); | |
+ | |
+ at91_itimer.freq = divided_freq; | |
+ at91_itimer.min_delay_ticks = ipipe_timer_ns2ticks(&at91_itimer, 2000); | |
+ max_delta_ticks = ipipe_timer_ns2ticks(&at91_itimer, wrap_ns); | |
+ ipipe_timer_register(&at91_itimer); | |
+ | |
+ tsc_info.freq = divided_freq; | |
+ __ipipe_tsc_register(&tsc_info); | |
+ | |
+#if 1 | |
+ at91_pic_muter_register(); | |
+#endif | |
+} | |
diff --git a/arch/arm/mach-at91/at91_ipipe.h b/arch/arm/mach-at91/at91_ipipe.h | |
new file mode 100644 | |
index 0000000..5eba5d91 | |
--- /dev/null | |
+++ b/arch/arm/mach-at91/at91_ipipe.h | |
@@ -0,0 +1,19 @@ | |
+#ifndef AT91_IPIPE_H | |
+#define AT91_IPIPE_H | |
+ | |
+#include <linux/ipipe.h> | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+struct clock_event_device; | |
+void at91_ipipe_init(struct clock_event_device *host_timer); | |
+ | |
+void at91_pic_muter_register(void); | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+#define at91_ipipe_init(dev) do { } while (0) | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+#endif /* AT91_IPIPE_H */ | |
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c | |
index d193a40..a147917 100644 | |
--- a/arch/arm/mach-at91/at91rm9200.c | |
+++ b/arch/arm/mach-at91/at91rm9200.c | |
@@ -150,6 +150,17 @@ static struct clk tc5_clk = { | |
.type = CLK_TYPE_PERIPHERAL, | |
}; | |
+static struct map_desc at91rm9200_io_desc[] __initdata = { | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ .virtual = (unsigned long)AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+#endif /* CONFIG_IPIPE */ | |
+}; | |
+ | |
static struct clk *periph_clocks[] __initdata = { | |
&pioA_clk, | |
&pioB_clk, | |
@@ -320,6 +331,9 @@ static void __init at91rm9200_map_io(void) | |
{ | |
/* Map peripherals */ | |
at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE); | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91rm9200_ioremap_registers(void) | |
@@ -351,6 +365,7 @@ static void __init at91rm9200_initialize(void) | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+#ifndef CONFIG_IPIPE | |
7, /* Advanced Interrupt Controller (FIQ) */ | |
7, /* System Peripherals */ | |
1, /* Parallel IO Controller A */ | |
@@ -383,6 +398,42 @@ static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
0, /* Advanced Interrupt Controller (IRQ4) */ | |
0, /* Advanced Interrupt Controller (IRQ5) */ | |
0 /* Advanced Interrupt Controller (IRQ6) */ | |
+#else /* CONFIG_IPIPE */ | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller */ | |
+ 6, /* System Peripheral */ | |
+ 0, /* Parallel IO Controller A */ | |
+ 0, /* Parallel IO Controller B */ | |
+ 0, /* Parallel IO Controller C */ | |
+ 0, /* Parallel IO Controller D */ | |
+ 5, /* USART 0 */ | |
+ 5, /* USART 1 */ | |
+ 5, /* USART 2 */ | |
+ 5, /* USART 3 */ | |
+ 0, /* Multimedia Card Interface */ | |
+ 3, /* USB Device Port */ | |
+ 0, /* Two-Wire Interface */ | |
+ 5, /* Serial Peripheral Interface */ | |
+ 4, /* Serial Synchronous Controller */ | |
+ 4, /* Serial Synchronous Controller */ | |
+ 4, /* Serial Synchronous Controller */ | |
+ 7, /* Timer Counter 0 */ | |
+ 7, /* Timer Counter 1 */ | |
+ 7, /* Timer Counter 2 */ | |
+ 0, /* Timer Counter 3 */ | |
+ 0, /* Timer Counter 4 */ | |
+ 0, /* Timer Counter 5 */ | |
+ 2, /* USB Host port */ | |
+ 2, /* Ethernet MAC */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0 /* Advanced Interrupt Controller */ | |
+#endif /*CONFIG_IPIPE */ | |
}; | |
AT91_SOC_START(at91rm9200) | |
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c | |
index 180b302..f0875b4 100644 | |
--- a/arch/arm/mach-at91/at91rm9200_time.c | |
+++ b/arch/arm/mach-at91/at91rm9200_time.c | |
@@ -31,6 +31,7 @@ | |
#include <asm/mach/time.h> | |
#include <mach/at91_st.h> | |
+#include "at91_ipipe.h" | |
static unsigned long last_crtr; | |
static u32 irqmask; | |
@@ -64,6 +65,8 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id) | |
{ | |
u32 sr = at91_st_read(AT91_ST_SR) & irqmask; | |
+ __ipipe_tsc_update(); | |
+ | |
/* | |
* irqs should be disabled here, but as the irq is shared they are only | |
* guaranteed to be off if the timer irq is registered first. | |
@@ -269,6 +272,7 @@ void __init at91rm9200_timer_init(void) | |
clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); | |
clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; | |
clkevt.cpumask = cpumask_of(0); | |
+ at91_ipipe_init(&clkevt); | |
clockevents_register_device(&clkevt); | |
/* register clocksource */ | |
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c | |
index a8ce245..543304e 100644 | |
--- a/arch/arm/mach-at91/at91sam9260.c | |
+++ b/arch/arm/mach-at91/at91sam9260.c | |
@@ -29,6 +29,17 @@ | |
#include "clock.h" | |
#include "sam9_smc.h" | |
+static struct map_desc at91sam9260_io_desc[] __initdata = { | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ .virtual = (unsigned long)AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+#endif /* CONFIG_IPIPE */ | |
+}; | |
+ | |
/* -------------------------------------------------------------------- | |
* Clocks | |
* -------------------------------------------------------------------- */ | |
@@ -322,6 +333,10 @@ static void __init at91sam9xe_map_io(void) | |
} | |
at91_init_sram(0, AT91SAM9XE_SRAM_BASE, sram_size); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91sam9260_io_desc, ARRAY_SIZE(at91sam9260_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91sam9260_map_io(void) | |
@@ -363,6 +378,7 @@ static void __init at91sam9260_initialize(void) | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+#ifndef CONFIG_IPIPE | |
7, /* Advanced Interrupt Controller */ | |
7, /* System Peripherals */ | |
1, /* Parallel IO Controller A */ | |
@@ -395,6 +411,42 @@ static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
0, /* Advanced Interrupt Controller */ | |
0, /* Advanced Interrupt Controller */ | |
0, /* Advanced Interrupt Controller */ | |
+#else /* CONFIG_IPIPE */ | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller */ | |
+ 7, /* System Peripherals */ | |
+ 0, /* Parallel IO Controller A */ | |
+ 0, /* Parallel IO Controller B */ | |
+ 0, /* Parallel IO Controller C */ | |
+ 0, /* Analog-to-Digital Converter */ | |
+ 6, /* USART 0 */ | |
+ 6, /* USART 1 */ | |
+ 6, /* USART 2 */ | |
+ 0, /* Multimedia Card Interface */ | |
+ 4, /* USB Device Port */ | |
+ 0, /* Two-Wire Interface */ | |
+ 6, /* Serial Peripheral Interface 0 */ | |
+ 6, /* Serial Peripheral Interface 1 */ | |
+ 5, /* Serial Synchronous Controller */ | |
+ 0, | |
+ 0, | |
+ 7, /* Timer Counter 0 */ | |
+ 7, /* Timer Counter 1 */ | |
+ 7, /* Timer Counter 2 */ | |
+ 3, /* USB Host port */ | |
+ 3, /* Ethernet */ | |
+ 0, /* Image Sensor Interface */ | |
+ 6, /* USART 3 */ | |
+ 6, /* USART 4 */ | |
+ 6, /* USART 5 */ | |
+ 7, /* Timer Counter 3 */ | |
+ 7, /* Timer Counter 4 */ | |
+ 7, /* Timer Counter 5 */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+#endif /*CONFIG_IPIPE */ | |
}; | |
AT91_SOC_START(at91sam9260) | |
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c | |
index 25efb5a..fb91de6 100644 | |
--- a/arch/arm/mach-at91/at91sam9261.c | |
+++ b/arch/arm/mach-at91/at91sam9261.c | |
@@ -28,6 +28,17 @@ | |
#include "clock.h" | |
#include "sam9_smc.h" | |
+static struct map_desc at91sam9261_io_desc[] __initdata = { | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ .virtual = AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+#endif /* CONFIG_IPIPE */ | |
+}; | |
+ | |
/* -------------------------------------------------------------------- | |
* Clocks | |
* -------------------------------------------------------------------- */ | |
@@ -274,6 +285,9 @@ static void __init at91sam9261_map_io(void) | |
at91_init_sram(0, AT91SAM9G10_SRAM_BASE, AT91SAM9G10_SRAM_SIZE); | |
else | |
at91_init_sram(0, AT91SAM9261_SRAM_BASE, AT91SAM9261_SRAM_SIZE); | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91sam9261_ioremap_registers(void) | |
@@ -305,6 +319,7 @@ static void __init at91sam9261_initialize(void) | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
static unsigned int at91sam9261_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+#ifndef CONFIG_IPIPE | |
7, /* Advanced Interrupt Controller */ | |
7, /* System Peripherals */ | |
1, /* Parallel IO Controller A */ | |
@@ -337,6 +352,42 @@ static unsigned int at91sam9261_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
0, /* Advanced Interrupt Controller */ | |
0, /* Advanced Interrupt Controller */ | |
0, /* Advanced Interrupt Controller */ | |
+#else /* CONFIG_IPIPE */ | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller */ | |
+ 7, /* System Peripherals */ | |
+ 0, /* Parallel IO Controller A */ | |
+ 0, /* Parallel IO Controller B */ | |
+ 0, /* Parallel IO Controller C */ | |
+ 0, | |
+ 6, /* USART 0 */ | |
+ 6, /* USART 1 */ | |
+ 6, /* USART 2 */ | |
+ 0, /* Multimedia Card Interface */ | |
+ 4, /* USB Device Port */ | |
+ 0, /* Two-Wire Interface */ | |
+ 6, /* Serial Peripheral Interface 0 */ | |
+ 6, /* Serial Peripheral Interface 1 */ | |
+ 5, /* Serial Synchronous Controller 0 */ | |
+ 5, /* Serial Synchronous Controller 1 */ | |
+ 5, /* Serial Synchronous Controller 2 */ | |
+ 7, /* Timer Counter 0 */ | |
+ 7, /* Timer Counter 1 */ | |
+ 7, /* Timer Counter 2 */ | |
+ 3, /* USB Host port */ | |
+ 3, /* LCD Controller */ | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+ 0, /* Advanced Interrupt Controller */ | |
+#endif /*CONFIG_IPIPE */ | |
}; | |
AT91_SOC_START(at91sam9261) | |
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c | |
index f44ffd2..4f977c7 100644 | |
--- a/arch/arm/mach-at91/at91sam9263.c | |
+++ b/arch/arm/mach-at91/at91sam9263.c | |
@@ -27,6 +27,17 @@ | |
#include "clock.h" | |
#include "sam9_smc.h" | |
+static struct map_desc at91sam9263_io_desc[] __initdata = { | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ .virtual = AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+#endif /* CONFIG_IPIPE */ | |
+}; | |
+ | |
/* -------------------------------------------------------------------- | |
* Clocks | |
* -------------------------------------------------------------------- */ | |
@@ -309,6 +320,9 @@ static void __init at91sam9263_map_io(void) | |
{ | |
at91_init_sram(0, AT91SAM9263_SRAM0_BASE, AT91SAM9263_SRAM0_SIZE); | |
at91_init_sram(1, AT91SAM9263_SRAM1_BASE, AT91SAM9263_SRAM1_SIZE); | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91sam9263_ioremap_registers(void) | |
@@ -341,6 +355,7 @@ static void __init at91sam9263_initialize(void) | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
static unsigned int at91sam9263_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+#ifndef CONFIG_IPIPE | |
7, /* Advanced Interrupt Controller (FIQ) */ | |
7, /* System Peripherals */ | |
1, /* Parallel IO Controller A */ | |
@@ -373,6 +388,42 @@ static unsigned int at91sam9263_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
2, /* USB Host port */ | |
0, /* Advanced Interrupt Controller (IRQ0) */ | |
0, /* Advanced Interrupt Controller (IRQ1) */ | |
+#else /* CONFIG_IPIPE */ | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller (FIQ) */ | |
+ 6, /* System Peripherals */ | |
+ 0, /* Parallel IO Controller A */ | |
+ 0, /* Parallel IO Controller B */ | |
+ 0, /* Parallel IO Controller C, D and E */ | |
+ 0, | |
+ 0, | |
+ 5, /* USART 0 */ | |
+ 5, /* USART 1 */ | |
+ 5, /* USART 2 */ | |
+ 0, /* Multimedia Card Interface 0 */ | |
+ 0, /* Multimedia Card Interface 1 */ | |
+ 3, /* CAN */ | |
+ 0, /* Two-Wire Interface */ | |
+ 5, /* Serial Peripheral Interface 0 */ | |
+ 5, /* Serial Peripheral Interface 1 */ | |
+ 4, /* Serial Synchronous Controller 0 */ | |
+ 4, /* Serial Synchronous Controller 1 */ | |
+ 5, /* AC97 Controller */ | |
+ 7, /* Timer Counter 0, 1 and 2 */ | |
+ 0, /* Pulse Width Modulation Controller */ | |
+ 2, /* Ethernet */ | |
+ 0, | |
+ 0, /* 2D Graphic Engine */ | |
+ 2, /* USB Device Port */ | |
+ 0, /* Image Sensor Interface */ | |
+ 2, /* LCD Controller */ | |
+ 0, /* DMA Controller */ | |
+ 0, | |
+ 2, /* USB Host port */ | |
+ 0, /* Advanced Interrupt Controller (IRQ0) */ | |
+ 0, /* Advanced Interrupt Controller (IRQ1) */ | |
+#endif /*CONFIG_IPIPE */ | |
}; | |
AT91_SOC_START(at91sam9263) | |
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c | |
index 3a4bc2e..073152e 100644 | |
--- a/arch/arm/mach-at91/at91sam926x_time.c | |
+++ b/arch/arm/mach-at91/at91sam926x_time.c | |
@@ -19,6 +19,7 @@ | |
#include <linux/of_irq.h> | |
#include <asm/mach/time.h> | |
+#include "at91_ipipe.h" | |
#define AT91_PIT_MR 0x00 /* Mode Register */ | |
#define AT91_PIT_PITIEN (1 << 25) /* Timer Interrupt Enable */ | |
@@ -144,6 +145,8 @@ static struct clock_event_device pit_clkevt = { | |
*/ | |
static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) | |
{ | |
+ __ipipe_tsc_update(); | |
+ | |
/* | |
* irqs should be disabled here, but as the irq is shared they are only | |
* guaranteed to be off if the timer irq is registered first. | |
@@ -260,6 +263,7 @@ void __init at91sam926x_pit_init(void) | |
/* Set up and register clockevents */ | |
pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); | |
pit_clkevt.cpumask = cpumask_of(0); | |
+ at91_ipipe_init(&pit_clkevt); | |
clockevents_register_device(&pit_clkevt); | |
} | |
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c | |
index 8b7fce0..00e96fb 100644 | |
--- a/arch/arm/mach-at91/at91sam9g45.c | |
+++ b/arch/arm/mach-at91/at91sam9g45.c | |
@@ -27,6 +27,17 @@ | |
#include "clock.h" | |
#include "sam9_smc.h" | |
+#ifdef CONFIG_IPIPE | |
+static struct map_desc at91sam9g45_io_desc[] __initdata = { | |
+ { | |
+ .virtual = AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
/* -------------------------------------------------------------------- | |
* Clocks | |
* -------------------------------------------------------------------- */ | |
@@ -357,6 +368,9 @@ static struct at91_gpio_bank at91sam9g45_gpio[] __initdata = { | |
static void __init at91sam9g45_map_io(void) | |
{ | |
at91_init_sram(0, AT91SAM9G45_SRAM_BASE, AT91SAM9G45_SRAM_SIZE); | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91sam9g45_ioremap_registers(void) | |
@@ -387,6 +401,7 @@ static void __init at91sam9g45_initialize(void) | |
/* | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
+#ifndef CONFIG_IPIPE | |
static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
7, /* Advanced Interrupt Controller (FIQ) */ | |
7, /* System Peripherals */ | |
@@ -421,6 +436,44 @@ static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
0, | |
0, /* Advanced Interrupt Controller (IRQ0) */ | |
}; | |
+#else | |
+static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller (FIQ) */ | |
+ 6, /* System Peripherals */ | |
+ 0, /* Parallel IO Controller A */ | |
+ 0, /* Parallel IO Controller B */ | |
+ 0, /* Parallel IO Controller C */ | |
+ 0, /* Parallel IO Controller D and E */ | |
+ 0, | |
+ 5, /* USART 0 */ | |
+ 5, /* USART 1 */ | |
+ 5, /* USART 2 */ | |
+ 5, /* USART 3 */ | |
+ 0, /* Multimedia Card Interface 0 */ | |
+ 6, /* Two-Wire Interface 0 */ | |
+ 6, /* Two-Wire Interface 1 */ | |
+ 5, /* Serial Peripheral Interface 0 */ | |
+ 5, /* Serial Peripheral Interface 1 */ | |
+ 4, /* Serial Synchronous Controller 0 */ | |
+ 4, /* Serial Synchronous Controller 1 */ | |
+ 7, /* Timer Counter 0, 1, 2, 3, 4 and 5 */ | |
+ 0, /* Pulse Width Modulation Controller */ | |
+ 0, /* Touch Screen Controller */ | |
+ 0, /* DMA Controller */ | |
+ 2, /* USB Host High Speed port */ | |
+ 3, /* LCD Controller */ | |
+ 5, /* AC97 Controller */ | |
+ 2, /* Ethernet */ | |
+ 0, /* Image Sensor Interface */ | |
+ 2, /* USB Device High speed port */ | |
+ 0, | |
+ 0, /* Multimedia Card Interface 1 */ | |
+ 0, | |
+ 0, /* Advanced Interrupt Controller (IRQ0) */ | |
+}; | |
+#endif | |
AT91_SOC_START(at91sam9g45) | |
.map_io = at91sam9g45_map_io, | |
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c | |
index f77fae5..65de0b0 100644 | |
--- a/arch/arm/mach-at91/at91sam9rl.c | |
+++ b/arch/arm/mach-at91/at91sam9rl.c | |
@@ -28,6 +28,17 @@ | |
#include "clock.h" | |
#include "sam9_smc.h" | |
+static struct map_desc at91sam9rl_io_desc[] __initdata = { | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ .virtual = AT91_VA_BASE_TCB0, | |
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0), | |
+ .length = SZ_16K, | |
+ .type = MT_DEVICE, | |
+ }, | |
+#endif /* CONFIG_IPIPE */ | |
+}; | |
+ | |
/* -------------------------------------------------------------------- | |
* Clocks | |
* -------------------------------------------------------------------- */ | |
@@ -277,6 +288,9 @@ static void __init at91sam9rl_map_io(void) | |
/* Map SRAM */ | |
at91_init_sram(0, AT91SAM9RL_SRAM_BASE, sram_size); | |
+#ifdef CONFIG_IPIPE | |
+ iotable_init(at91sam9rl_io_desc, ARRAY_SIZE(at91sam9rl_io_desc)); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init at91sam9rl_ioremap_registers(void) | |
@@ -307,6 +321,7 @@ static void __init at91sam9rl_initialize(void) | |
* The default interrupt priority levels (0 = lowest, 7 = highest). | |
*/ | |
static unsigned int at91sam9rl_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
+#ifndef CONFIG_IPIPE | |
7, /* Advanced Interrupt Controller */ | |
7, /* System Peripherals */ | |
1, /* Parallel IO Controller A */ | |
@@ -339,6 +354,42 @@ static unsigned int at91sam9rl_default_irq_priority[NR_AIC_IRQS] __initdata = { | |
0, | |
0, | |
0, /* Advanced Interrupt Controller */ | |
+#else /* CONFIG_IPIPE */ | |
+/* Give the highest priority to the TCs, since they are used as the timer | |
+ interrupt by the I-pipe. */ | |
+ 7, /* Advanced Interrupt Controller */ | |
+ 6, /* System Peripherals */ | |
+ 1, /* Parallel IO Controller A */ | |
+ 1, /* Parallel IO Controller B */ | |
+ 1, /* Parallel IO Controller C */ | |
+ 1, /* Parallel IO Controller D */ | |
+ 4, /* USART 0 */ | |
+ 4, /* USART 1 */ | |
+ 4, /* USART 2 */ | |
+ 4, /* USART 3 */ | |
+ 0, /* Multimedia Card Interface */ | |
+ 5, /* Two-Wire Interface 0 */ | |
+ 5, /* Two-Wire Interface 1 */ | |
+ 4, /* Serial Peripheral Interface */ | |
+ 3, /* Serial Synchronous Controller 0 */ | |
+ 3, /* Serial Synchronous Controller 1 */ | |
+ 7, /* Timer Counter 0 */ | |
+ 7, /* Timer Counter 1 */ | |
+ 7, /* Timer Counter 2 */ | |
+ 0, | |
+ 0, /* Touch Screen Controller */ | |
+ 0, /* DMA Controller */ | |
+ 2, /* USB Device High speed port */ | |
+ 2, /* LCD Controller */ | |
+ 5, /* AC97 Controller */ | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, | |
+ 0, /* Advanced Interrupt Controller */ | |
+#endif /*CONFIG_IPIPE */ | |
}; | |
AT91_SOC_START(at91sam9rl) | |
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c | |
index 19ca793..e8d4c4c 100644 | |
--- a/arch/arm/mach-at91/at91x40.c | |
+++ b/arch/arm/mach-at91/at91x40.c | |
@@ -48,8 +48,10 @@ static void at91x40_idle(void) | |
* Disable the processor clock. The processor will be automatically | |
* re-enabled by an interrupt or by a reset. | |
*/ | |
+ hard_local_irq_disable(); | |
__raw_writel(AT91_PS_CR_CPU, AT91_IO_P2V(AT91_PS_CR)); | |
cpu_do_idle(); | |
+ hard_local_irq_enable(); | |
} | |
void __init at91x40_initialize(unsigned long main_clock) | |
diff --git a/arch/arm/mach-at91/at91x40_time.c b/arch/arm/mach-at91/at91x40_time.c | |
index 2919eba..2e377b5 100644 | |
--- a/arch/arm/mach-at91/at91x40_time.c | |
+++ b/arch/arm/mach-at91/at91x40_time.c | |
@@ -27,6 +27,7 @@ | |
#include <mach/hardware.h> | |
#include <asm/mach/time.h> | |
+#include "at91_ipipe.h" | |
#include "at91_tc.h" | |
#define at91_tc_read(field) \ | |
@@ -50,6 +51,7 @@ static u32 at91x40_gettimeoffset(void) | |
static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id) | |
{ | |
+ __ipipe_tsc_update(); | |
at91_tc_read(AT91_TC_CLK1BASE + AT91_TC_SR); | |
timer_tick(); | |
return IRQ_HANDLED; | |
@@ -60,7 +62,6 @@ static struct irqaction at91x40_timer_irq = { | |
.flags = IRQF_DISABLED | IRQF_TIMER, | |
.handler = at91x40_timer_interrupt | |
}; | |
- | |
void __init at91x40_timer_init(void) | |
{ | |
unsigned int v; | |
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c | |
index da84188..888372f 100644 | |
--- a/arch/arm/mach-at91/clock.c | |
+++ b/arch/arm/mach-at91/clock.c | |
@@ -956,6 +956,8 @@ late_initcall(at91_clock_reset); | |
void at91sam9_idle(void) | |
{ | |
+ hard_local_irq_disable(); | |
at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); | |
cpu_do_idle(); | |
+ hard_local_irq_enable(); | |
} | |
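
The idle-path changes in at91x40.c and clock.c above follow a single I-pipe rule: once interrupts are pipelined, local_irq_disable() only stalls the Linux (root) domain, so the clock-gating write and the WFI could otherwise be preempted by a head-domain interrupt in between. The hard_local_irq_*() helpers act on the real CPSR I-bit instead. A minimal sketch of the pattern, with soc_idle() as a hypothetical example:

static void soc_idle(void)
{
	hard_local_irq_disable();   /* mask at the CPU, for all domains */
	/* ... gate the processor clock in the SoC's power controller ... */
	cpu_do_idle();              /* WFI: any hardware IRQ wakes the core */
	hard_local_irq_enable();    /* unmask; pending IRQs are taken here */
}
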
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c | |
index a5afcf7..8ef9c3e 100644 | |
--- a/arch/arm/mach-at91/gpio.c | |
+++ b/arch/arm/mach-at91/gpio.c | |
@@ -24,9 +24,18 @@ | |
#include <linux/irqdomain.h> | |
#include <linux/irqchip/chained_irq.h> | |
#include <linux/of_address.h> | |
+#include <linux/ipipe.h> | |
#include <mach/hardware.h> | |
#include <mach/at91_pio.h> | |
+#include "at91_ipipe.h" | |
+#ifdef CONFIG_IPIPE | |
+#include "at91_aic.h" | |
+#include <asm/irq.h> | |
+ | |
+static unsigned aic_root; | |
+static unsigned aic_muted; | |
+#endif /* CONFIG_IPIPE */ | |
#include "generic.h" | |
@@ -41,6 +50,12 @@ struct at91_gpio_chip { | |
void __iomem *regbase; /* PIO bank virtual address */ | |
struct clk *clock; /* associated clock */ | |
struct irq_domain *domain; /* associated irq domain */ | |
+#ifdef CONFIG_IPIPE | |
+ unsigned *nr_nonroot; | |
+ unsigned nr_nonroot_storage; | |
+ unsigned root; | |
+ unsigned muted; | |
+#endif | |
}; | |
#define to_at91_gpio_chip(c) container_of(c, struct at91_gpio_chip, chip) | |
@@ -580,6 +595,10 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) | |
static struct irq_chip gpio_irqchip = { | |
.name = "GPIO", | |
.irq_disable = gpio_irq_mask, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_ack = gpio_irq_mask, | |
+ .irq_mask_ack = gpio_irq_mask, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_mask = gpio_irq_mask, | |
.irq_unmask = gpio_irq_unmask, | |
/* .irq_set_type is set dynamically */ | |
@@ -612,7 +631,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |
n = find_first_bit(&isr, BITS_PER_LONG); | |
while (n < BITS_PER_LONG) { | |
- generic_handle_irq(irq_find_mapping(at91_gpio->domain, n)); | |
+ ipipe_handle_demuxed_irq(irq_find_mapping(at91_gpio->domain, n)); | |
n = find_next_bit(&isr, BITS_PER_LONG, n + 1); | |
} | |
} | |
@@ -955,14 +974,121 @@ void __init at91_gpio_init(struct at91_gpio_bank *data, int nr_banks) | |
for (i = 0; i < gpio_banks; i++) { | |
at91_gpio = &gpio_chip[i]; | |
+#ifdef CONFIG_IPIPE | |
+ at91_gpio->nr_nonroot = &at91_gpio->nr_nonroot_storage; | |
+#endif | |
/* | |
* GPIO controller are grouped on some SoC: | |
* PIOC, PIOD and PIOE can share the same IRQ line | |
*/ | |
- if (last && last->pioc_hwirq == at91_gpio->pioc_hwirq) | |
+ if (last && last->pioc_hwirq == at91_gpio->pioc_hwirq) { | |
last->next = at91_gpio; | |
+#ifdef CONFIG_IPIPE | |
+ at91_gpio->nr_nonroot = last->nr_nonroot; | |
+#endif | |
+ } | |
last = at91_gpio; | |
gpiochip_add(&at91_gpio->chip); | |
} | |
} | |
+ | |
+#if defined(CONFIG_IPIPE) | |
+static void at91_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata; | |
+ struct irq_chip *chip; | |
+ | |
+ idata = irq_desc_get_irq_data(desc); | |
+ chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip == &gpio_irqchip) { | |
+ struct at91_gpio_chip *chip = irq_data_get_irq_chip_data(idata); | |
+ | |
+ if (ipd == &ipipe_root) | |
+ chip->root |= (1 << idata->hwirq); | |
+ else | |
+ chip->root &= ~(1 << idata->hwirq); | |
+ | |
+ if (ipd != &ipipe_root && ++(*chip->nr_nonroot) == 1) | |
+ aic_root &= ~(1 << chip->pioc_hwirq); | |
+ } else { | |
+ if (ipd == &ipipe_root) | |
+ aic_root |= (1 << idata->hwirq); | |
+ else | |
+ aic_root &= ~(1 << idata->hwirq); | |
+ } | |
+} | |
+ | |
+static void at91_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ struct irq_chip *chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip == &gpio_irqchip) { | |
+ struct at91_gpio_chip *chip = irq_data_get_irq_chip_data(idata); | |
+ | |
+ if (ipd != &ipipe_root) | |
+ chip->root |= (1 << idata->hwirq); | |
+ | |
+ if (ipd != &ipipe_root && --(*chip->nr_nonroot) == 0) | |
+ aic_root |= (1 << chip->pioc_hwirq); | |
+ } else | |
+ if (ipd != &ipipe_root) | |
+ aic_root |= (1 << idata->hwirq); | |
+} | |
+ | |
+static void at91_mute_pic(void) | |
+{ | |
+ struct at91_gpio_chip *prev, *chip = NULL; | |
+ unsigned long unmasked, muted; | |
+ unsigned i; | |
+ | |
+ for (i = 0; i < gpio_banks; i++) { | |
+ prev = chip; | |
+ chip = &gpio_chip[i]; | |
+ if (!(*chip->nr_nonroot)) | |
+ continue; | |
+ | |
+ unmasked = __raw_readl(chip->regbase + PIO_IMR); | |
+ muted = unmasked & chip->root; | |
+ chip->muted = muted; | |
+ __raw_writel(muted, chip->regbase + PIO_IDR); | |
+ } | |
+ | |
+ unmasked = at91_aic_read(AT91_AIC_IMR); | |
+ aic_muted = muted = unmasked & aic_root; | |
+ at91_aic_write(AT91_AIC_IDCR, muted); | |
+} | |
+ | |
+static void at91_unmute_pic(void) | |
+{ | |
+ struct at91_gpio_chip *prev, *chip = NULL; | |
+ unsigned i; | |
+ | |
+ at91_aic_write(AT91_AIC_IECR, aic_muted); | |
+ | |
+ for (i = 0; i < gpio_banks; i++) { | |
+ prev = chip; | |
+ chip = &gpio_chip[i]; | |
+ if (!(*chip->nr_nonroot)) | |
+ continue; | |
+ | |
+ __raw_writel(chip->muted, chip->regbase + PIO_IER); | |
+ } | |
+} | |
+ | |
+void at91_pic_muter_register(void) | |
+{ | |
+ struct ipipe_mach_pic_muter at91_pic_muter = { | |
+ .enable_irqdesc = at91_enable_irqdesc, | |
+ .disable_irqdesc = at91_disable_irqdesc, | |
+ .mute = at91_mute_pic, | |
+ .unmute = at91_unmute_pic, | |
+ }; | |
+ | |
+ ipipe_pic_muter_register(&at91_pic_muter); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
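
Two things happen in the gpio.c changes: the chained handler now feeds each decoded GPIO interrupt through ipipe_handle_demuxed_irq(), so it is logged in the pipeline and delivered to whichever domain owns it (generic_handle_irq() would run it unconditionally in Linux context), and a "PIC muter" is registered that tracks, per bank, how many lines belong to a non-root domain so whole banks can be masked while the root domain has interrupts virtually disabled. A sketch of the demux side, with the my_*() names as placeholders rather than symbols from this patch:

static void my_bank_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long isr;
	int n;

	chained_irq_enter(chip, desc);
	isr = __raw_readl(my_regbase + PIO_ISR);  /* pending lines */
	for_each_set_bit(n, &isr, BITS_PER_LONG)
		/* log into the pipeline instead of handling in place */
		ipipe_handle_demuxed_irq(irq_find_mapping(my_domain, n));
	chained_irq_exit(chip, desc);
}
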
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h | |
index a832e07..9b730ec 100644 | |
--- a/arch/arm/mach-at91/include/mach/hardware.h | |
+++ b/arch/arm/mach-at91/include/mach/hardware.h | |
@@ -86,6 +86,27 @@ | |
*/ | |
#define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS) | |
+#ifdef CONFIG_IPIPE | |
+#if defined(CONFIG_ARCH_AT91RM9200) | |
+#define AT91_BASE_TCB0 AT91RM9200_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20) | |
+#define AT91_BASE_TCB0 AT91SAM9260_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91SAM9261) | |
+#define AT91_BASE_TCB0 AT91SAM9261_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91SAM9263) | |
+#define AT91_BASE_TCB0 AT91SAM9263_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91SAM9RL) | |
+#define AT91_BASE_TCB0 AT91SAM9RL_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91SAM9G45) | |
+#define AT91_BASE_TCB0 AT91SAM9G45_BASE_TCB0 | |
+#elif defined(CONFIG_ARCH_AT91X40) | |
+#define AT91_BASE_TCB0 (AT91_BASE_SYS + AT91_TC) | |
+#else | |
+#error "AT91 processor unsupported by Adeos" | |
+#endif | |
+#define AT91_VA_BASE_TCB0 AT91_IO_P2V(AT91_BASE_TCB0) | |
+#endif | |
+ | |
/* Internal SRAM is mapped below the IO devices */ | |
#define AT91_SRAM_MAX SZ_1M | |
#define AT91_VIRT_BASE (AT91_IO_VIRT_BASE - AT91_SRAM_MAX) | |
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c | |
index e0ca591..138ca69 100644 | |
--- a/arch/arm/mach-at91/irq.c | |
+++ b/arch/arm/mach-at91/irq.c | |
@@ -166,7 +166,7 @@ at91_aic_handle_irq(struct pt_regs *regs) | |
if (!irqstat) | |
at91_aic_write(AT91_AIC_EOICR, 0); | |
else | |
- handle_IRQ(irqnr, regs); | |
+ ipipe_handle_multi_irq(irqnr, regs); | |
} | |
asmlinkage void __exception_irq_entry | |
@@ -181,10 +181,10 @@ at91_aic5_handle_irq(struct pt_regs *regs) | |
if (!irqstat) | |
at91_aic_write(AT91_AIC5_EOICR, 0); | |
else | |
- handle_IRQ(irqnr, regs); | |
+ ipipe_handle_multi_irq(irqnr, regs); | |
} | |
-static void at91_aic_mask_irq(struct irq_data *d) | |
+static inline void at91_aic_hard_mask_irq(struct irq_data *d) | |
{ | |
/* Disable interrupt on AIC */ | |
at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq); | |
@@ -192,6 +192,16 @@ static void at91_aic_mask_irq(struct irq_data *d) | |
clear_backup(d->hwirq); | |
} | |
+static void at91_aic_mask_irq(struct irq_data *d) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_cond_local_irq_save(); | |
+ at91_aic_hard_mask_irq(d); | |
+ ipipe_lock_irq(d->irq); | |
+ hard_cond_local_irq_restore(flags); | |
+} | |
+ | |
static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d) | |
{ | |
/* Disable interrupt on AIC5 */ | |
@@ -201,7 +211,7 @@ static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d) | |
clear_backup(d->hwirq); | |
} | |
-static void at91_aic_unmask_irq(struct irq_data *d) | |
+static inline void at91_aic_hard_unmask_irq(struct irq_data *d) | |
{ | |
/* Enable interrupt on AIC */ | |
at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq); | |
@@ -209,6 +219,16 @@ static void at91_aic_unmask_irq(struct irq_data *d) | |
set_backup(d->hwirq); | |
} | |
+static void at91_aic_unmask_irq(struct irq_data *d) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_cond_local_irq_save(); | |
+ at91_aic_hard_unmask_irq(d); | |
+ ipipe_unlock_irq(d->irq); | |
+ hard_cond_local_irq_restore(flags); | |
+} | |
+ | |
static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d) | |
{ | |
/* Enable interrupt on AIC5 */ | |
@@ -227,6 +247,19 @@ static void at91_aic_eoi(struct irq_data *d) | |
at91_aic_write(AT91_AIC_EOICR, 0); | |
} | |
+#ifdef CONFIG_IPIPE | |
+static void at91_aic_hold_irq(struct irq_data *d) | |
+{ | |
+ at91_aic_hard_mask_irq(d); | |
+ at91_aic_eoi(d); | |
+} | |
+ | |
+static void at91_aic_release_irq(struct irq_data *d) | |
+{ | |
+ at91_aic_hard_unmask_irq(d); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void __maybe_unused at91_aic5_eoi(struct irq_data *d) | |
{ | |
at91_aic_write(AT91_AIC5_EOICR, 0); | |
@@ -296,6 +329,10 @@ static struct irq_chip at91_aic_chip = { | |
.irq_set_type = at91_aic_set_type, | |
.irq_set_wake = at91_aic_set_wake, | |
.irq_eoi = at91_aic_eoi, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_hold = at91_aic_hold_irq, | |
+ .irq_release = at91_aic_release_irq, | |
+#endif | |
}; | |
static void __init at91_aic_hw_init(unsigned int spu_vector) | |
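
The AIC rework splits each mask/unmask into a hard half (controller access only) and a pipelined half: the regular irqchip handlers additionally ipipe_lock_irq()/ipipe_unlock_irq() the line so an interrupt masked by Linux cannot be replayed into the root domain, while irq_hold/irq_release give the pipeline a way to quiet a pending line (mask plus EOI) until the root domain is ready for it. hard_cond_local_irq_save() hard-disables only when CONFIG_IPIPE is enabled and compiles to a plain flags save otherwise. The shape of the pattern, with my_hard_mask() standing in for the controller write:

static void my_mask_irq(struct irq_data *d)
{
	unsigned long flags;

	flags = hard_cond_local_irq_save(); /* no-op without CONFIG_IPIPE */
	my_hard_mask(d);                    /* mask at the controller */
	ipipe_lock_irq(d->irq);             /* don't replay while masked */
	hard_cond_local_irq_restore(flags);
}
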
diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c | |
index 1343773..0a478c8 100644 | |
--- a/arch/arm/mach-imx/3ds_debugboard.c | |
+++ b/arch/arm/mach-imx/3ds_debugboard.c | |
@@ -20,6 +20,7 @@ | |
#include <linux/smsc911x.h> | |
#include <linux/regulator/machine.h> | |
#include <linux/regulator/fixed.h> | |
+#include <linux/ipipe.h> | |
#include "hardware.h" | |
@@ -101,7 +102,7 @@ static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) | |
for (; int_valid != 0; int_valid >>= 1, expio_irq++) { | |
if ((int_valid & 1) == 0) | |
continue; | |
- generic_handle_irq(irq_find_mapping(domain, expio_irq)); | |
+ ipipe_handle_demuxed_irq(irq_find_mapping(domain, expio_irq)); | |
} | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig | |
index ba44328..c2e922a 100644 | |
--- a/arch/arm/mach-imx/Kconfig | |
+++ b/arch/arm/mach-imx/Kconfig | |
@@ -19,6 +19,7 @@ menu "Freescale i.MX support" | |
config MXC_IRQ_PRIOR | |
bool "Use IRQ priority" | |
+ depends on !IPIPE | |
help | |
Select this if you want to use prioritized IRQ handling. | |
This feature prevents higher priority ISR to be interrupted | |
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c | |
index e163ec7..79156a4 100644 | |
--- a/arch/arm/mach-imx/avic.c | |
+++ b/arch/arm/mach-imx/avic.c | |
@@ -151,6 +151,9 @@ static __init void avic_init_gc(int idx, unsigned int irq_start) | |
ct->chip.irq_mask = irq_gc_mask_clr_bit; | |
ct->chip.irq_unmask = irq_gc_mask_set_bit; | |
ct->chip.irq_ack = irq_gc_mask_clr_bit; | |
+#ifdef CONFIG_IPIPE | |
+ ct->chip.irq_mask_ack = irq_gc_mask_clr_bit; | |
+#endif /* CONFIG_IPIPE */ | |
ct->chip.irq_set_wake = irq_gc_set_wake; | |
ct->chip.irq_suspend = avic_irq_suspend; | |
ct->chip.irq_resume = avic_irq_resume; | |
@@ -169,7 +172,7 @@ asmlinkage void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) | |
if (nivector == 0xffff) | |
break; | |
- handle_IRQ(irq_find_mapping(domain, nivector), regs); | |
+ ipipe_handle_multi_irq(irq_find_mapping(domain, nivector), regs); | |
} while (1); | |
} | |
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c | |
index 15f9d22..b6ee881 100644 | |
--- a/arch/arm/mach-imx/clk-imx1.c | |
+++ b/arch/arm/mach-imx/clk-imx1.c | |
@@ -109,7 +109,8 @@ int __init mx1_clocks_init(unsigned long fref) | |
clk_register_clkdev(clk[clk32], NULL, "imx1-rtc.0"); | |
clk_register_clkdev(clk[clko], "clko", NULL); | |
- mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT); | |
+ mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), | |
+ MX1_TIM1_BASE_ADDR, MX1_TIM1_INT); | |
return 0; | |
} | |
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c | |
index d7ed660..1588b2e 100644 | |
--- a/arch/arm/mach-imx/clk-imx21.c | |
+++ b/arch/arm/mach-imx/clk-imx21.c | |
@@ -179,7 +179,8 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href) | |
clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL); | |
clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL); | |
- mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1); | |
+ mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), | |
+ MX21_GPT1_BASE_ADDR, MX21_INT_GPT1); | |
return 0; | |
} | |
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c | |
index 69858c7..a104771 100644 | |
--- a/arch/arm/mach-imx/clk-imx25.c | |
+++ b/arch/arm/mach-imx/clk-imx25.c | |
@@ -304,7 +304,8 @@ int __init mx25_clocks_init(void) | |
clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); | |
clk_register_clkdev(clk[iim_ipg], "iim", NULL); | |
- mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1); | |
+ mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), | |
+ MX25_GPT1_BASE_ADDR, MX25_INT_GPT1); | |
return 0; | |
} | |
@@ -337,8 +338,14 @@ int __init mx25_clocks_init_dt(void) | |
base = of_iomap(np, 0); | |
WARN_ON(!base); | |
irq = irq_of_parse_and_map(np, 0); | |
+ { | |
+ struct resource res; | |
- mxc_timer_init(base, irq); | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ | |
+ mxc_timer_init(base, res.start, irq); | |
+ } | |
return 0; | |
} | |
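
The bracketed block is the DT counterpart of the new mxc_timer_init() signature: the timer's physical base is recovered with of_address_to_resource() so the I-pipe user-space TSC helper can map the counter by physical address, and a failed lookup falls back to 0, presumably leaving the kernel-side timer functional without the user-visible counter. The same pattern recurs verbatim in clk-imx6q.c and clk-imx6sl.c below:

struct resource res;

if (of_address_to_resource(np, 0, &res))
	res.start = 0;   /* no kuser TSC mapping; timer itself still works */
mxc_timer_init(base, res.start, irq);
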
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c | |
index c3cfa41..7ec6d76 100644 | |
--- a/arch/arm/mach-imx/clk-imx27.c | |
+++ b/arch/arm/mach-imx/clk-imx27.c | |
@@ -288,7 +288,8 @@ int __init mx27_clocks_init(unsigned long fref) | |
clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0"); | |
clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL); | |
- mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1); | |
+ mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), | |
+ MX27_GPT1_BASE_ADDR, MX27_INT_GPT1); | |
clk_prepare_enable(clk[emi_ahb_gate]); | |
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c | |
index b5b65f3..2a6e93e 100644 | |
--- a/arch/arm/mach-imx/clk-imx31.c | |
+++ b/arch/arm/mach-imx/clk-imx31.c | |
@@ -186,7 +186,8 @@ int __init mx31_clocks_init(unsigned long fref) | |
mx31_revision(); | |
clk_disable_unprepare(clk[iim_gate]); | |
- mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT); | |
+ mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), | |
+ MX31_GPT1_BASE_ADDR, MX31_INT_GPT); | |
return 0; | |
} | |
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c | |
index 2193c83..61fac63 100644 | |
--- a/arch/arm/mach-imx/clk-imx35.c | |
+++ b/arch/arm/mach-imx/clk-imx35.c | |
@@ -279,9 +279,11 @@ int __init mx35_clocks_init(void) | |
imx_print_silicon_rev("i.MX35", mx35_revision()); | |
#ifdef CONFIG_MXC_USE_EPIT | |
- epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); | |
+ epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), | |
+ MX35_EPIT1_BASE_ADDR, MX35_INT_EPIT1); | |
#else | |
- mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); | |
+ mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), | |
+ MX35_GPT1_BASE_ADDR, MX35_INT_GPT); | |
#endif | |
return 0; | |
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c | |
index 6fc486b..f71970d 100644 | |
--- a/arch/arm/mach-imx/clk-imx51-imx53.c | |
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c | |
@@ -346,6 +346,10 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, | |
clk_prepare_enable(clk[tmax1]); | |
clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */ | |
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */ | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ mxc_pic_muter_register(); | |
+#endif | |
} | |
int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |
@@ -417,7 +421,8 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |
clk_set_rate(clk[esdhc_b_podf], 166250000); | |
/* System timer */ | |
- mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT); | |
+ mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), | |
+ MX51_GPT1_BASE_ADDR, MX51_INT_GPT); | |
clk_prepare_enable(clk[iim_gate]); | |
imx_print_silicon_rev("i.MX51", mx51_revision()); | |
@@ -530,7 +535,8 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |
clk_set_rate(clk[esdhc_b_podf], 200000000); | |
/* System timer */ | |
- mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT); | |
+ mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), | |
+ MX53_GPT1_BASE_ADDR, MX53_INT_GPT); | |
clk_prepare_enable(clk[iim_gate]); | |
imx_print_silicon_rev("i.MX53", mx53_revision()); | |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c | |
index 4e3148c..a5dea56 100644 | |
--- a/arch/arm/mach-imx/clk-imx6q.c | |
+++ b/arch/arm/mach-imx/clk-imx6q.c | |
@@ -698,5 +698,12 @@ int __init mx6q_clocks_init(void) | |
base = of_iomap(np, 0); | |
WARN_ON(!base); | |
irq = irq_of_parse_and_map(np, 0); | |
- mxc_timer_init(base, irq); | |
+ { | |
+ struct resource res; | |
+ | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ | |
+ mxc_timer_init(base, res.start, irq); | |
+ } | |
} | |
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c | |
--- a/arch/arm/mach-imx/clk-imx6sl.c | |
+++ b/arch/arm/mach-imx/clk-imx6sl.c | |
@@ -488,5 +488,12 @@ int __init mx6sl_clocks_init(void) | |
base = of_iomap(np, 0); | |
WARN_ON(!base); | |
irq = irq_of_parse_and_map(np, 0); | |
- mxc_timer_init(base, irq); | |
+ { | |
+ struct resource res; | |
+ | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ | |
+ mxc_timer_init(base, res.start, irq); | |
+ } | |
} | |
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h | |
index c08ae3f..6731a23 100644 | |
--- a/arch/arm/mach-imx/common.h | |
+++ b/arch/arm/mach-imx/common.h | |
@@ -52,7 +52,8 @@ extern void imx51_soc_init(void); | |
extern void imx51_init_late(void); | |
extern void imx53_init_late(void); | |
extern void epit_timer_init(void __iomem *base, int irq); | |
-extern void mxc_timer_init(void __iomem *, int); | |
+extern void mxc_timer_init(void __iomem *, unsigned long, int); | |
+extern void mxc_pic_muter_register(void); | |
extern int mx1_clocks_init(unsigned long fref); | |
extern int mx21_clocks_init(unsigned long lref, unsigned long fref); | |
extern int mx25_clocks_init(void); | |
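
mxc_timer_init() thus grows a middle argument: the virtual base the kernel already used, the physical base for the user-visible TSC, then the IRQ (mxc_pic_muter_register() is the new muter hook for i.MX5). All the clk-imx*.c call sites above were updated mechanically, e.g.:

mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
	       MX31_GPT1_BASE_ADDR, MX31_INT_GPT);
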
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c | |
index e70e3ac..7ea13da 100644 | |
--- a/arch/arm/mach-imx/cpu.c | |
+++ b/arch/arm/mach-imx/cpu.c | |
@@ -1,4 +1,3 @@ | |
- | |
#include <linux/module.h> | |
#include <linux/io.h> | |
@@ -44,3 +43,4 @@ void __init imx_set_aips(void __iomem *base) | |
reg = __raw_readl(base + 0x50) & 0x00FFFFFF; | |
__raw_writel(reg, base + 0x50); | |
} | |
+ | |
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c | |
index 5536fd8..89e855d 100644 | |
--- a/arch/arm/mach-imx/mach-imx6q.c | |
+++ b/arch/arm/mach-imx/mach-imx6q.c | |
@@ -268,6 +268,12 @@ static void __init imx6q_timer_init(void) | |
clocksource_of_init(); | |
imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q", | |
imx_get_soc_revision()); | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ extern void __init mx6_pic_muter_register(void); | |
+ mx6_pic_muter_register(); | |
+ } | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static const char *imx6q_dt_compat[] __initdata = { | |
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c | |
index 1ed9161..f4c8d86 100644 | |
--- a/arch/arm/mach-imx/mach-mx31_3ds.c | |
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c | |
@@ -18,6 +18,7 @@ | |
#include <linux/init.h> | |
#include <linux/clk.h> | |
#include <linux/irq.h> | |
+#include <linux/ipipe.h> | |
#include <linux/gpio.h> | |
#include <linux/platform_device.h> | |
#include <linux/mfd/mc13783.h> | |
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c | |
index daf8889..62e0717 100644 | |
--- a/arch/arm/mach-imx/mach-mx31ads.c | |
+++ b/arch/arm/mach-imx/mach-mx31ads.c | |
@@ -22,6 +22,7 @@ | |
#include <linux/i2c.h> | |
#include <linux/irq.h> | |
#include <linux/irqdomain.h> | |
+#include <linux/ipipe.h> | |
#include <asm/mach-types.h> | |
#include <asm/mach/arch.h> | |
@@ -168,7 +169,7 @@ static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc) | |
if ((int_valid & 1) == 0) | |
continue; | |
- generic_handle_irq(irq_find_mapping(domain, expio_irq)); | |
+ ipipe_handle_demuxed_irq(irq_find_mapping(domain, expio_irq)); | |
} | |
} | |
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c | |
index 3c609c5..9d8b70c 100644 | |
--- a/arch/arm/mach-imx/mm-imx1.c | |
+++ b/arch/arm/mach-imx/mm-imx1.c | |
@@ -19,6 +19,7 @@ | |
#include <linux/init.h> | |
#include <linux/io.h> | |
#include <linux/pinctrl/machine.h> | |
+#include <linux/bug.h> | |
#include <asm/mach/map.h> | |
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c | |
index 9357707..14a85e1 100644 | |
--- a/arch/arm/mach-imx/mm-imx25.c | |
+++ b/arch/arm/mach-imx/mm-imx25.c | |
@@ -100,6 +100,13 @@ void __init imx25_soc_init(void) | |
pinctrl_provide_dummies(); | |
/* i.mx25 has the i.mx35 type sdma */ | |
imx_add_imx_sdma("imx35-sdma", MX25_SDMA_BASE_ADDR, MX25_INT_SDMA, &imx25_sdma_pdata); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ /* Setup AIPS registers */ | |
+ imx_set_aips(MX25_IO_ADDRESS(MX25_AIPS1_BASE_ADDR)); | |
+ imx_set_aips(MX25_IO_ADDRESS(MX25_AIPS2_BASE_ADDR)); | |
+#endif | |
+ | |
/* i.mx25 has the i.mx31 type audmux */ | |
platform_device_register_simple("imx31-audmux", 0, imx25_audmux_res, | |
ARRAY_SIZE(imx25_audmux_res)); | |
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c | |
index 4f1be65..a76d171 100644 | |
--- a/arch/arm/mach-imx/mm-imx27.c | |
+++ b/arch/arm/mach-imx/mm-imx27.c | |
@@ -82,7 +82,11 @@ static const struct resource imx27_audmux_res[] __initconst = { | |
void __init imx27_soc_init(void) | |
{ | |
+#ifdef CONFIG_IPIPE | |
+ volatile unsigned long aips_reg; | |
+ void __iomem *aips_virt; | |
+#endif | |
mxc_arch_reset_init(MX27_IO_ADDRESS(MX27_WDOG_BASE_ADDR)); | |
mxc_device_init(); | |
/* i.mx27 has the i.mx21 type gpio */ | |
@@ -95,6 +99,15 @@ void __init imx27_soc_init(void) | |
pinctrl_provide_dummies(); | |
imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, | |
MX27_INT_DMACH0, 0); /* No ERR irq */ | |
+ | |
+ /* Setup AIPS register */ | |
+#ifdef CONFIG_IPIPE | |
+ aips_virt = (void __iomem *)MX27_IO_P2V(MX27_AIPI_BASE_ADDR); | |
+ aips_reg = __raw_readl(aips_virt + 8); | |
+ aips_reg &= ~(1 << 3); | |
+ __raw_writel(aips_reg, aips_virt); | |
+#endif | |
+ | |
/* imx27 has the imx21 type audmux */ | |
platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res, | |
ARRAY_SIZE(imx27_audmux_res)); | |
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c | |
index e0e69a6..e0d354f 100644 | |
--- a/arch/arm/mach-imx/mm-imx3.c | |
+++ b/arch/arm/mach-imx/mm-imx3.c | |
@@ -20,6 +20,7 @@ | |
#include <linux/init.h> | |
#include <linux/err.h> | |
#include <linux/pinctrl/machine.h> | |
+#include <linux/cpu.h> | |
#include <asm/pgtable.h> | |
#include <asm/system_misc.h> | |
@@ -140,7 +141,11 @@ void __init imx31_init_early(void) | |
mxc_set_cpu_type(MXC_CPU_MX31); | |
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); | |
arch_ioremap_caller = imx3_ioremap_caller; | |
+#ifdef CONFIG_IPIPE | |
+ cpu_idle_poll_ctrl(true); | |
+#else /* !CONFIG_IPIPE */ | |
arm_pm_idle = imx3_idle; | |
+#endif /* !CONFIG_IPIPE */ | |
mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR); | |
} | |
@@ -217,6 +222,10 @@ void __init imx35_init_early(void) | |
mxc_set_cpu_type(MXC_CPU_MX35); | |
mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR)); | |
+#ifdef CONFIG_IPIPE | |
+ cpu_idle_poll_ctrl(true); | |
+#else /* !CONFIG_IPIPE */ | |
arm_pm_idle = imx3_idle; | |
+#endif /* !CONFIG_IPIPE */ | |
arch_ioremap_caller = imx3_ioremap_caller; | |
mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR); | |
} | |
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c | |
index b7c4e70..626051f 100644 | |
--- a/arch/arm/mach-imx/mm-imx5.c | |
+++ b/arch/arm/mach-imx/mm-imx5.c | |
@@ -154,10 +154,20 @@ void __init imx51_soc_init(void) | |
void __init imx51_init_late(void) | |
{ | |
mx51_neon_fixup(); | |
+#ifdef CONFIG_IPIPE | |
+ /* Allow user-space access to emulated tsc */ | |
+ imx_set_aips(MX51_IO_ADDRESS(MX51_AIPS1_BASE_ADDR)); | |
+ imx_set_aips(MX51_IO_ADDRESS(MX51_AIPS2_BASE_ADDR)); | |
+#endif | |
imx51_pm_init(); | |
} | |
void __init imx53_init_late(void) | |
{ | |
+#ifdef CONFIG_IPIPE | |
+ /* Allow user-space access to emulated tsc */ | |
+ imx_set_aips(MX51_IO_ADDRESS(MX53_AIPS1_BASE_ADDR)); | |
+ imx_set_aips(MX51_IO_ADDRESS(MX53_AIPS2_BASE_ADDR)); | |
+#endif | |
imx53_pm_init(); | |
} | |
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c | |
index fea9131..ab0130f 100644 | |
--- a/arch/arm/mach-imx/time.c | |
+++ b/arch/arm/mach-imx/time.c | |
@@ -26,6 +26,8 @@ | |
#include <linux/clockchips.h> | |
#include <linux/clk.h> | |
#include <linux/err.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/sched_clock.h> | |
#include <asm/mach/time.h> | |
@@ -228,12 +230,8 @@ static void mxc_set_mode(enum clock_event_mode mode, | |
} | |
} | |
-/* | |
- * IRQ handler for the timer | |
- */ | |
-static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id) | |
+static inline void mxc_timer_ack(void) | |
{ | |
- struct clock_event_device *evt = &clockevent_mxc; | |
uint32_t tstat; | |
if (timer_is_v2()) | |
@@ -242,12 +240,40 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id) | |
tstat = __raw_readl(timer_base + MX1_2_TSTAT); | |
gpt_irq_acknowledge(); | |
+} | |
+ | |
+/* | |
+ * IRQ handler for the timer | |
+ */ | |
+static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id) | |
+{ | |
+ struct clock_event_device *evt = &clockevent_mxc; | |
+ | |
+ if (!clockevent_ipipe_stolen(evt)) | |
+ mxc_timer_ack(); | |
+ | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
return IRQ_HANDLED; | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .u = { | |
+ { | |
+ .mask = 0xffffffff, | |
+ }, | |
+ }, | |
+}; | |
+ | |
+static struct ipipe_timer mxc_itimer = { | |
+ .ack = mxc_timer_ack, | |
+}; | |
+#endif | |
+ | |
static struct irqaction mxc_timer_irq = { | |
.name = "i.MX Timer Tick", | |
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, | |
@@ -260,6 +286,9 @@ static struct clock_event_device clockevent_mxc = { | |
.set_mode = mxc_set_mode, | |
.set_next_event = mx1_2_set_next_event, | |
.rating = 200, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &mxc_itimer, | |
+#endif | |
}; | |
static int __init mxc_clockevent_init(struct clk *timer_clk) | |
@@ -275,7 +304,8 @@ static int __init mxc_clockevent_init(struct clk *timer_clk) | |
return 0; | |
} | |
-void __init mxc_timer_init(void __iomem *base, int irq) | |
+void __init | |
+mxc_timer_init(void __iomem *base, unsigned long phys, int irq) | |
{ | |
uint32_t tctl_val; | |
struct clk *timer_clk; | |
@@ -311,8 +341,29 @@ void __init mxc_timer_init(void __iomem *base, int irq) | |
/* init and register the timer to the framework */ | |
mxc_clocksource_init(timer_clk); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ if (num_online_cpus() == 1) { | |
+ tsc_info.freq = clk_get_rate(timer_clk); | |
+ | |
+ if (timer_is_v1()) { | |
+ tsc_info.u.counter_paddr = phys + MX1_2_TCN; | |
+ tsc_info.counter_vaddr = (unsigned long)(timer_base + MX1_2_TCN); | |
+ } else { | |
+ tsc_info.u.counter_paddr = phys + V2_TCN; | |
+ tsc_info.counter_vaddr = (unsigned long)(timer_base + V2_TCN); | |
+ } | |
+ __ipipe_tsc_register(&tsc_info); | |
+ } | |
+ | |
+ mxc_itimer.irq = irq; | |
+ mxc_itimer.freq = clk_get_rate(timer_clk); | |
+ mxc_itimer.min_delay_ticks = ipipe_timer_ns2ticks(&mxc_itimer, 2000); | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
mxc_clockevent_init(timer_clk); | |
/* Make irqs happen */ | |
setup_irq(irq, &mxc_timer_irq); | |
+ | |
} | |
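
time.c shows the standard I-pipe clockevent adaptation. The acknowledge is factored out of the handler because, when the head domain (e.g. Xenomai) has "stolen" the device, it acks the hardware itself before relaying the tick, so the Linux-side handler must skip the ack; __ipipe_tsc_update() refreshes the snapshot exported to the user-space TSC helper. The resulting handler shape, with my_timer_ack() as a placeholder for the hardware acknowledge:

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	if (!clockevent_ipipe_stolen(evt))
		my_timer_ack();   /* head domain already acked otherwise */

	__ipipe_tsc_update();     /* refresh the exported tsc snapshot */
	evt->event_handler(evt);
	return IRQ_HANDLED;
}
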
diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c | |
index 8183178..25e53e1 100644 | |
--- a/arch/arm/mach-imx/tzic.c | |
+++ b/arch/arm/mach-imx/tzic.c | |
@@ -116,6 +116,9 @@ static __init void tzic_init_gc(int idx, unsigned int irq_start) | |
ct = gc->chip_types; | |
ct->chip.irq_mask = irq_gc_mask_disable_reg; | |
ct->chip.irq_unmask = irq_gc_unmask_enable_reg; | |
+#ifdef CONFIG_IPIPE | |
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; | |
+#endif /* CONFIG_IPIPE */ | |
ct->chip.irq_set_wake = irq_gc_set_wake; | |
ct->chip.irq_suspend = tzic_irq_suspend; | |
ct->chip.irq_resume = tzic_irq_resume; | |
@@ -140,7 +143,7 @@ asmlinkage void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs) | |
while (stat) { | |
handled = 1; | |
irqofs = fls(stat) - 1; | |
- handle_IRQ(irq_find_mapping(domain, | |
+ ipipe_handle_multi_irq(irq_find_mapping(domain, | |
irqofs + i * 32), regs); | |
stat &= ~(1 << irqofs); | |
} | |
@@ -148,6 +151,27 @@ asmlinkage void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs) | |
} while (handled); | |
} | |
+ | |
+#if defined(CONFIG_IPIPE) | |
+void tzic_set_irq_prio(unsigned irq, unsigned hi) | |
+{ | |
+ if (irq >= TZIC_NUM_IRQS) | |
+ return; | |
+ | |
+ __raw_writeb(hi ? 0 : 0x80, tzic_base + TZIC_PRIORITY0 + irq); | |
+} | |
+ | |
+void tzic_mute_pic(void) | |
+{ | |
+ __raw_writel(0x10, tzic_base + TZIC_PRIOMASK); | |
+} | |
+ | |
+void tzic_unmute_pic(void) | |
+{ | |
+ __raw_writel(0xf0, tzic_base + TZIC_PRIOMASK); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
/* | |
* This function initializes the TZIC hardware and disables all the | |
* interrupts. It registers the interrupt enable and disable functions | |
@@ -166,8 +190,13 @@ void __init tzic_init_irq(void __iomem *irqbase) | |
i = __raw_readl(tzic_base + TZIC_INTCNTL); | |
__raw_writel(0x80010001, tzic_base + TZIC_INTCNTL); | |
+#ifndef CONFIG_IPIPE | |
__raw_writel(0x1f, tzic_base + TZIC_PRIOMASK); | |
__raw_writel(0x02, tzic_base + TZIC_SYNCCTRL); | |
+#else | |
+ __raw_writel(0xf0, tzic_base + TZIC_PRIOMASK); | |
+ __raw_writel(0, tzic_base + TZIC_SYNCCTRL); | |
+#endif | |
for (i = 0; i < 4; i++) | |
__raw_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i)); | |
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c | |
index 81461d2..1195aa8 100644 | |
--- a/arch/arm/mach-integrator/core.c | |
+++ b/arch/arm/mach-integrator/core.c | |
@@ -2,6 +2,7 @@ | |
* linux/arch/arm/mach-integrator/core.c | |
* | |
* Copyright (C) 2000-2003 Deep Blue Solutions Ltd | |
+ * Copyright (C) 2005 Stelian Pop. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License version 2, as | |
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h | |
index be5859e..e6dcd75 100644 | |
--- a/arch/arm/mach-integrator/include/mach/platform.h | |
+++ b/arch/arm/mach-integrator/include/mach/platform.h | |
@@ -390,7 +390,7 @@ | |
* Timer definitions | |
* | |
* Only use timer 1 & 2 | |
- * (both run at 24MHz and will need the clock divider set to 16). | |
+ * (both run at 1MHz on /CP and at 24MHz on /AP). | |
* | |
* Timer 0 runs at bus frequency | |
*/ | |
diff --git a/arch/arm/mach-integrator/include/mach/timex.h b/arch/arm/mach-integrator/include/mach/timex.h | |
index 1dcb420..a04653a 100644 | |
--- a/arch/arm/mach-integrator/include/mach/timex.h | |
+++ b/arch/arm/mach-integrator/include/mach/timex.h | |
@@ -21,6 +21,6 @@ | |
*/ | |
/* | |
- * ?? | |
+ * Timer rate | |
*/ | |
-#define CLOCK_TICK_RATE (50000000 / 16) | |
+#define CLOCK_TICK_RATE (1000000) | |
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c | |
index 8c60fcb..c4542e5 100644 | |
--- a/arch/arm/mach-integrator/integrator_cp.c | |
+++ b/arch/arm/mach-integrator/integrator_cp.c | |
@@ -2,6 +2,7 @@ | |
* linux/arch/arm/mach-integrator/integrator_cp.c | |
* | |
* Copyright (C) 2003 Deep Blue Solutions Ltd | |
+ * Copyright (C) 2005 Stelian Pop. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License as published by | |
@@ -477,7 +478,7 @@ static void __init cp_timer_init(void) | |
writel(0, TIMER1_VA_BASE + TIMER_CTRL); | |
writel(0, TIMER2_VA_BASE + TIMER_CTRL); | |
- sp804_clocksource_init(TIMER2_VA_BASE, "timer2"); | |
+ sp804_clocksource_init(TIMER2_VA_BASE, INTEGRATOR_TIMER2_BASE, "timer2"); | |
sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1, "timer1"); | |
} | |
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c | |
index 6600cff..8acf623 100644 | |
--- a/arch/arm/mach-ixp4xx/common.c | |
+++ b/arch/arm/mach-ixp4xx/common.c | |
@@ -6,10 +6,10 @@ | |
* Maintainer: Deepak Saxena <[email protected]> | |
* | |
* Copyright 2002 (c) Intel Corporation | |
- * Copyright 2003-2004 (c) MontaVista, Software, Inc. | |
- * | |
- * This file is licensed under the terms of the GNU General Public | |
- * License version 2. This program is licensed "as is" without any | |
+ * Copyright 2003-2004 (c) MontaVista, Software, Inc. | |
+ * | |
+ * This file is licensed under the terms of the GNU General Public | |
+ * License version 2. This program is licensed "as is" without any | |
* warranty of any kind, whether express or implied. | |
*/ | |
@@ -30,6 +30,8 @@ | |
#include <linux/export.h> | |
#include <linux/gpio.h> | |
#include <linux/cpu.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <mach/udc.h> | |
#include <mach/hardware.h> | |
@@ -246,7 +248,7 @@ void __init ixp4xx_init_irq(void) | |
*IXP4XX_ICLR = 0x0; | |
/* Disable all interrupt */ | |
- *IXP4XX_ICMR = 0x0; | |
+ *IXP4XX_ICMR = 0x0; | |
if (cpu_is_ixp46x() || cpu_is_ixp43x()) { | |
/* Route upper 32 sources to IRQ instead of FIQ */ | |
@@ -256,7 +258,7 @@ void __init ixp4xx_init_irq(void) | |
*IXP4XX_ICMR2 = 0x00; | |
} | |
- /* Default to all level triggered */ | |
+ /* Default to all level triggered */ | |
for(i = 0; i < NR_IRQS; i++) { | |
irq_set_chip_and_handler(i, &ixp4xx_irq_chip, | |
handle_level_irq); | |
@@ -264,10 +266,15 @@ void __init ixp4xx_init_irq(void) | |
} | |
} | |
+static inline void ixp4xx_timer_ack(void) | |
+{ | |
+ /* Clear Pending Interrupt by writing '1' to it */ | |
+ *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; | |
+} | |
/************************************************************************* | |
* IXP4xx timer tick | |
- * We use OS timer1 on the CPU for the timer tick and the timestamp | |
+ * We use OS timer1 on the CPU for the timer tick and the timestamp | |
* counter as a source of real clock ticks to account for missed jiffies. | |
*************************************************************************/ | |
@@ -275,8 +282,10 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = dev_id; | |
- /* Clear Pending Interrupt by writing '1' to it */ | |
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; | |
+ if (!clockevent_ipipe_stolen(evt)) | |
+ ixp4xx_timer_ack(); | |
+ | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
@@ -464,12 +473,31 @@ static cycle_t ixp4xx_clocksource_read(struct clocksource *c) | |
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; | |
EXPORT_SYMBOL(ixp4xx_timer_freq); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .freq = IXP4XX_TIMER_FREQ, | |
+ .counter_vaddr = (unsigned long)IXP4XX_OSTS, | |
+ .u = { | |
+ { | |
+ .mask = 0xffffffff, | |
+ .counter_paddr = IXP4XX_TIMER_BASE_PHYS, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void __init ixp4xx_clocksource_init(void) | |
{ | |
setup_sched_clock(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq); | |
clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32, | |
ixp4xx_clocksource_read); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif | |
} | |
/* | |
@@ -516,12 +544,23 @@ static void ixp4xx_set_mode(enum clock_event_mode mode, | |
*IXP4XX_OSRT1 = osrt | opts; | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer ixp4xx_itimer = { | |
+ .irq = IRQ_IXP4XX_TIMER1, | |
+ .min_delay_ticks = 333, /* 5 usec with the 66.66 MHz system clock */ | |
+ .ack = ixp4xx_timer_ack, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device clockevent_ixp4xx = { | |
.name = "ixp4xx timer1", | |
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
.rating = 200, | |
.set_mode = ixp4xx_set_mode, | |
.set_next_event = ixp4xx_set_next_event, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &ixp4xx_itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static void __init ixp4xx_clockevent_init(void) | |
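
The 333-tick floor on the one-shot delay matches its comment, assuming IXP4XX_TIMER_FREQ keeps its in-tree value of 66666000: at 66.66 MHz, 5 µs is 66,666,000 × 5 / 1,000,000 ≈ 333 ticks, below which a newly programmed match could already be in the past by the time the register write lands. As a check:

/* min_delay_ticks = freq * min_delay
 *                 = 66666000 Hz * 5e-6 s = 333.33 -> 333 ticks */
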
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h | |
index db5afb6..c54231a 100644 | |
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h | |
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h | |
@@ -72,8 +72,8 @@ extern unsigned long ixp4xx_exp_bus_size; | |
/* | |
* Clock Speed Definitions. | |
*/ | |
-#define IXP4XX_PERIPHERAL_BUS_CLOCK (66) /* 66Mhzi APB BUS */ | |
-#define IXP4XX_UART_XTAL 14745600 | |
+#define IXP4XX_PERIPHERAL_BUS_CLOCK (66) /* 66MHz APB bus */ | |
+#define IXP4XX_UART_XTAL 14745600 | |
/* | |
* This structure provide a means for the board setup code | |
@@ -137,7 +137,7 @@ extern struct pci_ops ixp4xx_ops; | |
*/ | |
/* GPIO pin types */ | |
#define IXP4XX_GPIO_OUT 0x1 | |
-#define IXP4XX_GPIO_IN 0x2 | |
+#define IXP4XX_GPIO_IN 0x2 | |
/* GPIO signal types */ | |
#define IXP4XX_GPIO_LOW 0 | |
@@ -149,10 +149,14 @@ extern struct pci_ops ixp4xx_ops; | |
static inline void gpio_line_config(u8 line, u32 direction) | |
{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
if (direction == IXP4XX_GPIO_IN) | |
*IXP4XX_GPIO_GPOER |= (1 << line); | |
else | |
*IXP4XX_GPIO_GPOER &= ~(1 << line); | |
+ hard_local_irq_restore(flags); | |
} | |
static inline void gpio_line_get(u8 line, int *value) | |
@@ -162,11 +166,14 @@ static inline void gpio_line_get(u8 line, int *value) | |
static inline void gpio_line_set(u8 line, int value) | |
{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
if (value == IXP4XX_GPIO_HIGH) | |
*IXP4XX_GPIO_GPOUTR |= (1 << line); | |
else if (value == IXP4XX_GPIO_LOW) | |
*IXP4XX_GPIO_GPOUTR &= ~(1 << line); | |
+ hard_local_irq_restore(flags); | |
} | |
#endif // __ASSEMBLY__ | |
- | |
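
The GPIO helpers gain hard masking because they read-modify-write registers shared by every line: under pipelining, a head-domain interrupt can arrive even while Linux believes interrupts are off, toggle another bit in GPOUTR/GPOER, and have its update silently undone by the stale write-back. The protected RMW from the hunk above, isolated:

unsigned long flags;

flags = hard_local_irq_save();        /* close the window for all domains */
*IXP4XX_GPIO_GPOUTR |= (1 << line);   /* read, set bit, write back */
hard_local_irq_restore(flags);
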
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig | |
index 4dc2fbb..fd5cc93 100644 | |
--- a/arch/arm/mach-mxs/Kconfig | |
+++ b/arch/arm/mach-mxs/Kconfig | |
@@ -14,6 +14,7 @@ config SOC_IMX28 | |
select HAVE_CAN_FLEXCAN if CAN | |
select HAVE_PWM | |
select PINCTRL_IMX28 | |
+ select IPIPE_ARM_KUSER_TSC if IPIPE | |
config ARCH_MXS | |
bool "Freescale MXS (i.MX23, i.MX28) support" | |
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c | |
index 6c4da12..ce74b45 100644 | |
--- a/arch/arm/mach-omap2/gpmc.c | |
+++ b/arch/arm/mach-omap2/gpmc.c | |
@@ -33,6 +33,8 @@ | |
#include <linux/platform_data/mtd-nand-omap2.h> | |
+#include <linux/ipipe.h> | |
+ | |
#include <asm/mach-types.h> | |
#include "soc.h" | |
@@ -1694,7 +1696,7 @@ static irqreturn_t gpmc_handle_irq(int irq, void *dev) | |
for (i = 0; i < GPMC_NR_IRQ; i++) | |
if (regval & gpmc_client_irq[i].bitmask) | |
- generic_handle_irq(gpmc_client_irq[i].irq); | |
+ ipipe_handle_demuxed_irq(gpmc_client_irq[i].irq); | |
gpmc_write_reg(GPMC_IRQSTATUS, regval); | |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c | |
index 09abf99..a34551a 100644 | |
--- a/arch/arm/mach-omap2/io.c | |
+++ b/arch/arm/mach-omap2/io.c | |
@@ -21,9 +21,11 @@ | |
#include <linux/init.h> | |
#include <linux/io.h> | |
#include <linux/clk.h> | |
+#include <linux/cpu.h> | |
#include <asm/tlb.h> | |
#include <asm/mach/map.h> | |
+#include <asm/system_misc.h> | |
#include <linux/omap-dma.h> | |
@@ -483,6 +485,9 @@ void __init omap3_init_early(void) | |
omap3xxx_hwmod_init(); | |
omap_hwmod_init_postsetup(); | |
omap_clk_init = omap3xxx_clk_init; | |
+#ifdef CONFIG_IPIPE | |
+ cpu_idle_poll_ctrl(true); | |
+#endif | |
} | |
void __init omap3430_init_early(void) | |
@@ -521,6 +526,9 @@ void __init ti81xx_init_early(void) | |
omap3xxx_hwmod_init(); | |
omap_hwmod_init_postsetup(); | |
omap_clk_init = omap3xxx_clk_init; | |
+#ifdef CONFIG_IPIPE | |
+ cpu_idle_poll_ctrl(true); | |
+#endif | |
} | |
void __init omap3_init_late(void) | |
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c | |
index 3926f37..e54d10c 100644 | |
--- a/arch/arm/mach-omap2/irq.c | |
+++ b/arch/arm/mach-omap2/irq.c | |
@@ -15,6 +15,7 @@ | |
#include <linux/init.h> | |
#include <linux/interrupt.h> | |
#include <linux/io.h> | |
+#include <asm/ipipe.h> | |
#include <asm/exception.h> | |
#include <asm/mach/irq.h> | |
@@ -41,6 +42,7 @@ | |
#define INTC_MIR_CLEAR0 0x0088 | |
#define INTC_MIR_SET0 0x008c | |
#define INTC_PENDING_IRQ0 0x0098 | |
+#define INTC_PRIO 0x0100 | |
/* Number of IRQ state bits in each MIR register */ | |
#define IRQ_BITS_PER_REG 32 | |
@@ -51,6 +53,12 @@ | |
#define INTCPS_NR_MIR_REGS 3 | |
#define INTCPS_NR_IRQS 96 | |
+#if !defined(MULTI_OMAP1) && !defined(MULTI_OMAP2) | |
+#define inline_single inline | |
+#else | |
+#define inline_single | |
+#endif | |
+ | |
/* | |
* OMAP2 has a number of different interrupt controllers, each interrupt | |
* controller is identified as its own "bank". Register definitions are | |
@@ -81,20 +89,21 @@ struct omap3_intc_regs { | |
/* INTC bank register get/set */ | |
-static void intc_bank_write_reg(u32 val, struct omap_irq_bank *bank, u16 reg) | |
+static inline_single void intc_bank_write_reg(u32 val, struct omap_irq_bank *bank, u16 reg) | |
{ | |
__raw_writel(val, bank->base_reg + reg); | |
} | |
-static u32 intc_bank_read_reg(struct omap_irq_bank *bank, u16 reg) | |
+static inline_single u32 intc_bank_read_reg(struct omap_irq_bank *bank, u16 reg) | |
{ | |
return __raw_readl(bank->base_reg + reg); | |
} | |
/* XXX: FIQ and additional INTC support (only MPU at the moment) */ | |
-static void omap_ack_irq(struct irq_data *d) | |
+static inline_single void omap_ack_irq(struct irq_data *d) | |
{ | |
intc_bank_write_reg(0x1, &irq_banks[0], INTC_CONTROL); | |
+ dsb(); | |
} | |
static void omap_mask_ack_irq(struct irq_data *d) | |
@@ -118,8 +127,15 @@ static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank) | |
while (!(intc_bank_read_reg(bank, INTC_SYSSTATUS) & 0x1)) | |
/* Wait for reset to complete */; | |
+#ifndef CONFIG_IPIPE | |
/* Enable autoidle */ | |
intc_bank_write_reg(1 << 0, bank, INTC_SYSCONFIG); | |
+ intc_bank_write_reg(0x2, bank, INTC_IDLE); | |
+#else /* CONFIG_IPIPE */ | |
+ /* Disable autoidle */ | |
+ intc_bank_write_reg(0, bank, INTC_SYSCONFIG); | |
+ intc_bank_write_reg(0x1, bank, INTC_IDLE); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
int omap_irq_pending(void) | |
@@ -149,6 +165,9 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |
ct = gc->chip_types; | |
ct->chip.irq_ack = omap_mask_ack_irq; | |
ct->chip.irq_mask = irq_gc_mask_disable_reg; | |
+#ifdef CONFIG_IPIPE | |
+ ct->chip.irq_mask_ack = omap_mask_ack_irq; | |
+#endif | |
ct->chip.irq_unmask = irq_gc_unmask_enable_reg; | |
ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; | |
@@ -248,7 +267,7 @@ out: | |
if (irqnr) { | |
irqnr = irq_find_mapping(domain, irqnr); | |
- handle_IRQ(irqnr, regs); | |
+ ipipe_handle_multi_irq(irqnr, regs); | |
} | |
} while (irqnr); | |
} | |
@@ -291,6 +310,35 @@ void __init omap_intc_of_init(void) | |
of_irq_init(irq_match); | |
} | |
+ | |
+#if defined(CONFIG_IPIPE) && defined(CONFIG_ARCH_OMAP2PLUS) | |
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) | |
+void omap3_intc_mute(void) | |
+{ | |
+ struct omap_irq_bank *bank = &irq_banks[0]; | |
+ | |
+ intc_bank_write_reg(0x1, bank, INTC_THRESHOLD); | |
+ intc_bank_write_reg(0x1, bank, INTC_CONTROL); | |
+} | |
+ | |
+void omap3_intc_unmute(void) | |
+{ | |
+ struct omap_irq_bank *bank = &irq_banks[0]; | |
+ | |
+ intc_bank_write_reg(0xff, bank, INTC_THRESHOLD); | |
+} | |
+ | |
+void omap3_intc_set_irq_prio(int irq, int hi) | |
+{ | |
+ struct omap_irq_bank *bank = &irq_banks[0]; | |
+ | |
+ if (irq >= INTCPS_NR_MIR_REGS * 32) | |
+ return; | |
+ intc_bank_write_reg(hi ? 0 : 0xfc, bank, INTC_PRIO + 4 * irq); | |
+} | |
+#endif /* CONFIG_ARCH_OMAP3 */ | |
+#endif /* CONFIG_IPIPE && ARCH_OMAP2PLUS */ | |
+ | |
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) | |
static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)]; | |
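
On OMAP3 the muter works through the INTC priority threshold: omap3_intc_set_irq_prio() gives head-domain interrupts priority 0 and everything else 0xfc, so omap3_intc_mute() can set INTC_THRESHOLD to 1 and let only the priority-0 lines through while the root domain is virtually masked; the INTC_CONTROL write afterwards re-arms priority sorting so the threshold takes effect at once, and unmute restores 0xff (all priorities pass). Conceptually, with rt_timer_irq as a placeholder:

/* Conceptual sequence, not a literal kernel call chain: */
omap3_intc_set_irq_prio(rt_timer_irq, 1); /* promote the rt timer */
omap3_intc_mute();    /* THRESHOLD = 1: only prio-0 IRQs delivered */
/* ... root domain runs with IRQs virtually disabled ... */
omap3_intc_unmute();  /* THRESHOLD = 0xff: everything flows again */
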
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c | |
index f82cf87..31ac06b 100644 | |
--- a/arch/arm/mach-omap2/mux.c | |
+++ b/arch/arm/mach-omap2/mux.c | |
@@ -34,6 +34,7 @@ | |
#include <linux/uaccess.h> | |
#include <linux/irq.h> | |
#include <linux/interrupt.h> | |
+#include <linux/ipipe.h> | |
#include "omap_hwmod.h" | |
@@ -392,7 +393,7 @@ static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux, | |
handled_irqs |= 1 << irq; | |
- generic_handle_irq(mpu_irqs[irq].irq); | |
+ ipipe_handle_demuxed_irq(mpu_irqs[irq].irq); | |
} | |
return false; | |
@@ -409,7 +410,7 @@ static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data) | |
if (!oh->mux || !oh->mux->enabled) | |
return 0; | |
if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs)) | |
- generic_handle_irq(oh->mpu_irqs[0].irq); | |
+ ipipe_handle_demuxed_irq(oh->mpu_irqs[0].irq); | |
return 0; | |
} | |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c | |
index f8bb3b9..1e332a2 100644 | |
--- a/arch/arm/mach-omap2/omap-wakeupgen.c | |
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |
@@ -426,6 +426,7 @@ int __init omap_wakeupgen_init(void) | |
wakeupgen_writel(0, i, CPU1_ID); | |
} | |
+#ifndef CONFIG_IPIPE | |
/* | |
* Override GIC architecture specific functions to add | |
* OMAP WakeupGen interrupt controller along with GIC | |
@@ -433,6 +434,7 @@ int __init omap_wakeupgen_init(void) | |
gic_arch_extn.irq_mask = wakeupgen_mask; | |
gic_arch_extn.irq_unmask = wakeupgen_unmask; | |
gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; | |
+#endif | |
/* | |
* FIXME: Add support to set_smp_affinity() once the core | |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c | |
index 5a2d803..89cbc7c 100644 | |
--- a/arch/arm/mach-omap2/pm34xx.c | |
+++ b/arch/arm/mach-omap2/pm34xx.c | |
@@ -346,6 +346,10 @@ void omap_sram_idle(void) | |
static void omap3_pm_idle(void) | |
{ | |
+#ifdef CONFIG_IPIPE | |
+ BUG(); | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
if (omap_irq_pending()) | |
return; | |
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c | |
index a251f87..de366c5 100644 | |
--- a/arch/arm/mach-omap2/pm44xx.c | |
+++ b/arch/arm/mach-omap2/pm44xx.c | |
@@ -131,7 +131,13 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) | |
*/ | |
static void omap_default_idle(void) | |
{ | |
+ hard_local_irq_disable(); | |
+ hard_local_fiq_disable_notrace(); | |
+ | |
omap_do_wfi(); | |
+ | |
+ hard_local_fiq_enable_notrace(); | |
+ hard_local_irq_enable(); | |
} | |
/** | |
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c | |
index 228b850..81fa83b 100644 | |
--- a/arch/arm/mach-omap2/prm_common.c | |
+++ b/arch/arm/mach-omap2/prm_common.c | |
@@ -131,11 +131,11 @@ static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) | |
/* Serve priority events first */ | |
for_each_set_bit(virtirq, priority_pending, nr_irq) | |
- generic_handle_irq(prcm_irq_setup->base_irq + virtirq); | |
+ ipipe_handle_demuxed_irq(prcm_irq_setup->base_irq + virtirq); | |
/* Serve normal events next */ | |
for_each_set_bit(virtirq, pending, nr_irq) | |
- generic_handle_irq(prcm_irq_setup->base_irq + virtirq); | |
+ ipipe_handle_demuxed_irq(prcm_irq_setup->base_irq + virtirq); | |
} | |
if (chip->irq_ack) | |
chip->irq_ack(&desc->irq_data); | |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c | |
index f8b23b8..c5f16dc 100644 | |
--- a/arch/arm/mach-omap2/timer.c | |
+++ b/arch/arm/mach-omap2/timer.c | |
@@ -41,6 +41,9 @@ | |
#include <linux/of_irq.h> | |
#include <linux/platform_device.h> | |
#include <linux/platform_data/dmtimer-omap.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/export.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/mach/time.h> | |
#include <asm/smp_twd.h> | |
@@ -61,16 +64,31 @@ | |
#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET 0x14 | |
#define NUMERATOR_DENUMERATOR_MASK 0xfffff000 | |
+#ifdef CONFIG_IPIPE | |
+void __init omap3_pic_muter_register(void); | |
+void __init omap4_pic_muter_register(void); | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
/* Clockevent code */ | |
static struct omap_dm_timer clkev; | |
static struct clock_event_device clockevent_gpt; | |
+static void omap2_gp_timer_ack(void) | |
+{ | |
+ __omap_dm_timer_write_status(&clkev, OMAP_TIMER_INT_OVERFLOW); | |
+ __omap_dm_timer_read_status(&clkev); | |
+} | |
+ | |
static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = &clockevent_gpt; | |
- __omap_dm_timer_write_status(&clkev, OMAP_TIMER_INT_OVERFLOW); | |
+ if (!clockevent_ipipe_stolen(evt)) | |
+ omap2_gp_timer_ack(); | |
+ | |
+ if (num_possible_cpus() == 1) | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
return IRQ_HANDLED; | |
@@ -86,7 +104,7 @@ static int omap2_gp_timer_set_next_event(unsigned long cycles, | |
struct clock_event_device *evt) | |
{ | |
__omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_ST, | |
- 0xffffffff - cycles, OMAP_TIMER_POSTED); | |
+ 0xffffffff - cycles, OMAP_TIMER_NONPOSTED); | |
return 0; | |
} | |
@@ -96,7 +114,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode, | |
{ | |
u32 period; | |
- __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate); | |
+ __omap_dm_timer_stop(&clkev, OMAP_TIMER_NONPOSTED, clkev.rate); | |
switch (mode) { | |
case CLOCK_EVT_MODE_PERIODIC: | |
@@ -104,10 +122,10 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode, | |
period -= 1; | |
/* Looks like we need to first set the load value separately */ | |
__omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, | |
- 0xffffffff - period, OMAP_TIMER_POSTED); | |
+ 0xffffffff - period, OMAP_TIMER_NONPOSTED); | |
__omap_dm_timer_load_start(&clkev, | |
OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, | |
- 0xffffffff - period, OMAP_TIMER_POSTED); | |
+ 0xffffffff - period, OMAP_TIMER_NONPOSTED); | |
break; | |
case CLOCK_EVT_MODE_ONESHOT: | |
break; | |
@@ -217,7 +235,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |
const char *fck_source, | |
const char *property, | |
const char **timer_name, | |
- int posted) | |
+ int posted, int ipipe) | |
{ | |
char name[10]; /* 10 = sizeof("gptXX_Xck0") */ | |
const char *oh_name; | |
@@ -241,6 +259,9 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |
return -ENXIO; | |
timer->io_base = of_iomap(np, 0); | |
+ if (of_address_to_resource(np, 0, &mem)) | |
+ mem.start = 0; | |
+ timer->phys_base = mem.start; | |
of_node_put(np); | |
} else { | |
@@ -270,6 +291,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |
return -ENXIO; | |
/* Static mapping, never released */ | |
+ timer->phys_base = mem.start; | |
timer->io_base = ioremap(mem.start, mem.end - mem.start); | |
} | |
@@ -300,6 +322,15 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |
omap_hwmod_setup_one(oh_name); | |
omap_hwmod_enable(oh); | |
__omap_dm_timer_init_regs(timer); | |
+#ifdef CONFIG_IPIPE | |
+ if (ipipe) { | |
+ u32 l; | |
+ | |
+ l = __raw_readl(timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET); | |
+ l = (0x3 << 8) | (l & (1 << 5)) | (0x1 << 3) | (1 << 2); | |
+ __raw_writel(l, timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET); | |
+ } | |
+#endif | |
if (posted) | |
__omap_dm_timer_enable_posted(timer); | |
@@ -314,11 +345,63 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |
return r; | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer omap_shared_itimer = { | |
+ .ack = omap2_gp_timer_ack, | |
+ .min_delay_ticks = 3, | |
+}; | |
+ | |
+#define IPIPE_GPTIMER 3 | |
+ | |
+static struct omap_dm_timer itimer; | |
+static void omap3_itimer_request(struct ipipe_timer *timer, int steal) | |
+{ | |
+ __omap_dm_timer_stop(&itimer, 0, itimer.rate); | |
+} | |
+ | |
+static int omap3_itimer_set(unsigned long cycles, void *timer) | |
+{ | |
+ __omap_dm_timer_load_start(&itimer, OMAP_TIMER_CTRL_ST, | |
+ 0xffffffff - cycles, OMAP_TIMER_NONPOSTED); | |
+ return 0; | |
+} | |
+ | |
+static void omap3_itimer_ack(void) | |
+{ | |
+ __omap_dm_timer_write_status(&itimer, OMAP_TIMER_INT_OVERFLOW); | |
+ __omap_dm_timer_read_status(&itimer); | |
+} | |
+ | |
+static void omap3_itimer_release(struct ipipe_timer *timer) | |
+{ | |
+ __omap_dm_timer_stop(&itimer, 0, itimer.rate); | |
+} | |
+ | |
+static struct ipipe_timer omap3_itimer = { | |
+ .request = omap3_itimer_request, | |
+ .set = omap3_itimer_set, | |
+ .ack = omap3_itimer_ack, | |
+ .release = omap3_itimer_release, | |
+ | |
+ .rating = 100, | |
+ .min_delay_ticks = 3, | |
+}; | |
+ | |
+static struct __ipipe_tscinfo __maybe_unused tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .u = { | |
+ { | |
+ .mask = 0xffffffff, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void __init omap2_gp_clockevent_init(int gptimer_id, | |
const char *fck_source, | |
const char *property) | |
{ | |
- int res; | |
+ int res, ipipe = 0; | |
clkev.id = gptimer_id; | |
clkev.errata = omap_dm_timer_get_errata(); | |
@@ -330,8 +413,33 @@ static void __init omap2_gp_clockevent_init(int gptimer_id, | |
*/ | |
__omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767); | |
+#ifdef CONFIG_IPIPE | |
+ if (cpu_is_omap34xx()) { | |
+ itimer.id = IPIPE_GPTIMER; | |
+ itimer.errata = omap_dm_timer_get_errata(); | |
+ __omap_dm_timer_override_errata(&itimer, | |
+ OMAP_TIMER_ERRATA_I103_I767); | |
+ res = omap_dm_timer_init_one(&itimer, | |
+ "timer_sys_ck", | |
+ property, | |
+ &omap3_itimer.name, | |
+ OMAP_TIMER_POSTED, 1); | |
+ BUG_ON(res); | |
+ | |
+ __omap_dm_timer_int_enable(&itimer, OMAP_TIMER_INT_OVERFLOW); | |
+ omap3_itimer.irq = itimer.irq; | |
+ omap3_itimer.freq = itimer.rate; | |
+ omap3_itimer.cpumask = cpumask_of(0); | |
+ | |
+ ipipe_timer_register(&omap3_itimer); | |
+ } | |
+ if ((cpu_is_omap44xx() && num_possible_cpus() == 1) || soc_is_am33xx()) | |
+ ipipe = 1; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
res = omap_dm_timer_init_one(&clkev, fck_source, property, | |
- &clockevent_gpt.name, OMAP_TIMER_POSTED); | |
+ &clockevent_gpt.name, | |
+ OMAP_TIMER_POSTED, ipipe); | |
BUG_ON(res); | |
omap2_gp_timer_irq.dev_id = &clkev; | |
@@ -341,6 +449,16 @@ static void __init omap2_gp_clockevent_init(int gptimer_id, | |
clockevent_gpt.cpumask = cpu_possible_mask; | |
clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ if (ipipe) { | |
+ omap_shared_itimer.irq = clkev.irq; | |
+ omap_shared_itimer.min_delay_ticks = 3; | |
+ | |
+ clockevent_gpt.ipipe_timer = &omap_shared_itimer; | |
+ } | |
+#endif | |
+ | |
clockevents_config_and_register(&clockevent_gpt, clkev.rate, | |
3, /* Timer internal resynch latency */ | |
0xffffffff); | |
@@ -444,18 +562,21 @@ static int __init __maybe_unused omap2_sync32k_clocksource_init(void) | |
return ret; | |
} | |
+/* Set up the free-running counter used as the clocksource */ | |
static void __init omap2_gptimer_clocksource_init(int gptimer_id, | |
const char *fck_source, | |
const char *property) | |
{ | |
- int res; | |
+ int res, ipipe = IS_ENABLED(CONFIG_IPIPE); | |
clksrc.id = gptimer_id; | |
clksrc.errata = omap_dm_timer_get_errata(); | |
+ __omap_dm_timer_override_errata(&clksrc, OMAP_TIMER_ERRATA_I103_I767); | |
res = omap_dm_timer_init_one(&clksrc, fck_source, property, | |
&clocksource_gpt.name, | |
- OMAP_TIMER_NONPOSTED); | |
+ (ipipe ? OMAP_TIMER_POSTED | |
+ : OMAP_TIMER_NONPOSTED), ipipe); | |
BUG_ON(res); | |
__omap_dm_timer_load_start(&clksrc, | |
@@ -463,6 +584,21 @@ static void __init omap2_gptimer_clocksource_init(int gptimer_id, | |
OMAP_TIMER_NONPOSTED); | |
setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate); | |
+#ifdef CONFIG_IPIPE | |
+ { | |
+ unsigned long off; | |
+ | |
+ off = OMAP_TIMER_COUNTER_REG & 0xff; | |
+ if (clksrc.revision == 2) | |
+ off += OMAP_TIMER_V2_FUNC_OFFSET; | |
+ | |
+ tsc_info.freq = clksrc.rate; | |
+ tsc_info.counter_vaddr = (unsigned long)clksrc.io_base + off; | |
+ tsc_info.u.counter_paddr = clksrc.phys_base + off; | |
+ __ipipe_tsc_register(&tsc_info); | |
+ } | |
+#endif | |
+ | |
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) | |
pr_err("Could not register clocksource %s\n", | |
clocksource_gpt.name); | |
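The __ipipe_tscinfo registered above exports the free-running counter (virtual and physical addresses plus its frequency) so the I-pipe layer can deliver timestamps without trapping into the kernel. Such readers typically scale the raw count to nanoseconds with a mult/shift pair; here is a stand-alone sketch with illustrative values (the real factors would be derived from clksrc.rate):

#include <stdint.h>
#include <stdio.h>

static uint64_t counter_to_ns(uint32_t count, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)count * mult) >> shift;
}

int main(void)
{
	/* e.g. a 24 MHz counter: mult = 1e9 / 24e6, scaled by 2^24 */
	uint32_t shift = 24;
	uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / 24000000);
	/* one second's worth of ticks converts to ~1e9 ns (minus rounding) */
	printf("%llu ns\n", (unsigned long long)counter_to_ns(24000000, mult, shift));
	return 0;
}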
@@ -561,6 +697,7 @@ void __init omap##name##_gptimer_timer_init(void) \ | |
clksrc_prop); \ | |
} | |
+#ifndef CONFIG_IPIPE | |
#define OMAP_SYS_32K_TIMER_INIT(name, clkev_nr, clkev_src, clkev_prop, \ | |
clksrc_nr, clksrc_src, clksrc_prop) \ | |
void __init omap##name##_sync32k_timer_init(void) \ | |
@@ -576,6 +713,35 @@ void __init omap##name##_sync32k_timer_init(void) \ | |
else \ | |
omap2_sync32k_clocksource_init(); \ | |
} | |
+#else | |
+#define OMAP_SYS_32K_TIMER_INIT(name, clkev_nr, clkev_src, clkev_prop, \ | |
+ clksrc_nr, clksrc_src, clksrc_prop) \ | |
+void __init omap##name##_sync32k_timer_init(void) \ | |
+{ \ | |
+ const char *clk = clkev_src; \ | |
+ \ | |
+ if (num_possible_cpus() == 1 && !soc_is_omap54xx()) { \ | |
+ use_gptimer_clksrc = 1; \ | |
+ if (cpu_is_omap44xx()) \ | |
+ clk = "timer_sys_ck"; \ | |
+ } \ | |
+ \ | |
+ if (omap_clk_init) \ | |
+ omap_clk_init(); \ | |
+ omap_dmtimer_init(); \ | |
+ omap2_gp_clockevent_init((clkev_nr), clk, clkev_prop); \ | |
+ /* Enable the use of clocksource="gp_timer" kernel parameter */ \ | |
+ if (use_gptimer_clksrc) \ | |
+ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src, \ | |
+ clksrc_prop); \ | |
+ else \ | |
+ omap2_sync32k_clocksource_init(); \ | |
+ if (cpu_is_omap34xx()) \ | |
+ omap3_pic_muter_register(); \ | |
+ else if (cpu_is_omap44xx() || soc_is_omap54xx()) \ | |
+ omap4_pic_muter_register(); \ | |
+} | |
+#endif | |
#ifdef CONFIG_ARCH_OMAP2 | |
OMAP_SYS_32K_TIMER_INIT(2, 1, "timer_32k_ck", "ti,timer-alwon", | |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c | |
index b6cc181..e312fac 100644 | |
--- a/arch/arm/mach-pxa/irq.c | |
+++ b/arch/arm/mach-pxa/irq.c | |
@@ -85,6 +85,9 @@ static struct irq_chip pxa_internal_irq_chip = { | |
.name = "SC", | |
.irq_ack = pxa_mask_irq, | |
.irq_mask = pxa_mask_irq, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_mask_ack = pxa_mask_irq, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_unmask = pxa_unmask_irq, | |
}; | |
@@ -100,7 +103,7 @@ asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs) | |
if (mask == 0) | |
break; | |
- handle_IRQ(PXA_IRQ(fls(mask) - 1), regs); | |
+ ipipe_handle_multi_irq(PXA_IRQ(fls(mask) - 1), regs); | |
} while (1); | |
} | |
@@ -114,7 +117,7 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) | |
if ((ichp & ICHP_VAL_IRQ) == 0) | |
break; | |
- handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs); | |
+ ipipe_handle_multi_irq(PXA_IRQ(ICHP_IRQ(ichp)), regs); | |
} while (1); | |
} | |
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c | |
index 1255ee0..bcb2c60 100644 | |
--- a/arch/arm/mach-pxa/lpd270.c | |
+++ b/arch/arm/mach-pxa/lpd270.c | |
@@ -24,6 +24,7 @@ | |
#include <linux/mtd/mtd.h> | |
#include <linux/mtd/partitions.h> | |
#include <linux/pwm_backlight.h> | |
+#include <linux/ipipe.h> | |
#include <asm/types.h> | |
#include <asm/setup.h> | |
@@ -129,7 +130,7 @@ static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc) | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
if (likely(pending)) { | |
irq = LPD270_IRQ(0) + __ffs(pending); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
pending = __raw_readw(LPD270_INT_STATUS) & | |
lpd270_irq_enabled; | |
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c | |
index d8a1be6..471665a 100644 | |
--- a/arch/arm/mach-pxa/lubbock.c | |
+++ b/arch/arm/mach-pxa/lubbock.c | |
@@ -26,6 +26,7 @@ | |
#include <linux/smc91x.h> | |
#include <linux/slab.h> | |
#include <linux/leds.h> | |
+#include <linux/ipipe.h> | |
#include <linux/spi/spi.h> | |
#include <linux/spi/ads7846.h> | |
@@ -154,7 +155,7 @@ static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc) | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
if (likely(pending)) { | |
irq = LUBBOCK_IRQ(0) + __ffs(pending); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
} | |
pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled; | |
} while (pending); | |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c | |
index 7a12c1b..b583645 100644 | |
--- a/arch/arm/mach-pxa/mainstone.c | |
+++ b/arch/arm/mach-pxa/mainstone.c | |
@@ -30,6 +30,7 @@ | |
#include <linux/i2c/pxa-i2c.h> | |
#include <linux/slab.h> | |
#include <linux/leds.h> | |
+#include <linux/ipipe.h> | |
#include <asm/types.h> | |
#include <asm/setup.h> | |
@@ -153,7 +154,7 @@ static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc) | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
if (likely(pending)) { | |
irq = MAINSTONE_IRQ(0) + __ffs(pending); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
} | |
pending = MST_INTSETCLR & mainstone_irq_enabled; | |
} while (pending); | |
@@ -387,7 +388,7 @@ static struct pxafb_mode_info toshiba_ltm035a776c_mode = { | |
}; | |
static struct pxafb_mach_info mainstone_pxafb_info = { | |
- .num_modes = 1, | |
+ .num_modes = 1, | |
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, | |
}; | |
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c | |
index fb7f1d1..93421be 100644 | |
--- a/arch/arm/mach-pxa/pcm990-baseboard.c | |
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c | |
@@ -25,6 +25,7 @@ | |
#include <linux/i2c.h> | |
#include <linux/i2c/pxa-i2c.h> | |
#include <linux/pwm_backlight.h> | |
+#include <linux/ipipe.h> | |
#include <media/mt9v022.h> | |
#include <media/soc_camera.h> | |
@@ -295,7 +296,7 @@ static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc) | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
if (likely(pending)) { | |
irq = PCM027_IRQ(0) + __ffs(pending); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
} | |
pending = ~pcm990_cpld_readb(PCM990_CTRL_INTSETCLR); | |
pending &= pcm990_irq_enabled; | |
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c | |
index 8f1ee92..3199dde 100644 | |
--- a/arch/arm/mach-pxa/time.c | |
+++ b/arch/arm/mach-pxa/time.c | |
@@ -16,6 +16,8 @@ | |
#include <linux/init.h> | |
#include <linux/interrupt.h> | |
#include <linux/clockchips.h> | |
+#include <linux/ipipe_tickdev.h> | |
+#include <linux/ipipe.h> | |
#include <asm/div64.h> | |
#include <asm/mach/irq.h> | |
@@ -41,14 +43,22 @@ static u32 notrace pxa_read_sched_clock(void) | |
#define MIN_OSCR_DELTA 16 | |
+static inline void pxa_ost0_ack(void) | |
+{ | |
+ /* Disarm the compare/match, signal the event. */ | |
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | |
+ writel_relaxed(OSSR_M0, OSSR); | |
+} | |
+ | |
static irqreturn_t | |
pxa_ost0_interrupt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *c = dev_id; | |
- /* Disarm the compare/match, signal the event. */ | |
- writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | |
- writel_relaxed(OSSR_M0, OSSR); | |
+ if (clockevent_ipipe_stolen(c) == 0) | |
+ pxa_ost0_ack(); | |
+ | |
+ __ipipe_tsc_update(); | |
c->event_handler(c); | |
return IRQ_HANDLED; | |
@@ -125,6 +135,14 @@ static void pxa_timer_resume(struct clock_event_device *cedev) | |
#define pxa_timer_resume NULL | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer pxa_osmr0_itimer = { | |
+ .irq = IRQ_OST0, | |
+ .ack = pxa_ost0_ack, | |
+ .min_delay_ticks = MIN_OSCR_DELTA, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device ckevt_pxa_osmr0 = { | |
.name = "osmr0", | |
.features = CLOCK_EVT_FEAT_ONESHOT, | |
@@ -133,6 +151,9 @@ static struct clock_event_device ckevt_pxa_osmr0 = { | |
.set_mode = pxa_osmr0_set_mode, | |
.suspend = pxa_timer_suspend, | |
.resume = pxa_timer_resume, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &pxa_osmr0_itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static struct irqaction pxa_ost0_irq = { | |
@@ -142,6 +163,19 @@ static struct irqaction pxa_ost0_irq = { | |
.dev_id = &ckevt_pxa_osmr0, | |
}; | |
+#ifdef CONFIG_IPIPE | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .counter_vaddr = (unsigned long)io_p2v(0x40A00010UL), | |
+ .u = { | |
+ { | |
+ .counter_paddr = 0x40A00010UL, | |
+ .mask = 0xffffffff, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
void __init pxa_timer_init(void) | |
{ | |
unsigned long clock_tick_rate = get_clock_tick_rate(); | |
@@ -157,6 +191,12 @@ void __init pxa_timer_init(void) | |
clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32, | |
clocksource_mmio_readl_up); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ tsc_info.freq = clock_tick_rate; | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, | |
MIN_OSCR_DELTA * 2, 0x7fffffff); | |
} | |
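The reworked pxa_ost0_interrupt() above shows the pattern this patch applies to every timer driver: when a co-kernel has "stolen" the clockevent, the I-pipe acknowledges the hardware through the .ack callback before Linux runs, so the ISR must not ack a second time. A fully stand-alone sketch of that control flow, with stub functions standing in for the kernel APIs:

#include <stdbool.h>
#include <stdio.h>

struct clock_event_device { bool stolen; };

/* stand-ins for the kernel APIs used by the patched ISR */
static bool clockevent_ipipe_stolen(struct clock_event_device *c) { return c->stolen; }
static void pxa_ost0_ack(void) { puts("ack: disarm match, clear OSSR_M0"); }
static void __ipipe_tsc_update(void) { puts("tsc page refreshed"); }
static void tick_handler(struct clock_event_device *c) { (void)c; puts("Linux tick"); }

int main(void)
{
	struct clock_event_device c = { .stolen = false };

	if (!clockevent_ipipe_stolen(&c)) /* not stolen: ack it ourselves */
		pxa_ost0_ack();
	__ipipe_tsc_update();
	tick_handler(&c);
	return 0;
}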
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c | |
index 9c363c0..475e221 100644 | |
--- a/arch/arm/mach-pxa/viper.c | |
+++ b/arch/arm/mach-pxa/viper.c | |
@@ -45,6 +45,7 @@ | |
#include <linux/mtd/partitions.h> | |
#include <linux/mtd/physmap.h> | |
#include <linux/syscore_ops.h> | |
+#include <linux/ipipe.h> | |
#include <mach/pxa25x.h> | |
#include <mach/audio.h> | |
@@ -288,7 +289,7 @@ static void viper_irq_handler(unsigned int irq, struct irq_desc *desc) | |
if (likely(pending)) { | |
irq = viper_bit_to_irq(__ffs(pending)); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
} | |
pending = viper_irq_pending(); | |
} while (pending); | |
@@ -624,8 +625,8 @@ static struct isp116x_platform_data isp116x_platform_data = { | |
static struct platform_device isp116x_device = { | |
.name = "isp116x-hcd", | |
.id = -1, | |
- .num_resources = ARRAY_SIZE(isp116x_resources), | |
- .resource = isp116x_resources, | |
+ .num_resources = ARRAY_SIZE(isp116x_resources), | |
+ .resource = isp116x_resources, | |
.dev = { | |
.platform_data = &isp116x_platform_data, | |
}, | |
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c | |
index 1d5ee5c..fa53303 100644 | |
--- a/arch/arm/mach-realview/core.c | |
+++ b/arch/arm/mach-realview/core.c | |
@@ -335,6 +335,7 @@ void __iomem *timer0_va_base; | |
void __iomem *timer1_va_base; | |
void __iomem *timer2_va_base; | |
void __iomem *timer3_va_base; | |
+void __iomem *timer3_pa_base; | |
/* | |
* Set up the clock source and clock events devices | |
@@ -343,14 +344,14 @@ void __init realview_timer_init(unsigned int timer_irq) | |
{ | |
u32 val; | |
- /* | |
- * set clock frequency: | |
+ /* | |
+ * set clock frequency: | |
* REALVIEW_REFCLK is 32KHz | |
* REALVIEW_TIMCLK is 1MHz | |
*/ | |
val = readl(__io_address(REALVIEW_SCTL_BASE)); | |
writel((REALVIEW_TIMCLK << REALVIEW_TIMER1_EnSel) | | |
- (REALVIEW_TIMCLK << REALVIEW_TIMER2_EnSel) | | |
+ (REALVIEW_TIMCLK << REALVIEW_TIMER2_EnSel) | | |
(REALVIEW_TIMCLK << REALVIEW_TIMER3_EnSel) | | |
(REALVIEW_TIMCLK << REALVIEW_TIMER4_EnSel) | val, | |
__io_address(REALVIEW_SCTL_BASE)); | |
@@ -363,7 +364,7 @@ void __init realview_timer_init(unsigned int timer_irq) | |
writel(0, timer2_va_base + TIMER_CTRL); | |
writel(0, timer3_va_base + TIMER_CTRL); | |
- sp804_clocksource_init(timer3_va_base, "timer3"); | |
+ sp804_clocksource_init(timer3_va_base, timer3_pa_base, "timer3"); | |
sp804_clockevents_init(timer0_va_base, timer_irq, "timer0"); | |
} | |
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h | |
index 602ca5e..5757912 100644 | |
--- a/arch/arm/mach-realview/core.h | |
+++ b/arch/arm/mach-realview/core.h | |
@@ -45,6 +45,7 @@ extern void __iomem *timer0_va_base; | |
extern void __iomem *timer1_va_base; | |
extern void __iomem *timer2_va_base; | |
extern void __iomem *timer3_va_base; | |
+extern void __iomem *timer3_pa_base; | |
extern void realview_timer_init(unsigned int timer_irq); | |
extern int realview_flash_register(struct resource *res, u32 num); | |
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c | |
index 5b1c8bf..cfbf2cc 100644 | |
--- a/arch/arm/mach-realview/realview_eb.c | |
+++ b/arch/arm/mach-realview/realview_eb.c | |
@@ -407,6 +407,7 @@ static void __init realview_eb_timer_init(void) | |
timer1_va_base = __io_address(REALVIEW_EB_TIMER0_1_BASE) + 0x20; | |
timer2_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE); | |
timer3_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE) + 0x20; | |
+ timer3_pa_base = REALVIEW_EB_TIMER2_3_BASE + 0x20; | |
if (core_tile_eb11mp() || core_tile_a9mp()) | |
timer_irq = IRQ_EB11MP_TIMER0_1; | |
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c | |
index d5e83a1..930e9fd 100644 | |
--- a/arch/arm/mach-realview/realview_pb1176.c | |
+++ b/arch/arm/mach-realview/realview_pb1176.c | |
@@ -324,6 +324,7 @@ static void __init realview_pb1176_timer_init(void) | |
timer1_va_base = __io_address(REALVIEW_PB1176_TIMER0_1_BASE) + 0x20; | |
timer2_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE); | |
timer3_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE) + 0x20; | |
+ timer3_pa_base = REALVIEW_PB1176_TIMER2_3_BASE + 0x20; | |
realview_clk_init(__io_address(REALVIEW_SYS_BASE), true); | |
realview_timer_init(IRQ_DC1176_TIMER0); | |
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c | |
index c3cfe21..8edcfd1 100644 | |
--- a/arch/arm/mach-realview/realview_pb11mp.c | |
+++ b/arch/arm/mach-realview/realview_pb11mp.c | |
@@ -310,6 +310,7 @@ static void __init realview_pb11mp_timer_init(void) | |
timer1_va_base = __io_address(REALVIEW_PB11MP_TIMER0_1_BASE) + 0x20; | |
timer2_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE); | |
timer3_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE) + 0x20; | |
+ timer3_pa_base = REALVIEW_PB11MP_TIMER2_3_BASE + 0x20; | |
realview_clk_init(__io_address(REALVIEW_SYS_BASE), false); | |
realview_timer_init(IRQ_TC11MP_TIMER0_1); | |
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c | |
index dde652a..14e92d1 100644 | |
--- a/arch/arm/mach-realview/realview_pba8.c | |
+++ b/arch/arm/mach-realview/realview_pba8.c | |
@@ -259,6 +259,7 @@ static void __init realview_pba8_timer_init(void) | |
timer1_va_base = __io_address(REALVIEW_PBA8_TIMER0_1_BASE) + 0x20; | |
timer2_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE); | |
timer3_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE) + 0x20; | |
+ timer3_pa_base = REALVIEW_PBA8_TIMER2_3_BASE + 0x20; | |
realview_clk_init(__io_address(REALVIEW_SYS_BASE), false); | |
realview_timer_init(IRQ_PBA8_TIMER0_1); | |
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c | |
index 54f0185..6d2f083 100644 | |
--- a/arch/arm/mach-realview/realview_pbx.c | |
+++ b/arch/arm/mach-realview/realview_pbx.c | |
@@ -318,6 +318,7 @@ static void __init realview_pbx_timer_init(void) | |
timer1_va_base = __io_address(REALVIEW_PBX_TIMER0_1_BASE) + 0x20; | |
timer2_va_base = __io_address(REALVIEW_PBX_TIMER2_3_BASE); | |
timer3_va_base = __io_address(REALVIEW_PBX_TIMER2_3_BASE) + 0x20; | |
+ timer3_pa_base = REALVIEW_PBX_TIMER2_3_BASE + 0x20; | |
realview_clk_init(__io_address(REALVIEW_SYS_BASE), false); | |
realview_timer_init(IRQ_PBX_TIMER0_1); | |
diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c | |
index cb1b791..9b72630 100644 | |
--- a/arch/arm/mach-s3c24xx/bast-irq.c | |
+++ b/arch/arm/mach-s3c24xx/bast-irq.c | |
@@ -26,6 +26,7 @@ | |
#include <linux/ioport.h> | |
#include <linux/device.h> | |
#include <linux/io.h> | |
+#include <linux/ipipe.h> | |
#include <asm/irq.h> | |
#include <asm/mach-types.h> | |
@@ -121,7 +122,7 @@ bast_irq_pc104_demux(unsigned int irq, | |
for (i = 0; stat != 0; i++, stat >>= 1) { | |
if (stat & 1) { | |
irqno = bast_pc104_irqs[i]; | |
- generic_handle_irq(irqno); | |
+ ipipe_handle_demuxed_irq(irqno); | |
} | |
} | |
} | |
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c | |
index 2124f1fc..7be5cbe 100644 | |
--- a/arch/arm/mach-sa1100/irq.c | |
+++ b/arch/arm/mach-sa1100/irq.c | |
@@ -16,6 +16,7 @@ | |
#include <linux/irq.h> | |
#include <linux/ioport.h> | |
#include <linux/syscore_ops.h> | |
+#include <linux/ipipe.h> | |
#include <mach/hardware.h> | |
#include <mach/irqs.h> | |
@@ -127,7 +128,7 @@ sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc) | |
mask >>= 11; | |
do { | |
if (mask & 1) | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
mask >>= 1; | |
irq++; | |
} while (mask); | |
@@ -219,6 +220,9 @@ static struct irq_chip sa1100_normal_chip = { | |
.name = "SC", | |
.irq_ack = sa1100_mask_irq, | |
.irq_mask = sa1100_mask_irq, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_mask_ack = sa1100_mask_irq, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_unmask = sa1100_unmask_irq, | |
.irq_set_wake = sa1100_set_wake, | |
}; | |
@@ -254,7 +258,7 @@ static int sa1100irq_suspend(void) | |
*/ | |
GRER = PWER & GPIO_IRQ_rising_edge; | |
GFER = PWER & GPIO_IRQ_falling_edge; | |
- | |
+ | |
/* | |
* Clear any pending GPIO interrupts. | |
*/ | |
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c | |
index a59a13a..e8207f0 100644 | |
--- a/arch/arm/mach-sa1100/time.c | |
+++ b/arch/arm/mach-sa1100/time.c | |
@@ -14,6 +14,8 @@ | |
#include <linux/irq.h> | |
#include <linux/timex.h> | |
#include <linux/clockchips.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/mach/time.h> | |
#include <asm/sched_clock.h> | |
@@ -27,13 +29,35 @@ static u32 notrace sa1100_read_sched_clock(void) | |
#define MIN_OSCR_DELTA 2 | |
-static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id) | |
+#ifdef CONFIG_IPIPE | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING, | |
+ .freq = CLOCK_TICK_RATE, | |
+ .counter_vaddr = io_p2v(0x90000010UL), | |
+ .u = { | |
+ { | |
+ .counter_paddr = 0x90000010UL, | |
+ .mask = 0xffffffff, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+static inline void sa1100_ost0_ack(void) | |
{ | |
- struct clock_event_device *c = dev_id; | |
- | |
/* Disarm the compare/match, signal the event. */ | |
writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); | |
writel_relaxed(OSSR_M0, OSSR); | |
+} | |
+ | |
+static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id) | |
+{ | |
+ struct clock_event_device *c = dev_id; | |
+ | |
+ if (clockevent_ipipe_stolen(c) == 0) | |
+ sa1100_ost0_ack(); | |
+ | |
+ __ipipe_tsc_update(); | |
c->event_handler(c); | |
return IRQ_HANDLED; | |
@@ -100,14 +124,23 @@ static void sa1100_timer_resume(struct clock_event_device *cedev) | |
#define sa1100_timer_resume NULL | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer sa1100_osmr0_itimer = { | |
+ .irq = IRQ_OST0, | |
+ .ack = sa1100_ost0_ack, | |
+ .min_delay_ticks = MIN_OSCR_DELTA, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device ckevt_sa1100_osmr0 = { | |
.name = "osmr0", | |
.features = CLOCK_EVT_FEAT_ONESHOT, | |
.rating = 200, | |
.set_next_event = sa1100_osmr0_set_next_event, | |
.set_mode = sa1100_osmr0_set_mode, | |
- .suspend = sa1100_timer_suspend, | |
- .resume = sa1100_timer_resume, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &sa1100_osmr0_itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static struct irqaction sa1100_timer_irq = { | |
@@ -130,6 +163,10 @@ void __init sa1100_timer_init(void) | |
clocksource_mmio_init(OSCR, "oscr", CLOCK_TICK_RATE, 200, 32, | |
clocksource_mmio_readl_up); | |
+#ifdef CONFIG_IPIPE | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
clockevents_config_and_register(&ckevt_sa1100_osmr0, 3686400, | |
MIN_OSCR_DELTA * 2, 0x7fffffff); | |
} | |
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig | |
index 442917e..1396d7a 100644 | |
--- a/arch/arm/mach-spear/Kconfig | |
+++ b/arch/arm/mach-spear/Kconfig | |
@@ -12,6 +12,7 @@ menuconfig PLAT_SPEAR | |
select COMMON_CLK | |
select GENERIC_CLOCKEVENTS | |
select HAVE_CLK | |
+ select IPIPE_ARM_KUSER_TSC if IPIPE | |
if PLAT_SPEAR | |
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c | |
index d449673..63e8ed0 100644 | |
--- a/arch/arm/mach-spear/time.c | |
+++ b/arch/arm/mach-spear/time.c | |
@@ -22,6 +22,8 @@ | |
#include <linux/of_address.h> | |
#include <linux/time.h> | |
#include <linux/irq.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/mach/time.h> | |
#include "generic.h" | |
@@ -65,23 +67,43 @@ | |
static __iomem void *gpt_base; | |
static struct clk *gpt_clk; | |
+static unsigned long gpt_phys_base; | |
+static void spear_timer_ack(void); | |
static void clockevent_set_mode(enum clock_event_mode mode, | |
struct clock_event_device *clk_event_dev); | |
static int clockevent_next_event(unsigned long evt, | |
struct clock_event_device *clk_event_dev); | |
+#ifdef CONFIG_IPIPE | |
+static unsigned prescale, max_delta_ticks; | |
+ | |
+static struct __ipipe_tscinfo __maybe_unused tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING_TWICE, | |
+ .u = { | |
+ { | |
+ .mask = 0x0000ffff, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void spear_clocksource_init(void) | |
{ | |
u32 tick_rate; | |
u16 val; | |
+ tick_rate = clk_get_rate(gpt_clk); | |
+#ifndef CONFIG_IPIPE | |
/* program the prescaler (/256)*/ | |
writew(CTRL_PRESCALER256, gpt_base + CR(CLKSRC)); | |
/* find out actual clock driving Timer */ | |
- tick_rate = clk_get_rate(gpt_clk); | |
tick_rate >>= CTRL_PRESCALER256; | |
+#else /* CONFIG_IPIPE */ | |
+ writew(prescale, gpt_base + CR(CLKSRC)); | |
+ tick_rate >>= prescale; | |
+#endif /* CONFIG_IPIPE */ | |
writew(0xFFFF, gpt_base + LOAD(CLKSRC)); | |
@@ -93,14 +115,30 @@ static void spear_clocksource_init(void) | |
/* register the clocksource */ | |
clocksource_mmio_init(gpt_base + COUNT(CLKSRC), "tmr1", tick_rate, | |
200, 16, clocksource_mmio_readw_up); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ tsc_info.u.counter_paddr = gpt_phys_base + COUNT(CLKSRC), | |
+ tsc_info.counter_vaddr = (unsigned long)(gpt_base + COUNT(CLKSRC)); | |
+ tsc_info.freq = tick_rate; | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer spear_itimer = { | |
+ .ack = spear_timer_ack, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device clkevt = { | |
.name = "tmr0", | |
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
.set_mode = clockevent_set_mode, | |
.set_next_event = clockevent_next_event, | |
.shift = 0, /* to be computed */ | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &spear_itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static void clockevent_set_mode(enum clock_event_mode mode, | |
@@ -117,7 +155,11 @@ static void clockevent_set_mode(enum clock_event_mode mode, | |
switch (mode) { | |
case CLOCK_EVT_MODE_PERIODIC: | |
period = clk_get_rate(gpt_clk) / HZ; | |
+#ifndef CONFIG_IPIPE | |
period >>= CTRL_PRESCALER16; | |
+#else /* !CONFIG_IPIPE */ | |
+ period >>= prescale; | |
+#endif /* !CONFIG_IPIPE */ | |
writew(period, gpt_base + LOAD(CLKEVT)); | |
val = readw(gpt_base + CR(CLKEVT)); | |
@@ -148,6 +190,13 @@ static int clockevent_next_event(unsigned long cycles, | |
{ | |
u16 val = readw(gpt_base + CR(CLKEVT)); | |
+#ifdef CONFIG_IPIPE | |
+ if (cycles > max_delta_ticks) | |
+ cycles = max_delta_ticks; | |
+#endif | |
+ | |
+ __ipipe_tsc_update(); | |
+ | |
if (val & CTRL_ENABLE) | |
writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT)); | |
@@ -159,11 +208,19 @@ static int clockevent_next_event(unsigned long cycles, | |
return 0; | |
} | |
+static void spear_timer_ack(void) | |
+{ | |
+ writew(INT_STATUS, gpt_base + IR(CLKEVT)); | |
+} | |
+ | |
static irqreturn_t spear_timer_interrupt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = &clkevt; | |
- writew(INT_STATUS, gpt_base + IR(CLKEVT)); | |
+ if (!clockevent_ipipe_stolen(evt)) | |
+ spear_timer_ack(); | |
+ | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
@@ -180,11 +237,26 @@ static void __init spear_clockevent_init(int irq) | |
{ | |
u32 tick_rate; | |
- /* program the prescaler */ | |
+ tick_rate = clk_get_rate(gpt_clk); | |
+#ifndef CONFIG_IPIPE | |
+	/* program the prescaler (/16) */ | |
writew(CTRL_PRESCALER16, gpt_base + CR(CLKEVT)); | |
- tick_rate = clk_get_rate(gpt_clk); | |
+ /* find out actual clock driving Timer */ | |
tick_rate >>= CTRL_PRESCALER16; | |
+#else /* CONFIG_IPIPE */ | |
+ /* Find the prescaler giving a precision under 1us */ | |
+ for (prescale = CTRL_PRESCALER256; prescale != 0xffff; prescale--) | |
+ if ((tick_rate >> prescale) >= 1000000) | |
+ break; | |
+ | |
+ spear_itimer.irq = irq; | |
+ | |
+ writew(prescale, gpt_base + CR(CLKEVT)); | |
+ tick_rate >>= prescale; | |
+ | |
+ max_delta_ticks = 0xffff - tick_rate / 1000; | |
+#endif /* CONFIG_IPIPE */ | |
clkevt.cpumask = cpumask_of(0); | |
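The prescaler search added for SPEAr above picks the largest divider that still leaves the timer ticking at 1 MHz or more, i.e. sub-microsecond resolution. A stand-alone model of the same loop (CTRL_PRESCALER256 is assumed to be 8, a divide-by-256):

#include <assert.h>

#define CTRL_PRESCALER256 8

static unsigned pick_prescale(unsigned long tick_rate)
{
	unsigned prescale;

	/* mirrors the patch: counts down from /256 and assumes the input
	 * clock is at least 1 MHz, so the 0xffff sentinel is never reached */
	for (prescale = CTRL_PRESCALER256; prescale != 0xffff; prescale--)
		if ((tick_rate >> prescale) >= 1000000)
			break;
	return prescale;
}

int main(void)
{
	assert(pick_prescale(48000000) == 5); /* 48 MHz / 32 = 1.5 MHz */
	assert(pick_prescale(1000000) == 0);  /* already exactly 1 MHz */
	return 0;
}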
@@ -201,6 +273,7 @@ const static struct of_device_id timer_of_match[] __initconst = { | |
void __init spear_setup_of_timer(void) | |
{ | |
struct device_node *np; | |
+ struct resource res; | |
int irq, ret; | |
np = of_find_matching_node(NULL, timer_of_match); | |
@@ -221,6 +294,10 @@ void __init spear_setup_of_timer(void) | |
return; | |
} | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ gpt_phys_base = res.start; | |
+ | |
gpt_clk = clk_get_sys("gpt0", NULL); | |
if (!gpt_clk) { | |
pr_err("%s:couldn't get clk for gpt\n", __func__); | |
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c | |
index 54bb80b..5a76f51 100644 | |
--- a/arch/arm/mach-versatile/core.c | |
+++ b/arch/arm/mach-versatile/core.c | |
@@ -807,6 +807,7 @@ void __init versatile_timer_init(void) | |
writel(0, TIMER2_VA_BASE + TIMER_CTRL); | |
writel(0, TIMER3_VA_BASE + TIMER_CTRL); | |
- sp804_clocksource_init(TIMER3_VA_BASE, "timer3"); | |
+ sp804_clocksource_init(TIMER3_VA_BASE, | |
+ VERSATILE_TIMER2_3_BASE + 0x20, "timer3"); | |
sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1, "timer0"); | |
} | |
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c | |
index 8802030..90ada22 100644 | |
--- a/arch/arm/mach-vexpress/v2m.c | |
+++ b/arch/arm/mach-vexpress/v2m.c | |
@@ -63,7 +63,8 @@ static void __init v2m_sp804_init(void __iomem *base, unsigned int irq) | |
if (WARN_ON(!base || irq == NO_IRQ)) | |
return; | |
- sp804_clocksource_init(base + TIMER_2_BASE, "v2m-timer1"); | |
+ sp804_clocksource_init(base + TIMER_2_BASE, | |
+ V2M_TIMER01 + TIMER_2_BASE, "v2m-timer1"); | |
sp804_clockevents_init(base + TIMER_1_BASE, irq, "v2m-timer0"); | |
} | |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig | |
index 08c9fe9..111f409 100644 | |
--- a/arch/arm/mm/Kconfig | |
+++ b/arch/arm/mm/Kconfig | |
@@ -776,6 +776,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG | |
config NEED_KUSER_HELPERS | |
bool | |
+ default y if IPIPE | |
config KUSER_HELPERS | |
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS | |
@@ -932,3 +933,76 @@ config ARCH_HAS_BARRIERS | |
help | |
This option allows the use of custom mandatory barriers | |
included via the mach/barriers.h file. | |
+ | |
+config ARM_FCSE | |
+ bool "Fast Context Switch Extension (EXPERIMENTAL)" | |
+ depends on !SMP && (CPU_32v4 || CPU_32v4T || CPU_32v5) | |
+ help | |
+	  The Fast Context Switch Extension (FCSE for short) is an extension | |
+	  of some ARM processors which allows switching contexts between | |
+	  processes without flushing the cache, saving a few tens of | |
+	  microseconds in the worst case. | |
+ | |
+	  Enabling this option makes Linux use the FCSE. | |
+ | |
+ We propose two modes: | |
+	  - the guaranteed mode: we guarantee that there will never be any | |
+	    cache flush when switching contexts, but this means that there | |
+	    cannot be more than 95 running processes in the system, each with | |
+	    a virtual memory space smaller than 32MB, and that the shared | |
+	    memory mappings do not use the cache; | |
+	  - the best-effort mode: we allow some cache flushes to happen from | |
+	    time to time, but do not limit the number of processes or the | |
+	    virtual memory space available for each process, and the shared | |
+	    memory mappings use the cache. | |
+ | |
+if ARM_FCSE | |
+ | |
+choice | |
+ prompt "FCSE mode" | |
+ default ARM_FCSE_BEST_EFFORT | |
+ help | |
+	  This option allows selecting which FCSE mode will be used. | |
+ | |
+config ARM_FCSE_GUARANTEED | |
+ bool "guaranteed" | |
+ help | |
+ Select guaranteed mode. | |
+ | |
+config ARM_FCSE_BEST_EFFORT | |
+ bool "best effort" | |
+ help | |
+ Select best-effort mode. | |
+ | |
+endchoice | |
+ | |
+config ARM_FCSE_PREEMPT_FLUSH | |
+ bool "Preemptible cache flushes" | |
+ default ARM_FCSE_GUARANTEED | |
+ help | |
+	  When FCSE is enabled, some cache flushes happen with preemption | |
+	  disabled by default; this avoids additional cache flushes but | |
+	  increases latency. This option makes those flushes preemptible. | |
+	  It probably only makes sense in guaranteed mode. | |
+ | |
+config ARM_FCSE_MESSAGES | |
+ bool "help messages" | |
+ default ARM_FCSE_BEST_EFFORT | |
+ help | |
+	  When FCSE is enabled in best-effort mode, due to the VM space | |
+	  reduction, an overly large stack size limit may cause processes to | |
+	  exceed the 32MB limit too easily, while an overly small one may | |
+	  result in stack overflows. Enabling this option will print | |
+	  messages in these situations to assist you in tuning the stack | |
+	  size limit. | |
+ | |
+	  In guaranteed mode, this option will cause messages to be printed | |
+	  if one of the hard limits (95 processes, 32MB VM space) is | |
+	  exceeded. | |
+ | |
+config ARM_FCSE_DEBUG | |
+ bool "FCSE debug" | |
+ select ARM_FCSE_MESSAGES | |
+ help | |
+ This option enables some internal debug checks. It has a high | |
+ overhead, and is only useful for debugging the FCSE code. | |
+ | |
+endif | |
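The FCSE hardware this Kconfig block enables relocates virtual addresses below 32MB: the MMU replaces the top 7 bits of a low VA with the current process ID, giving each task a private 32MB "slot" so context switches need no cache flush. A stand-alone sketch of the address translation, following the classic ARMv5 FCSE definition (the constants mirror the FCSE_PID_SHIFT used by the fcse.c code below):

#include <assert.h>
#include <stdint.h>

#define FCSE_PID_SHIFT 25                   /* 32MB slots */
#define FCSE_TASK_SIZE (1UL << FCSE_PID_SHIFT)

static uint32_t va_to_mva(uint32_t va, uint32_t pid)
{
	/* low addresses are relocated into the process's slot;
	 * everything above 32MB passes through unmodified */
	return va < FCSE_TASK_SIZE ? (pid << FCSE_PID_SHIFT) | va : va;
}

int main(void)
{
	/* PID 3 maps its VA 0x8000 at modified VA 0x06008000 */
	assert(va_to_mva(0x8000, 3) == 0x06008000);
	assert(va_to_mva(0x40000000, 3) == 0x40000000);
	return 0;
}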
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile | |
index 9e51be9..cf66246 100644 | |
--- a/arch/arm/mm/Makefile | |
+++ b/arch/arm/mm/Makefile | |
@@ -95,3 +95,4 @@ obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o | |
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o | |
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o | |
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o | |
+obj-$(CONFIG_ARM_FCSE) += fcse.o | |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c | |
index 6f4585b..24d09d5 100644 | |
--- a/arch/arm/mm/alignment.c | |
+++ b/arch/arm/mm/alignment.c | |
@@ -470,7 +470,7 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg | |
* | |
* B = rn pointer before instruction, A = rn pointer after instruction | |
* ------ increasing address -----> | |
- * | | r0 | r1 | ... | rx | | | |
+ * | | r0 | r1 | ... | rx | | | |
* PU = 01 B A | |
* PU = 11 B A | |
* PU = 00 A B | |
@@ -755,7 +755,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
int thumb2_32b = 0; | |
if (interrupts_enabled(regs)) | |
- local_irq_enable(); | |
+ hard_local_irq_enable(); | |
+ | |
+ if (__ipipe_report_trap(IPIPE_TRAP_ALIGNMENT,regs)) | |
+ return 0; | |
instrptr = instruction_pointer(regs); | |
@@ -909,7 +912,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
task_pid_nr(current), instrptr, | |
isize << 1, | |
isize == 2 ? tinstr : instr, | |
- addr, fsr); | |
+ addr, fsr); | |
if (ai_usermode & UM_FIXUP) | |
goto fixup; | |
@@ -936,7 +939,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
* entry-common.S) and disable the alignment trap only if | |
* there is no work pending for this thread. | |
*/ | |
- raw_local_irq_disable(); | |
+ hard_local_irq_disable(); | |
if (!(current_thread_info()->flags & _TIF_WORK_MASK)) | |
set_cr(cr_no_alignment); | |
} | |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c | |
index c465fac..a1b38ee 100644 | |
--- a/arch/arm/mm/cache-l2x0.c | |
+++ b/arch/arm/mm/cache-l2x0.c | |
@@ -22,17 +22,26 @@ | |
#include <linux/io.h> | |
#include <linux/of.h> | |
#include <linux/of_address.h> | |
+#include <linux/ipipe.h> | |
#include <asm/cacheflush.h> | |
#include <asm/hardware/cache-l2x0.h> | |
#include "cache-aurora-l2.h" | |
+#ifndef CONFIG_IPIPE | |
+#define SPINLOCK_SECTION_LEN 4096UL | |
+#else /* CONFIG_IPIPE */ | |
+#define SPINLOCK_SECTION_LEN 512UL | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
#define CACHE_LINE_SIZE 32 | |
static void __iomem *l2x0_base; | |
-static DEFINE_RAW_SPINLOCK(l2x0_lock); | |
+static IPIPE_DEFINE_RAW_SPINLOCK(l2x0_lock); | |
static u32 l2x0_way_mask; /* Bitmask of active ways */ | |
static u32 l2x0_size; | |
+static u32 l2x0_ways; | |
+static u32 l2x0_lines; | |
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; | |
/* Aurora don't have the cache ID register available, so we have to | |
@@ -146,6 +155,7 @@ static void __l2x0_flush_all(void) | |
debug_writel(0x00); | |
} | |
+#ifndef CONFIG_IPIPE | |
static void l2x0_flush_all(void) | |
{ | |
unsigned long flags; | |
@@ -155,6 +165,40 @@ static void l2x0_flush_all(void) | |
__l2x0_flush_all(); | |
raw_spin_unlock_irqrestore(&l2x0_lock, flags); | |
} | |
+#else | |
+static void l2x0_clean_inv_line_idx(unsigned line, unsigned way) | |
+{ | |
+ void __iomem *base = l2x0_base; | |
+ | |
+ writel_relaxed((way << 28) | (line << 5), | |
+ base + L2X0_CLEAN_INV_LINE_IDX); | |
+ cache_wait(base + L2X0_CLEAN_INV_LINE_IDX, 1); | |
+} | |
+ | |
+static void l2x0_flush_way(unsigned way, unsigned len, unsigned lines) | |
+{ | |
+ unsigned long flags; | |
+ unsigned line, i; | |
+ | |
+ for (line = 0; line < lines; line += len ) { | |
+ raw_spin_lock_irqsave(&l2x0_lock, flags); | |
+ debug_writel(0x03); | |
+ for (i = 0; i < len && line + i < lines; i++) | |
+ l2x0_clean_inv_line_idx(line + i, way); | |
+ cache_sync(); | |
+ debug_writel(0x00); | |
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags); | |
+ } | |
+} | |
+ | |
+static void l2x0_flush_all(void) | |
+{ | |
+ unsigned way, len = SPINLOCK_SECTION_LEN / CACHE_LINE_SIZE; | |
+ | |
+ for (way = 0; way < l2x0_ways; way++) | |
+ l2x0_flush_way(way, len, l2x0_lines); | |
+} | |
+#endif | |
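Splitting l2x0_flush_all() into per-way chunks of SPINLOCK_SECTION_LEN bytes bounds how long l2x0_lock is held with interrupts off: under the I-pipe only 512 bytes' worth of lines are flushed per critical section instead of 4096. A quick stand-alone check of the resulting section sizes:

#include <stdio.h>

#define CACHE_LINE_SIZE 32

int main(void)
{
	/* lines cleaned+invalidated per lock-held section */
	printf("CONFIG_IPIPE: %lu lines\n", 512UL / CACHE_LINE_SIZE);  /* 16 */
	printf("vanilla:      %lu lines\n", 4096UL / CACHE_LINE_SIZE); /* 128 */
	return 0;
}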
static void l2x0_clean_all(void) | |
{ | |
@@ -202,9 +246,12 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |
l2x0_flush_line(end); | |
debug_writel(0x00); | |
} | |
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags); | |
+ raw_spin_lock_irqsave(&l2x0_lock, flags); | |
while (start < end) { | |
- unsigned long blk_end = start + min(end - start, 4096UL); | |
+ unsigned long blk_end = | |
+ start + min(end - start, SPINLOCK_SECTION_LEN); | |
while (start < blk_end) { | |
l2x0_inv_line(start); | |
@@ -234,7 +281,8 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |
raw_spin_lock_irqsave(&l2x0_lock, flags); | |
start &= ~(CACHE_LINE_SIZE - 1); | |
while (start < end) { | |
- unsigned long blk_end = start + min(end - start, 4096UL); | |
+ unsigned long blk_end = | |
+ start + min(end - start, SPINLOCK_SECTION_LEN); | |
while (start < blk_end) { | |
l2x0_clean_line(start); | |
@@ -264,7 +312,8 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |
raw_spin_lock_irqsave(&l2x0_lock, flags); | |
start &= ~(CACHE_LINE_SIZE - 1); | |
while (start < end) { | |
- unsigned long blk_end = start + min(end - start, 4096UL); | |
+ unsigned long blk_end = | |
+ start + min(end - start, SPINLOCK_SECTION_LEN); | |
debug_writel(0x03); | |
while (start < blk_end) { | |
@@ -374,6 +423,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |
} | |
l2x0_way_mask = (1 << ways) - 1; | |
+ l2x0_ways = ways; | |
/* | |
* L2 cache Size = Way size * Number of ways | |
@@ -382,6 +432,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |
way_size = 1 << (way_size + way_size_shift); | |
l2x0_size = ways * way_size * SZ_1K; | |
+ l2x0_lines = way_size * SZ_1K / CACHE_LINE_SIZE; | |
/* | |
* Check if l2x0 controller is already enabled. | |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c | |
index eeab06e..1c9a7e0 100644 | |
--- a/arch/arm/mm/context.c | |
+++ b/arch/arm/mm/context.c | |
@@ -41,7 +41,7 @@ | |
#define ASID_FIRST_VERSION (1ULL << ASID_BITS) | |
#define NUM_USER_ASIDS ASID_FIRST_VERSION | |
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | |
+static IPIPE_DEFINE_RAW_SPINLOCK(cpu_asid_lock); | |
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | |
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | |
@@ -219,15 +219,18 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |
return asid; | |
} | |
-void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |
+int check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk, bool root_p) | |
{ | |
unsigned long flags; | |
- unsigned int cpu = smp_processor_id(); | |
+ unsigned int cpu = ipipe_processor_id(); | |
u64 asid; | |
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) | |
__check_vmalloc_seq(mm); | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ flags = hard_local_irq_save(); | |
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
/* | |
* Required during context switch to avoid speculative page table | |
* walking with the wrong TTBR. | |
@@ -239,7 +242,11 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid)) | |
goto switch_mm_fastpath; | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ raw_spin_lock(&cpu_asid_lock); | |
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
raw_spin_lock_irqsave(&cpu_asid_lock, flags); | |
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
/* Check that our ASID belongs to the current generation. */ | |
asid = atomic64_read(&mm->context.id); | |
if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { | |
@@ -255,8 +262,17 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |
atomic64_set(&per_cpu(active_asids, cpu), asid); | |
cpumask_set_cpu(cpu, mm_cpumask(mm)); | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ raw_spin_unlock(&cpu_asid_lock); | |
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); | |
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
switch_mm_fastpath: | |
- cpu_switch_mm(mm->pgd, mm); | |
+ cpu_switch_mm(mm->pgd, mm, 1); | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ hard_local_irq_restore(flags); | |
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+ | |
+ return 0; | |
} | |
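The fast path in check_and_switch_context() above keeps the current ASID when its generation matches the global one; the xor-shift test is the standard trick of carrying the generation in the high bits and the ASID in the low bits. A stand-alone model of that check:

#include <assert.h>
#include <stdint.h>

#define ASID_BITS 8

static int asid_is_stale(uint64_t asid, uint64_t generation)
{
	/* nonzero high bits after the xor mean a generation rollover */
	return ((asid ^ generation) >> ASID_BITS) != 0;
}

int main(void)
{
	uint64_t generation = 3 << ASID_BITS;

	assert(!asid_is_stale(generation | 42, generation));      /* same generation */
	assert(asid_is_stale((2 << ASID_BITS) | 42, generation)); /* rolled over */
	return 0;
}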
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c | |
index 1267e64..4ba1bc2 100644 | |
--- a/arch/arm/mm/copypage-v4mc.c | |
+++ b/arch/arm/mm/copypage-v4mc.c | |
@@ -40,7 +40,7 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); | |
* instruction. If your processor does not supply this, you have to write your | |
* own copy_user_highpage that does the right thing. | |
*/ | |
-static void __naked | |
+static void notrace __naked | |
mc_copy_user_page(void *from, void *to) | |
{ | |
asm volatile( | |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c | |
index 0fb8502..afd7970 100644 | |
--- a/arch/arm/mm/copypage-xscale.c | |
+++ b/arch/arm/mm/copypage-xscale.c | |
@@ -36,7 +36,7 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); | |
* Dcache aliasing issue. The writes will be forwarded to the write buffer, | |
* and merged as appropriate. | |
*/ | |
-static void __naked | |
+static void notrace __naked | |
mc_copy_user_page(void *from, void *to) | |
{ | |
/* | |
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c | |
index 2a5907b..36edc36 100644 | |
--- a/arch/arm/mm/fault-armv.c | |
+++ b/arch/arm/mm/fault-armv.c | |
@@ -28,6 +28,30 @@ | |
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; | |
#if __LINUX_ARM_ARCH__ < 6 | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+static void fcse_set_pte_shared(struct vm_area_struct *vma, | |
+ unsigned long address, pte_t *ptep) | |
+{ | |
+ pte_t entry; | |
+ | |
+ if (!(vma->vm_flags & VM_MAYSHARE) || address >= TASK_SIZE) | |
+ return; | |
+ | |
+ entry = *ptep; | |
+ if ((pte_val(entry) | |
+ & (L_PTE_PRESENT | PTE_CACHEABLE | L_PTE_RDONLY | L_PTE_DIRTY | L_PTE_SHARED)) | |
+ == (L_PTE_PRESENT | PTE_CACHEABLE | L_PTE_DIRTY)) { | |
+ pte_val(entry) |= L_PTE_SHARED; | |
+		/* Bypass set_pte_at() here: we are not changing | |
+		   hardware bits, so no flush is needed */ | |
+ ++vma->vm_mm->context.fcse.shared_dirty_pages; | |
+ *ptep = entry; | |
+ } | |
+} | |
+#else /* !CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#define fcse_set_pte_shared(vma, addr, ptep) do { } while (0) | |
+#endif /* !CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ | |
/* | |
* We take the easy way out of this problem - we make the | |
* PTE uncacheable. However, we leave the write buffer on. | |
@@ -65,6 +89,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, | |
return ret; | |
} | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
#if USE_SPLIT_PTLOCKS | |
/* | |
* If we are using split PTE locks, then we need to take the page | |
@@ -127,11 +152,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | |
return ret; | |
} | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
static void | |
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, | |
unsigned long addr, pte_t *ptep, unsigned long pfn) | |
{ | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
struct mm_struct *mm = vma->vm_mm; | |
struct vm_area_struct *mpnt; | |
unsigned long offset; | |
@@ -162,6 +189,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, | |
flush_dcache_mmap_unlock(mapping); | |
if (aliases) | |
do_adjust_pte(vma, addr, pfn, ptep); | |
+ else | |
+ fcse_set_pte_shared(vma, addr, ptep); | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ if (vma->vm_flags & VM_MAYSHARE) | |
+ do_adjust_pte(vma, addr, pfn, ptep); | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
} | |
/* | |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c | |
index 5dbf13f..d3259cb 100644 | |
--- a/arch/arm/mm/fault.c | |
+++ b/arch/arm/mm/fault.c | |
@@ -25,6 +25,7 @@ | |
#include <asm/system_misc.h> | |
#include <asm/system_info.h> | |
#include <asm/tlbflush.h> | |
+#include <asm/fcse.h> | |
#include "fault.h" | |
@@ -63,7 +64,12 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |
if (!mm) | |
mm = &init_mm; | |
- printk(KERN_ALERT "pgd = %p\n", mm->pgd); | |
+#ifdef CONFIG_ARM_FCSE | |
+ printk(KERN_ALERT "fcse pid: %ld, 0x%08lx, hw pid: 0x%08lx\n", | |
+ mm->context.fcse.pid >> FCSE_PID_SHIFT, | |
+ mm->context.fcse.pid, fcse_pid_get()); | |
+#endif /* CONFIG_ARM_FCSE */ | |
+ printk(KERN_ALERT "pgd = %p, hw pgd = %p\n", mm->pgd, cpu_get_pgd()); | |
pgd = pgd_offset(mm, addr); | |
printk(KERN_ALERT "[%08lx] *pgd=%08llx", | |
addr, (long long)pgd_val(*pgd)); | |
@@ -167,13 +173,15 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, | |
#ifdef CONFIG_DEBUG_USER | |
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || | |
((user_debug & UDBG_BUS) && (sig == SIGBUS))) { | |
- printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", | |
+ printk("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", | |
tsk->comm, sig, addr, fsr); | |
show_pte(tsk->mm, addr); | |
show_regs(regs); | |
} | |
#endif | |
+ fcse_notify_segv(tsk->mm, addr, regs); | |
+ | |
tsk->thread.address = addr; | |
tsk->thread.error_code = fsr; | |
tsk->thread.trap_no = 14; | |
@@ -268,6 +276,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
if (notify_page_fault(regs, fsr)) | |
return 0; | |
+ if (__ipipe_report_trap(IPIPE_TRAP_ACCESS,regs)) | |
+ return 0; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
tsk = current; | |
mm = tsk->mm; | |
@@ -428,6 +444,14 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |
if (addr < TASK_SIZE) | |
return do_page_fault(addr, fsr, regs); | |
+ if (__ipipe_report_trap(IPIPE_TRAP_ACCESS,regs)) | |
+ return 0; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
if (user_mode(regs)) | |
goto bad_area; | |
@@ -494,6 +518,15 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |
static int | |
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
{ | |
+ | |
+ if (__ipipe_report_trap(IPIPE_TRAP_SECTION,regs)) | |
+ return 0; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
do_bad_area(addr, fsr, regs); | |
return 0; | |
} | |
@@ -504,6 +537,9 @@ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
static int | |
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
{ | |
+ if (__ipipe_report_trap(IPIPE_TRAP_DABT,regs)) | |
+ return 0; | |
+ | |
return 1; | |
} | |
@@ -543,9 +579,19 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
const struct fsr_info *inf = fsr_info + fsr_fs(fsr); | |
struct siginfo info; | |
+ addr = fcse_mva_to_va(addr); | |
+ | |
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) | |
return; | |
+ if (__ipipe_report_trap(IPIPE_TRAP_UNKNOWN,regs)) | |
+ return; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", | |
inf->name, fsr, addr); | |
@@ -554,6 +600,11 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
info.si_code = inf->code; | |
info.si_addr = (void __user *)addr; | |
arm_notify_die("", regs, &info, fsr, 0); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ hard_local_irq_disable(); | |
+ __ipipe_root_status &= ~IPIPE_STALL_FLAG; | |
+#endif | |
} | |
void __init | |
@@ -578,6 +629,11 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) | |
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) | |
return; | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_stall_root(); | |
+ hard_local_irq_enable(); | |
+#endif | |
+ | |
printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", | |
inf->name, ifsr, addr); | |
@@ -586,6 +642,11 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) | |
info.si_code = inf->code; | |
info.si_addr = (void __user *)addr; | |
arm_notify_die("", regs, &info, ifsr, 0); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ hard_local_irq_disable(); | |
+ __ipipe_root_status &= ~IPIPE_STALL_FLAG; | |
+#endif | |
} | |
#ifndef CONFIG_ARM_LPAE | |
@@ -611,4 +672,51 @@ static int __init exceptions_init(void) | |
} | |
arch_initcall(exceptions_init); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+extern spinlock_t pgd_lock; | |
+extern struct page *pgd_list; | |
+ | |
+static void vmalloc_sync_one(pgd_t *pgd, unsigned long addr) | |
+{ | |
+ unsigned int index = pgd_index(addr); | |
+ pgd_t *pgd_k; | |
+ pud_t *pud, *pud_k; | |
+ pmd_t *pmd, *pmd_k; | |
+ | |
+ pgd += index; | |
+ pgd_k = init_mm.pgd + index; | |
+ | |
+ if (!pgd_present(*pgd)) | |
+ set_pgd(pgd, *pgd_k); | |
+ | |
+ pud = pud_offset(pgd, addr); | |
+ pud_k = pud_offset(pgd_k, addr); | |
+ | |
+ if (!pud_present(*pud)) | |
+ set_pud(pud, *pud_k); | |
+ | |
+ pmd = pmd_offset(pud, addr); | |
+ pmd_k = pmd_offset(pud_k, addr); | |
+ | |
+ copy_pmd(pmd, pmd_k); | |
+} | |
+ | |
+void __ipipe_pin_range_globally(unsigned long start, unsigned long end) | |
+{ | |
+ unsigned long next, addr = start; | |
+ | |
+ do { | |
+ unsigned long flags; | |
+ struct page *page; | |
+ | |
+ next = pgd_addr_end(addr, end); | |
+ spin_lock_irqsave(&pgd_lock, flags); | |
+ for (page = pgd_list; page; page = (struct page *)page->index) | |
+ vmalloc_sync_one(page_address(page), addr); | |
+ spin_unlock_irqrestore(&pgd_lock, flags); | |
+ | |
+ } while (addr = next, addr != end); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
#endif | |
diff --git a/arch/arm/mm/fcse.c b/arch/arm/mm/fcse.c | |
new file mode 100644 | |
index 0000000..a5e23dc | |
--- /dev/null | |
+++ b/arch/arm/mm/fcse.c | |
@@ -0,0 +1,460 @@ | |
+/* | |
+ * arch/arm/kernel/fcse.c | |
+ * | |
+ * Helper functions for using the ARM Fast Context Switch Extension with | |
+ * processors supporting it. | |
+ * | |
+ * Copyright (C) 2008 Richard Cochran | |
+ * Copyright (C) 2009-2011 Gilles Chanteperdrix <[email protected]> | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License version 2 as | |
+ * published by the Free Software Foundation. | |
+ */ | |
+ | |
+#include <linux/bitops.h> | |
+#include <linux/memory.h> | |
+#include <linux/spinlock.h> | |
+#include <linux/mm.h> | |
+#include <linux/kernel_stat.h> | |
+#include <linux/mman.h> | |
+#include <linux/dcache.h> | |
+#include <linux/fs.h> | |
+#include <linux/hardirq.h> | |
+#include <linux/export.h> | |
+ | |
+#include <asm/fcse.h> | |
+#include <asm/cacheflush.h> | |
+#include <asm/tlbflush.h> | |
+#include <asm/system_misc.h> /* For user_debug, UDBG_SEGV */ | |
+ | |
+#define PIDS_LONGS ((FCSE_NR_PIDS + BITS_PER_LONG - 1) / BITS_PER_LONG) | |
+ | |
+static IPIPE_DEFINE_RAW_SPINLOCK(fcse_lock); | |
+static unsigned long fcse_pids_bits[PIDS_LONGS]; | |
+unsigned long fcse_pids_cache_dirty[PIDS_LONGS]; | |
+EXPORT_SYMBOL(fcse_pids_cache_dirty); | |
+ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+static unsigned random_pid; | |
+struct mm_struct *fcse_large_process; | |
+struct fcse_user fcse_pids_user[FCSE_NR_PIDS]; | |
+static struct mm_struct *fcse_cur_mm = &init_mm; | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ | |
+static inline void fcse_pid_reference_inner(unsigned fcse_pid) | |
+{ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ if (++fcse_pids_user[fcse_pid].count == 1) | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ __set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_bits); | |
+} | |
+ | |
+static inline void fcse_pid_dereference(struct mm_struct *mm) | |
+{ | |
+ unsigned fcse_pid = mm->context.fcse.pid >> FCSE_PID_SHIFT; | |
+ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ if (--fcse_pids_user[fcse_pid].count == 0) | |
+ __clear_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_bits); | |
+ | |
+	 * The following assumes that, by the time this function is | |
+	 * called, this mm is out of the cache: | |
+ * function is called, this mm is out of cache: | |
+ * - when the caller is destroy_context, exit_mmap is called | |
+ * by mmput before, which flushes the cache; | |
+ * - when the caller is fcse_relocate_mm_to_pid from | |
+ * fcse_switch_mm_inner, we only relocate when the mm is out | |
+ * of cache; | |
+ * - when the caller is fcse_relocate_mm_to_pid from | |
+ * fcse_relocate_mm_to_null_pid, we flush the cache in this | |
+ * function. | |
+ */ | |
+ if (fcse_pids_user[fcse_pid].mm == mm) { | |
+ fcse_pids_user[fcse_pid].mm = NULL; | |
+ __clear_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty); | |
+ } | |
+ if (fcse_large_process == mm) | |
+ fcse_large_process = NULL; | |
+#else /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ __clear_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_bits); | |
+ __clear_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty); | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+} | |
+ | |
+static inline long find_free_pid(unsigned long bits[]) | |
+{ | |
+ return FCSE_PID_MAX - find_first_zero_bit(bits, FCSE_NR_PIDS); | |
+} | |
+ | |
+void fcse_pid_free(struct mm_struct *mm) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ fcse_pid_dereference(mm); | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+} | |
+ | |
+int fcse_pid_alloc(struct mm_struct *mm) | |
+{ | |
+ unsigned long flags; | |
+ unsigned fcse_pid; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ fcse_pid = find_free_pid(fcse_pids_bits); | |
+ if (fcse_pid == -1) { | |
+ /* Allocate zero pid last, since zero pid is also used by | |
+ processes with address space larger than 32MB in | |
+ best-effort mode. */ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ if(++random_pid == FCSE_NR_PIDS) { | |
+ if (fcse_large_process) { | |
+ random_pid = | |
+ fcse_large_process->context.fcse.highest_pid + 1; | |
+ if (random_pid == FCSE_NR_PIDS) | |
+ random_pid = 0; | |
+ } else | |
+ random_pid = 0; | |
+ } | |
+ fcse_pid = random_pid; | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+#ifdef CONFIG_ARM_FCSE_MESSAGES | |
+ printk(KERN_WARNING "FCSE: %s[%d] would exceed the %lu processes limit.\n", | |
+ current->comm, current->pid, FCSE_NR_PIDS); | |
+#endif /* CONFIG_ARM_FCSE_MESSAGES */ | |
+ return -EAGAIN; | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ } | |
+ fcse_pid_reference_inner(fcse_pid); | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+ | |
+ return fcse_pid; | |
+} | |
+ | |
+static inline void fcse_clear_dirty_all(void) | |
+{ | |
+ switch(ARRAY_SIZE(fcse_pids_cache_dirty)) { | |
+ case 3: | |
+ fcse_pids_cache_dirty[2] = 0UL; | |
+ case 2: | |
+ fcse_pids_cache_dirty[1] = 0UL; | |
+ case 1: | |
+ fcse_pids_cache_dirty[0] = 0UL; | |
+ } | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ fcse_large_process = NULL; | |
+#endif | |
+} | |
+ | |
+unsigned fcse_flush_all_start(void) | |
+{ | |
+ if (!cache_is_vivt()) | |
+ return 0; | |
+ | |
+#ifndef CONFIG_ARM_FCSE_PREEMPT_FLUSH | |
+ preempt_disable(); | |
+#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */ | |
+ | |
+#if defined(CONFIG_IPIPE) | |
+ clear_ti_thread_flag(current_thread_info(), TIF_SWITCHED); | |
+#elif defined(CONFIG_ARM_FCSE_PREEMPT_FLUSH) | |
+ return nr_context_switches(); | |
+#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */ | |
+ | |
+ return 0; | |
+} | |
+ | |
+noinline void | |
+fcse_flush_all_done(unsigned seq, unsigned dirty) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ if (!cache_is_vivt()) | |
+ return; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+#if defined(CONFIG_IPIPE) | |
+ if (!test_ti_thread_flag(current_thread_info(), TIF_SWITCHED)) | |
+#elif defined(CONFIG_ARM_FCSE_PREEMPT_FLUSH) | |
+ if (seq == nr_context_switches()) | |
+#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */ | |
+ fcse_clear_dirty_all(); | |
+ | |
+ if (dirty && current->mm != &init_mm && current->mm) { | |
+ unsigned fcse_pid = | |
+ current->mm->context.fcse.pid >> FCSE_PID_SHIFT; | |
+ __set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty); | |
+ } | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+#ifndef CONFIG_ARM_FCSE_PREEMPT_FLUSH | |
+ preempt_enable(); | |
+#endif /* CONFIG_ARM_FCSE_PREEMPT_FLUSH */ | |
+} | |
+ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+/* Called with preemption disabled, mm->mmap_sem being held for writing. */ | |
+static noinline int fcse_relocate_mm_to_pid(struct mm_struct *mm, int fcse_pid) | |
+{ | |
+ const unsigned len = pgd_index(FCSE_TASK_SIZE) * sizeof(pgd_t); | |
+ unsigned long flags; | |
+ pgd_t *from, *to; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ fcse_pid_dereference(mm); | |
+ fcse_pid_reference_inner(fcse_pid); | |
+ fcse_pids_user[fcse_pid].mm = mm; | |
+ __set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty); | |
+ if (mm->context.fcse.large) | |
+ fcse_large_process = mm; | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+ | |
+ from = pgd_offset(mm, 0); | |
+ mm->context.fcse.pid = fcse_pid << FCSE_PID_SHIFT; | |
+ to = pgd_offset(mm, 0); | |
+ | |
+ memcpy(to, from, len); | |
+ memset(from, '\0', len); | |
+ barrier(); | |
+ clean_dcache_area(from, len); | |
+ clean_dcache_area(to, len); | |
+ | |
+ return fcse_pid; | |
+} | |
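The from/to dance works because assigning context.fcse.pid changes the modified virtual addresses that pgd_offset() computes for this mm, so the same va 0 selects a different group of pgd entries before and after the assignment. A worked example, assuming FCSE_PID_SHIFT is 25 (the architectural FCSE position, giving 32MB per PID slot):

	/*
	 * Example: relocating from pid 1 to pid 3.
	 *
	 *   pid 1: user VAs 0..32MB live at MVAs 0x02000000..0x04000000
	 *   pid 3: the same VAs now live at MVAs 0x06000000..0x08000000
	 *
	 * so the FCSE_TASK_SIZE worth of pgd entries is copied from the
	 * slot starting at pgd_index(0x02000000) to the one starting at
	 * pgd_index(0x06000000), the old slot is zeroed, and both areas
	 * are cleaned from the D-cache so the MMU table walker sees them.
	 */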
+ | |
+static int fcse_flush_needed_p(struct mm_struct *next) | |
+{ | |
+ unsigned fcse_pid = next->context.fcse.pid >> FCSE_PID_SHIFT; | |
+ unsigned flush_needed = 0; | |
+ | |
+ if (next == &init_mm) | |
+ goto check_cur; | |
+ | |
+ if (fcse_pids_user[fcse_pid].mm != next) | |
+ if (fcse_pids_user[fcse_pid].mm) | |
+ flush_needed = test_bit(FCSE_PID_MAX - fcse_pid, | |
+ fcse_pids_cache_dirty); | |
+ | |
+ if (flush_needed == 0 | |
+ && fcse_large_process | |
+ && fcse_large_process != next | |
+ && fcse_pid <= fcse_large_process->context.fcse.highest_pid) | |
+ flush_needed = 1; | |
+ | |
+ check_cur: | |
+ if (flush_needed == 0 && fcse_cur_mm->context.fcse.shared_dirty_pages) | |
+ flush_needed = 1; | |
+ | |
+ return flush_needed; | |
+} | |
+ | |
+int fcse_switch_mm_start_inner(struct mm_struct *next) | |
+{ | |
+ unsigned flush_needed; | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ flush_needed = fcse_flush_needed_p(next); | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+ | |
+ return flush_needed; | |
+} | |
+EXPORT_SYMBOL_GPL(fcse_switch_mm_start_inner); | |
+ | |
+void fcse_switch_mm_end_inner(struct mm_struct *next) | |
+{ | |
+ unsigned fcse_pid = next->context.fcse.pid >> FCSE_PID_SHIFT; | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ if (fcse_flush_needed_p(next)) | |
+ fcse_clear_dirty_all(); | |
+ | |
+ fcse_pid_set(fcse_pid << FCSE_PID_SHIFT); | |
+ if (next != &init_mm) { | |
+ __set_bit(FCSE_PID_MAX - fcse_pid, fcse_pids_cache_dirty); | |
+ if (next->context.fcse.large) | |
+ fcse_large_process = next; | |
+ if (fcse_pids_user[fcse_pid].mm != next) | |
+ fcse_pids_user[fcse_pid].mm = next; | |
+ } | |
+ fcse_cur_mm = next; | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+} | |
+EXPORT_SYMBOL_GPL(fcse_switch_mm_end_inner); | |
+ | |
+void fcse_pid_reference(unsigned fcse_pid) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave(&fcse_lock, flags); | |
+ fcse_pid_reference_inner(fcse_pid); | |
+ raw_spin_unlock_irqrestore(&fcse_lock, flags); | |
+} | |
+ | |
+/* Called with mm->mmap_sem write-locked. */ | |
+static noinline void fcse_relocate_mm_to_null_pid(struct mm_struct *mm) | |
+{ | |
+ if (!cache_is_vivt()) | |
+ return; | |
+ | |
+ preempt_disable(); | |
+ while (fcse_mm_in_cache(mm)) { | |
+ unsigned seq; | |
+ | |
+ preempt_enable(); | |
+ | |
+ seq = fcse_flush_all_start(); | |
+ flush_cache_all(); | |
+ | |
+ preempt_disable(); | |
+ fcse_flush_all_done(seq, 0); | |
+ } | |
+ | |
+ fcse_relocate_mm_to_pid(mm, 0); | |
+ barrier(); | |
+ flush_tlb_mm(mm); | |
+ fcse_pid_set(0); | |
+ | |
+ preempt_enable(); | |
+} | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+ | |
+unsigned long | |
+fcse_check_mmap_inner(struct mm_struct *mm, | |
+ struct vm_unmapped_area_info *info, | |
+ unsigned long addr, unsigned long flags) | |
+{ | |
+ if (flags & MAP_FIXED) | |
+ goto skip_retry; | |
+ | |
+ /* Retry the mmap, this time allowing addresses above 32MB */ | |
+ info->flags = 0; | |
+ info->low_limit = PAGE_ALIGN(mm->start_stack); | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ info->high_limit = TASK_SIZE; | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ info->high_limit = FCSE_TASK_SIZE; | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ addr = vm_unmapped_area(info); | |
+ | |
+ if ((addr & ~PAGE_MASK) == 0 && addr + info->length <= FCSE_TASK_SIZE) | |
+ return addr; | |
+ | |
+ /* Could not find an address */ | |
+ skip_retry: | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ if (addr & ~PAGE_MASK) | |
+ return addr; | |
+ | |
+ /* Not the first time this process gets addresses above 32MB */ | |
+ if (mm->context.fcse.large) | |
+ return addr; | |
+ | |
+#ifdef CONFIG_ARM_FCSE_MESSAGES | |
+ printk(KERN_INFO "FCSE: process %u(%s) VM exceeds 32MB.\n", | |
+ current->pid, current->comm); | |
+#endif /* CONFIG_ARM_FCSE_MESSAGES */ | |
+ mm->context.fcse.large = 1; | |
+ if (mm->context.fcse.pid) | |
+ fcse_relocate_mm_to_null_pid(mm); | |
+ | |
+ return addr; | |
+ | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ /* Address above 32MB: guaranteed mode does not allow processes larger than 32MB. */ | |
+#ifdef CONFIG_ARM_FCSE_MESSAGES | |
+ if ((flags & MAP_BRK) == 0) | |
+ printk(KERN_WARNING | |
+ "FCSE: process %u(%s) VM would exceed the 32MB limit.\n", | |
+ current->pid, current->comm); | |
+#endif /* CONFIG_ARM_FCSE_MESSAGES */ | |
+ return -ENOMEM; | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+} | |
+ | |
+#ifdef CONFIG_ARM_FCSE_MESSAGES | |
+#define addr_in_vma(vma, addr) \ | |
+ ({ \ | |
+ struct vm_area_struct *_vma = (vma); \ | |
+ ((unsigned long)((addr) - _vma->vm_start) \ | |
+ < (unsigned long)((_vma->vm_end - _vma->vm_start))); \ | |
+ }) | |
+ | |
+#ifdef CONFIG_DEBUG_USER | |
+static noinline void | |
+dump_vmas(struct mm_struct *mm, unsigned long addr, struct pt_regs *regs) | |
+{ | |
+ struct vm_area_struct *vma; | |
+ char path[128]; | |
+ int locked = 0; | |
+ | |
+ printk("mappings:\n"); | |
+ if (!in_atomic()) | |
+ locked = down_read_trylock(&mm->mmap_sem); | |
+ for (vma = mm->mmap; vma; vma = vma->vm_next) { | |
+ struct file *file = vma->vm_file; | |
+ int flags = vma->vm_flags; | |
+ const char *name; | |
+ | |
+ printk("0x%08lx-0x%08lx %c%c%c%c 0x%08llx ", | |
+ vma->vm_start, | |
+ vma->vm_end, | |
+ flags & VM_READ ? 'r' : '-', | |
+ flags & VM_WRITE ? 'w' : '-', | |
+ flags & VM_EXEC ? 'x' : '-', | |
+ flags & VM_MAYSHARE ? 's' : 'p', | |
+ ((loff_t)vma->vm_pgoff) << PAGE_SHIFT); | |
+ | |
+ if (file) | |
+ name = d_path(&file->f_path, path, sizeof(path)); | |
+ else if ((name = arch_vma_name(vma))) | |
+ ; | |
+ else if (!vma->vm_mm) | |
+ name = "[vdso]"; | |
+ else if (vma->vm_start <= mm->start_brk | |
+ && vma->vm_end >= mm->brk) | |
+ name = "[heap]"; | |
+ else if (vma->vm_start <= mm->start_stack && | |
+ vma->vm_end >= mm->start_stack) | |
+ name = "[stack]"; | |
+ else | |
+ name = ""; | |
+ printk("%s", name); | |
+ if (addr_in_vma(vma, regs->ARM_pc)) | |
+ printk(" <- PC"); | |
+ if (addr_in_vma(vma, regs->ARM_sp)) | |
+ printk(" <- SP"); | |
+ if (addr_in_vma(vma, addr)) | |
+ printk("%s fault", | |
+ (addr_in_vma(vma, regs->ARM_pc) | |
+ || addr_in_vma(vma, regs->ARM_sp) | |
+ ? "," : " <-")); | |
+ printk("\n"); | |
+ } | |
+ if (locked) | |
+ up_read(&mm->mmap_sem); | |
+} | |
+#endif /* CONFIG_DEBUG_USER */ | |
+ | |
+void fcse_notify_segv(struct mm_struct *mm, | |
+ unsigned long addr, struct pt_regs *regs) | |
+{ | |
+ int locked = 0; | |
+ | |
+#if defined(CONFIG_DEBUG_USER) | |
+ if (user_debug & UDBG_SEGV) | |
+ dump_vmas(mm, addr, regs); | |
+#endif /* CONFIG_DEBUG_USER */ | |
+ | |
+ if (!in_atomic()) | |
+ locked = down_read_trylock(&mm->mmap_sem); | |
+ if (find_vma(mm, addr) == find_vma(mm, regs->ARM_sp)) | |
+ printk(KERN_INFO "FCSE: process %u(%s) probably overflowed stack at 0x%08lx.\n", | |
+ current->pid, current->comm, regs->ARM_pc); | |
+ if (locked) | |
+ up_read(&mm->mmap_sem); | |
+} | |
+#endif /* CONFIG_ARM_FCSE_MESSAGES */ | |
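Several call sites in this patch (the relocation helper above, pgd_free() further below) go through fcse_va_to_mva(), which is defined in the FCSE headers rather than in this file. A sketch of its expected behaviour per the ARM FCSE rule (the PID is OR-ed into addresses below the 32MB window; everything else passes through unchanged):

	static inline unsigned long
	fcse_va_to_mva(struct mm_struct *mm, unsigned long va)
	{
		/* Sketch, assuming the fcse.h definition follows the
		 * architectural address-relocation rule. */
		if (cache_is_vivt() && va < FCSE_TASK_SIZE)
			return mm->context.fcse.pid | va;
		return va;
	}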
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c | |
index 32aa586..7f51c14 100644 | |
--- a/arch/arm/mm/flush.c | |
+++ b/arch/arm/mm/flush.c | |
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |
unsigned long uaddr, void *kaddr, unsigned long len) | |
{ | |
if (cache_is_vivt()) { | |
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { | |
+ if (fcse_mm_in_cache(vma->vm_mm)) { | |
unsigned long addr = (unsigned long)kaddr; | |
__cpuc_coherent_kern_range(addr, addr + len); | |
} | |
@@ -153,6 +153,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |
#ifdef CONFIG_SMP | |
preempt_disable(); | |
#endif | |
+ fcse_flush_cache_user_range(vma, uaddr, uaddr + len); | |
memcpy(dst, src, len); | |
flush_ptrace_access(vma, page, uaddr, dst, len); | |
#ifdef CONFIG_SMP | |
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c | |
index 83cb3ac..d3039f4 100644 | |
--- a/arch/arm/mm/idmap.c | |
+++ b/arch/arm/mm/idmap.c | |
@@ -110,7 +110,7 @@ early_initcall(init_static_idmap); | |
void setup_mm_for_reboot(void) | |
{ | |
/* Switch to the identity mapping. */ | |
- cpu_switch_mm(idmap_pgd, &init_mm); | |
+ cpu_switch_mm(idmap_pgd, &init_mm, 1); | |
local_flush_bp_all(); | |
#ifdef CONFIG_CPU_HAS_ASID | |
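The extra third argument here is the flag this patch threads through cpu_switch_mm() down to the proc-*.S entry points, where it arrives in r2: non-zero requests the full cache/TLB flush path, zero lets FCSE best-effort switches skip it. Assumed shape of the patched wrapper (the real definition, in asm/proc-fns.h, is not part of this excerpt):

	#define cpu_switch_mm(pgd, mm, flush) \
		cpu_do_switch_mm(virt_to_phys(pgd), mm, flush)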
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c | |
index 04d9006..34200c2 100644 | |
--- a/arch/arm/mm/ioremap.c | |
+++ b/arch/arm/mm/ioremap.c | |
@@ -328,6 +328,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |
} | |
flush_cache_vmap(addr, addr + size); | |
+ __ipipe_pin_range_globally(addr, addr + size); | |
return (void __iomem *) (offset + addr); | |
} | |
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c | |
index 10062ce..c206655 100644 | |
--- a/arch/arm/mm/mmap.c | |
+++ b/arch/arm/mm/mmap.c | |
@@ -32,6 +32,7 @@ static int mmap_is_legacy(void) | |
static unsigned long mmap_base(unsigned long rnd) | |
{ | |
+#ifndef CONFIG_ARM_FCSE | |
unsigned long gap = rlimit(RLIMIT_STACK); | |
if (gap < MIN_GAP) | |
@@ -40,6 +41,9 @@ static unsigned long mmap_base(unsigned long rnd) | |
gap = MAX_GAP; | |
return PAGE_ALIGN(TASK_SIZE - gap - rnd); | |
+#else /* CONFIG_ARM_FCSE */ | |
+ return PAGE_ALIGN(FCSE_TASK_SIZE - rlimit(RLIMIT_STACK) - rnd); | |
+#endif /* CONFIG_ARM_FCSE */ | |
} | |
/* | |
@@ -75,12 +79,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
if (aliasing && flags & MAP_SHARED && | |
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | |
return -EINVAL; | |
- return addr; | |
+ goto found_addr; | |
} | |
if (len > TASK_SIZE) | |
return -ENOMEM; | |
+ info.length = len; | |
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
+ info.align_offset = pgoff << PAGE_SHIFT; | |
+ | |
if (addr) { | |
if (do_align) | |
addr = COLOUR_ALIGN(addr, pgoff); | |
@@ -90,16 +98,18 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
vma = find_vma(mm, addr); | |
if (TASK_SIZE - len >= addr && | |
(!vma || addr + len <= vma->vm_start)) | |
- return addr; | |
+ goto found_addr; | |
} | |
info.flags = 0; | |
- info.length = len; | |
info.low_limit = mm->mmap_base; | |
- info.high_limit = TASK_SIZE; | |
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
- info.align_offset = pgoff << PAGE_SHIFT; | |
- return vm_unmapped_area(&info); | |
+ info.high_limit = fcse() == 0 ? TASK_SIZE : | |
+ (PAGE_ALIGN(mm->start_stack) | |
+ - rlimit(RLIMIT_STACK) - PAGE_SIZE); | |
+ addr = vm_unmapped_area(&info); | |
+ | |
+ found_addr: | |
+ return fcse_check_mmap_addr(mm, addr, len, &info, flags); | |
} | |
unsigned long | |
@@ -129,9 +139,13 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
if (aliasing && flags & MAP_SHARED && | |
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | |
return -EINVAL; | |
- return addr; | |
+ goto found_addr; | |
} | |
+ info.length = len; | |
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
+ info.align_offset = pgoff << PAGE_SHIFT; | |
+ | |
/* requesting a specific address */ | |
if (addr) { | |
if (do_align) | |
@@ -139,18 +153,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
else | |
addr = PAGE_ALIGN(addr); | |
vma = find_vma(mm, addr); | |
- if (TASK_SIZE - len >= addr && | |
+ if (TASK_SIZE - len > addr && | |
(!vma || addr + len <= vma->vm_start)) | |
- return addr; | |
+ goto found_addr; | |
} | |
info.flags = VM_UNMAPPED_AREA_TOPDOWN; | |
- info.length = len; | |
info.low_limit = PAGE_SIZE; | |
+ | |
+ if (fcse()) { | |
+ unsigned long top, bottom, shift; | |
+ | |
+ BUG_ON(mm->start_stack == 0); | |
+ top = PAGE_ALIGN(mm->start_stack); | |
+ bottom = top - rlimit(RLIMIT_STACK); | |
+ shift = FCSE_TASK_SIZE - (top - PAGE_SIZE); | |
+ if (mm->mmap_base > bottom) | |
+ mm->mmap_base -= shift; | |
+ } | |
info.high_limit = mm->mmap_base; | |
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
- info.align_offset = pgoff << PAGE_SHIFT; | |
+ | |
addr = vm_unmapped_area(&info); | |
+ found_addr: | |
+ addr = fcse_check_mmap_addr(mm, addr, len, &info, flags); | |
/* | |
* A failed mmap() very likely causes application failure, | |
@@ -158,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
* can happen with large stack limits and large mmap() | |
* allocations. | |
*/ | |
- if (addr & ~PAGE_MASK) { | |
+ if (!fcse() && addr & ~PAGE_MASK) { | |
VM_BUG_ON(addr != -ENOMEM); | |
info.flags = 0; | |
info.low_limit = mm->mmap_base; | |
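To make the FCSE layout concrete, a worked example of the patched mmap_base() arithmetic, assuming the usual 32MB FCSE_TASK_SIZE, an 8MB RLIMIT_STACK and no randomization:

	/*
	 *   FCSE_TASK_SIZE       = 0x02000000  (32MB)
	 *   rlimit(RLIMIT_STACK) = 0x00800000  ( 8MB)
	 *   rnd                  = 0
	 *
	 *   mmap_base = PAGE_ALIGN(0x02000000 - 0x00800000 - 0)
	 *             = 0x01800000  (24MB)
	 *
	 * The top 8MB of the 32MB window is reserved for the stack and
	 * mmaps grow top-down from 24MB.
	 */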
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c | |
index 0acb089..60f5fdb 100644 | |
--- a/arch/arm/mm/pgd.c | |
+++ b/arch/arm/mm/pgd.c | |
@@ -27,6 +27,43 @@ | |
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) | |
#endif | |
+#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) | |
+ | |
+#ifdef CONFIG_IPIPE | |
+/* Copied from arch/i386/mm/pgtable.c; maintains the list of pgds for the | |
+ implementation of ipipe_pin_range_globally in arch/arm/mm/fault.c. */ | |
+DEFINE_SPINLOCK(pgd_lock); | |
+struct page *pgd_list; | |
+ | |
+#define pgd_list_lock(flags) spin_lock_irqsave(&pgd_lock, flags) | |
+#define pgd_list_unlock(flags) spin_unlock_irqrestore(&pgd_lock, flags) | |
+ | |
+static inline void pgd_list_add(pgd_t *pgd) | |
+{ | |
+ struct page *page = virt_to_page(pgd); | |
+ page->index = (unsigned long)pgd_list; | |
+ if (pgd_list) | |
+ set_page_private(pgd_list, (unsigned long)&page->index); | |
+ pgd_list = page; | |
+ set_page_private(page, (unsigned long)&pgd_list); | |
+} | |
+ | |
+static inline void pgd_list_del(pgd_t *pgd) | |
+{ | |
+ struct page *next, **pprev, *page = virt_to_page(pgd); | |
+ next = (struct page *)page->index; | |
+ pprev = (struct page **)page_private(page); | |
+ *pprev = next; | |
+ if (next) | |
+ set_page_private(next, (unsigned long)pprev); | |
+} | |
+#else /* !CONFIG_IPIPE */ | |
+#define pgd_list_lock(flags) ((void) (flags)) | |
+#define pgd_list_unlock(flags) ((void) (flags)) | |
+#define pgd_list_add(pgd) do { } while (0) | |
+#define pgd_list_del(pgd) do { } while (0) | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
/* | |
* need to get a 16k page for level 1 | |
*/ | |
@@ -36,6 +73,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |
pud_t *new_pud, *init_pud; | |
pmd_t *new_pmd, *init_pmd; | |
pte_t *new_pte, *init_pte; | |
+ unsigned long flags; | |
new_pgd = __pgd_alloc(); | |
if (!new_pgd) | |
@@ -47,8 +85,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |
* Copy over the kernel and IO PGD entries | |
*/ | |
init_pgd = pgd_offset_k(0); | |
+ pgd_list_lock(flags); | |
memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, | |
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | |
+ pgd_list_add(new_pgd); | |
+ pgd_list_unlock(flags); | |
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | |
@@ -67,6 +108,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |
#endif | |
if (!vectors_high()) { | |
+#ifdef CONFIG_ARM_FCSE | |
+ /* FCSE does not work without high vectors. */ | |
+ BUG(); | |
+#endif /* CONFIG_ARM_FCSE */ | |
+ | |
/* | |
* On ARM, first page must always be allocated since it | |
* contains the machine vectors. The vectors are always high | |
@@ -106,6 +152,7 @@ no_pgd: | |
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) | |
{ | |
+ unsigned long flags; | |
pgd_t *pgd; | |
pud_t *pud; | |
pmd_t *pmd; | |
@@ -118,7 +165,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) | |
if (pgd_none_or_clear_bad(pgd)) | |
goto no_pgd; | |
- pud = pud_offset(pgd, 0); | |
+ pud = pud_offset(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0); | |
if (pud_none_or_clear_bad(pud)) | |
goto no_pud; | |
@@ -136,6 +183,9 @@ no_pud: | |
pgd_clear(pgd); | |
pud_free(mm, pud); | |
no_pgd: | |
+ pgd_list_lock(flags); | |
+ pgd_list_del(pgd); | |
+ pgd_list_unlock(flags); | |
#ifdef CONFIG_ARM_LPAE | |
/* | |
* Free modules/pkmap or identity pmd tables. | |
@@ -153,6 +203,7 @@ no_pgd: | |
pmd_free(mm, pmd); | |
pgd_clear(pgd); | |
pud_free(mm, pud); | |
+ | |
} | |
#endif | |
__pgd_free(pgd_base); | |
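The list maintained above exists so the I-pipe core can propagate new kernel mappings (see the __ipipe_pin_range_globally() call added to ioremap.c earlier) into every live pgd: head-domain code must never take a vmalloc-area fault. A sketch of the assumed walker; the real one lives in arch/arm/mm/fault.c, which this excerpt does not show:

	void __ipipe_pin_range_globally(unsigned long start, unsigned long end)
	{
		unsigned long flags;
		struct page *page;

		pgd_list_lock(flags);
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd = (pgd_t *)page_address(page);

			/* Mirror init_mm's first-level entries covering
			 * [start, end) into this pgd. */
			memcpy(pgd + pgd_index(start), pgd_offset_k(start),
			       (pgd_index(end - 1) - pgd_index(start) + 1)
			       * sizeof(pgd_t));
		}
		pgd_list_unlock(flags);
	}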
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S | |
index 2556cf1..b75fce4 100644 | |
--- a/arch/arm/mm/proc-arm920.S | |
+++ b/arch/arm/mm/proc-arm920.S | |
@@ -347,6 +347,11 @@ ENTRY(cpu_arm920_dcache_clean_area) | |
ENTRY(cpu_arm920_switch_mm) | |
#ifdef CONFIG_MMU | |
mov ip, #0 | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ cmp r2, #0 | |
+ beq 3f | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | |
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache | |
#else | |
@@ -364,6 +369,10 @@ ENTRY(cpu_arm920_switch_mm) | |
#endif | |
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
mcr p15, 0, ip, c7, c10, 4 @ drain WB | |
+#endif /* !CONFIG_ARM_FCSE_GUARANTEED */ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+3: | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer | |
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | |
#endif | |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S | |
index 344c8a5..315fc09 100644 | |
--- a/arch/arm/mm/proc-arm926.S | |
+++ b/arch/arm/mm/proc-arm926.S | |
@@ -368,6 +368,11 @@ ENTRY(cpu_arm926_dcache_clean_area) | |
ENTRY(cpu_arm926_switch_mm) | |
#ifdef CONFIG_MMU | |
mov ip, #0 | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ cmp r2, #0 | |
+ beq 2f | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | |
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache | |
#else | |
@@ -377,6 +382,10 @@ ENTRY(cpu_arm926_switch_mm) | |
#endif | |
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
mcr p15, 0, ip, c7, c10, 4 @ drain WB | |
+#endif /* !CONFIG_ARM_FCSE_GUARANTEED */ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+2: | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer | |
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | |
#endif | |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S | |
index 4106b09..e798ceb 100644 | |
--- a/arch/arm/mm/proc-feroceon.S | |
+++ b/arch/arm/mm/proc-feroceon.S | |
@@ -475,6 +475,12 @@ ENTRY(cpu_feroceon_dcache_clean_area) | |
.align 5 | |
ENTRY(cpu_feroceon_switch_mm) | |
#ifdef CONFIG_MMU | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ cmp r2, #0 | |
+ mov r2, lr | |
+ beq 2f | |
+#else /* !CONFIG_ARM_FCSE */ | |
/* | |
* Note: we wish to call __flush_whole_cache but we need to preserve | |
* lr to do so. The only way without touching main memory is to | |
@@ -482,12 +488,19 @@ ENTRY(cpu_feroceon_switch_mm) | |
* compensate locally for the skipped ops if it is not set. | |
*/ | |
mov r2, lr @ abuse r2 to preserve lr | |
+#endif /* !CONFIG_ARM_FCSE */ | |
bl __flush_whole_cache | |
@ if r2 contains the VM_EXEC bit then the next 2 ops are done already | |
tst r2, #VM_EXEC | |
mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
mcreq p15, 0, ip, c7, c10, 4 @ drain WB | |
+#ifdef CONFIG_ARM_FCSE | |
+2: | |
+#endif | |
+#else /* CONFIG_ARM_FCSE_GUARANTEED */ | |
+ mov r2, lr | |
+#endif /* CONFIG_ARM_FCSE_GUARANTEED */ | |
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer | |
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | |
mov pc, r2 | |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S | |
index e766f88..9551082 100644 | |
--- a/arch/arm/mm/proc-xscale.S | |
+++ b/arch/arm/mm/proc-xscale.S | |
@@ -471,9 +471,18 @@ ENTRY(cpu_xscale_dcache_clean_area) | |
*/ | |
.align 5 | |
ENTRY(cpu_xscale_switch_mm) | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+ cmp r2, #0 | |
+ beq 2f | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
+#ifndef CONFIG_ARM_FCSE_GUARANTEED | |
clean_d_cache r1, r2 | |
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB | |
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer | |
+#endif /* !CONFIG_ARM_FCSE_GUARANTEED */ | |
+#ifdef CONFIG_ARM_FCSE_BEST_EFFORT | |
+2: | |
+#endif /* CONFIG_ARM_FCSE_BEST_EFFORT */ | |
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer | |
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | |
cpwait_ret lr, ip | |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c | |
index 869254c..69438c1 100644 | |
--- a/arch/arm/plat-omap/dmtimer.c | |
+++ b/arch/arm/plat-omap/dmtimer.c | |
@@ -285,7 +285,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id) | |
return _omap_dm_timer_request(REQUEST_BY_ID, &id); | |
} | |
-EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific); | |
/** | |
* omap_dm_timer_request_by_cap - Request a timer by capability | |
@@ -365,6 +364,18 @@ int omap_dm_timer_get_irq(struct omap_dm_timer *timer) | |
} | |
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq); | |
+#ifdef CONFIG_IPIPE | |
+unsigned long omap_dm_timer_get_phys_counter_addr(struct omap_dm_timer *timer) | |
+{ | |
+ return timer->phys_base + (OMAP_TIMER_COUNTER_REG & 0xff); | |
+} | |
+ | |
+unsigned long omap_dm_timer_get_virt_counter_addr(struct omap_dm_timer *timer) | |
+{ | |
+ return (unsigned long)timer->io_base + (OMAP_TIMER_COUNTER_REG & 0xff); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
#if defined(CONFIG_ARCH_OMAP1) | |
#include <mach/hardware.h> | |
/** | |
@@ -563,7 +574,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_load); | |
/* Optimized set_load which removes costly spin wait in timer_start */ | |
int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, | |
- unsigned int load) | |
+ unsigned int load) | |
{ | |
u32 l; | |
@@ -827,6 +838,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |
} | |
timer->fclk = ERR_PTR(-ENODEV); | |
+ timer->phys_base = mem->start; | |
timer->io_base = devm_ioremap_resource(dev, mem); | |
if (IS_ERR(timer->io_base)) | |
return PTR_ERR(timer->io_base); | |
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h | |
index fb92abb..e593b85 100644 | |
--- a/arch/arm/plat-omap/include/plat/dmtimer.h | |
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h | |
@@ -104,6 +104,7 @@ struct omap_dm_timer { | |
int irq; | |
struct clk *fclk; | |
+ unsigned long phys_base; | |
void __iomem *io_base; | |
void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */ | |
void __iomem *irq_ena; /* irq enable */ | |
@@ -412,4 +413,9 @@ static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, | |
__raw_writel(value, timer->irq_stat); | |
} | |
+static inline unsigned long __omap_dm_timer_read_status(struct omap_dm_timer *timer) | |
+{ | |
+ return __raw_readl(timer->irq_stat); | |
+} | |
+ | |
#endif /* __ASM_ARCH_DMTIMER_H */ | |
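The new __omap_dm_timer_read_status() accessor presumably exists so an I-pipe ack handler can test and clear the interrupt without going through the runtime-PM-aware register helpers. A sketch of such a handler; the function name and its use of OMAP_TIMER_INT_OVERFLOW are illustrative, not taken from this patch:

	static void omap_itimer_ack(struct omap_dm_timer *timer)
	{
		unsigned long status = __omap_dm_timer_read_status(timer);

		if (status & OMAP_TIMER_INT_OVERFLOW)
			__omap_dm_timer_write_status(timer,
						     OMAP_TIMER_INT_OVERFLOW);
	}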
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h | |
index cf5aae5..160b5a0 100644 | |
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h | |
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h | |
@@ -71,7 +71,7 @@ struct samsung_gpio_chip { | |
void __iomem *base; | |
int irq_base; | |
int group; | |
- spinlock_t lock; | |
+ ipipe_spinlock_t lock; | |
#ifdef CONFIG_PM | |
u32 pm_save[4]; | |
#endif | |
diff --git a/arch/arm/plat-samsung/samsung-time.c b/arch/arm/plat-samsung/samsung-time.c | |
index f899cbc..f1cd386 100644 | |
--- a/arch/arm/plat-samsung/samsung-time.c | |
+++ b/arch/arm/plat-samsung/samsung-time.c | |
@@ -15,6 +15,8 @@ | |
#include <linux/clk.h> | |
#include <linux/clockchips.h> | |
#include <linux/platform_device.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/smp_twd.h> | |
#include <asm/mach/time.h> | |
@@ -232,12 +234,20 @@ void __init samsung_set_timer_source(enum samsung_timer_mode event, | |
timer_source.source_id = source; | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer itimer = { | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device time_event_device = { | |
.name = "samsung_event_timer", | |
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
.rating = 200, | |
.set_next_event = samsung_set_next_event, | |
.set_mode = samsung_set_mode, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) | |
@@ -246,6 +256,8 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) | |
evt->event_handler(evt); | |
+ __ipipe_tsc_update(); | |
+ | |
return IRQ_HANDLED; | |
} | |
@@ -275,13 +287,18 @@ static void __init samsung_clockevent_init(void) | |
clock_count_per_tick = clock_rate / HZ; | |
time_event_device.cpumask = cpumask_of(0); | |
+#ifdef CONFIG_IPIPE | |
+ time_event_device.irq = timer_source.event_id + IRQ_TIMER0; | |
+#endif /* CONFIG_IPIPE */ | |
clockevents_config_and_register(&time_event_device, clock_rate, 1, -1); | |
irq_number = timer_source.event_id + IRQ_TIMER0; | |
setup_irq(irq_number, &samsung_clock_event_irq); | |
} | |
-static void __iomem *samsung_timer_reg(void) | |
+static unsigned long samsung_phys_timer_reg(void) | |
{ | |
unsigned long offset = 0; | |
@@ -302,7 +319,12 @@ static void __iomem *samsung_timer_reg(void) | |
return NULL; | |
} | |
- return S3C_TIMERREG(offset); | |
+ return 0x00300000 + offset; | |
+} | |
+ | |
+static void __iomem *samsung_timer_reg(void) | |
+{ | |
+ return S3C_ADDR(samsung_phys_timer_reg()); | |
} | |
/* | |
@@ -322,6 +344,12 @@ static u32 notrace samsung_read_sched_clock(void) | |
return ~__raw_readl(reg); | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void __init samsung_clocksource_init(void) | |
{ | |
unsigned long pclk; | |
@@ -342,6 +370,14 @@ static void __init samsung_clocksource_init(void) | |
if (clocksource_mmio_init(samsung_timer_reg(), "samsung_clocksource_timer", | |
clock_rate, 250, TSIZE, clocksource_mmio_readl_down)) | |
panic("samsung_clocksource_timer: can't register clocksource\n"); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ tsc_info.u.mask = (1ULL << TSIZE) - 1; | |
+ tsc_info.freq = clock_rate; | |
+ tsc_info.counter_vaddr = (unsigned long)samsung_timer_reg(); | |
+ tsc_info.u.counter_paddr = samsung_phys_timer_reg(); | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
} | |
static void __init samsung_timer_resources(void) | |
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S | |
index 46e1749..6b5a37e 100644 | |
--- a/arch/arm/vfp/entry.S | |
+++ b/arch/arm/vfp/entry.S | |
@@ -27,7 +27,6 @@ ENTRY(do_vfp) | |
add r11, r4, #1 @ increment it | |
str r11, [r10, #TI_PREEMPT] | |
#endif | |
- enable_irq | |
ldr r4, .LCvfp | |
ldr r11, [r10, #TI_CPU] @ CPU number | |
add r10, r10, #TI_VFPSTATE @ r10 = workspace | |
@@ -35,6 +34,7 @@ ENTRY(do_vfp) | |
ENDPROC(do_vfp) | |
ENTRY(vfp_null_entry) | |
+ enable_irq | |
#ifdef CONFIG_PREEMPT_COUNT | |
get_thread_info r10 | |
ldr r4, [r10, #TI_PREEMPT] @ get preempt count | |
@@ -53,6 +53,7 @@ ENDPROC(vfp_null_entry) | |
__INIT | |
ENTRY(vfp_testing_entry) | |
+ enable_irq | |
#ifdef CONFIG_PREEMPT_COUNT | |
get_thread_info r10 | |
ldr r4, [r10, #TI_PREEMPT] @ get preempt count | |
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S | |
index 8d10dc8..4a84ec4 100644 | |
--- a/arch/arm/vfp/vfphw.S | |
+++ b/arch/arm/vfp/vfphw.S | |
@@ -20,7 +20,7 @@ | |
#include "../kernel/entry-header.S" | |
.macro DBGSTR, str | |
-#ifdef DEBUG | |
+#if defined(DEBUG) | |
stmfd sp!, {r0-r3, ip, lr} | |
ldr r0, =1f | |
bl printk | |
@@ -34,7 +34,7 @@ | |
.endm | |
.macro DBGSTR1, str, arg | |
-#ifdef DEBUG | |
+#if defined(DEBUG) | |
stmfd sp!, {r0-r3, ip, lr} | |
mov r1, \arg | |
ldr r0, =1f | |
@@ -49,7 +49,7 @@ | |
.endm | |
.macro DBGSTR3, str, arg1, arg2, arg3 | |
-#ifdef DEBUG | |
+#if defined(DEBUG) | |
stmfd sp!, {r0-r3, ip, lr} | |
mov r3, \arg3 | |
mov r2, \arg2 | |
@@ -98,6 +98,11 @@ ENTRY(vfp_support_entry) | |
@ thread wants ownership of the VFP hardware, save the old | |
@ state if there was a previous (valid) owner. | |
+ enable_irq | |
+#ifdef CONFIG_IPIPE | |
+ disable_irq | |
+ ldr r4, [r3, r11, lsl #2] @ reload vfp_current_hw_state pointer | |
+#endif | |
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | |
@ exceptions, so we can get at the | |
@ rest of it | |
@@ -135,6 +140,15 @@ vfp_reload_hw: | |
beq vfp_hw_state_valid | |
vfp_reload_hw: | |
+ enable_irq | |
+#ifdef CONFIG_IPIPE | |
+ disable_irq | |
+ mrc p15, 0, ip, c0, c0, 5 @ reload current CPU number | |
+ and r11, ip, #255 | |
+ ldr r4, vfp_cpu_logical_map_address | |
+ ldr r11, [r4, r11, lsl #2] | |
+#endif | |
+ | |
@ We're loading this threads state into the VFP hardware. Update | |
@ the CPU number which contains the most up to date VFP context. | |
str r11, [r10, #VFP_CPU] | |
@@ -169,6 +183,7 @@ vfp_hw_state_valid: | |
@ out before setting an FPEXC that | |
@ stops us reading stuff | |
VFPFMXR FPEXC, r1 @ Restore FPEXC last | |
+ enable_irq_cond | |
sub r2, r2, #4 @ Retry current instruction - if Thumb | |
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, | |
@ else it's one 32-bit instruction, so | |
@@ -197,6 +212,7 @@ look_for_VFP_exceptions: | |
@ Fall into hand on to next handler - appropriate coproc instr | |
@ not recognised by VFP | |
+ enable_irq_cond | |
DBGSTR "not VFP" | |
#ifdef CONFIG_PREEMPT_COUNT | |
get_thread_info r10 | |
@@ -242,6 +258,10 @@ ENDPROC(vfp_save_state) | |
.align | |
vfp_current_hw_state_address: | |
.word vfp_current_hw_state | |
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP) | |
+vfp_cpu_logical_map_address: | |
+ .word __cpu_logical_map | |
+#endif | |
.macro tbl_branch, base, tmp, shift | |
#ifdef CONFIG_THUMB2_KERNEL | |
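The enable_irq/disable_irq pairs added in this file look odd in isolation. Under I-pipe the apparent intent is to open a brief window in which a pending head-domain interrupt, and any context switch it triggers, can run, after which the per-CPU VFP bookkeeping must be re-read. Roughly, in C, under that assumption (hard_local_irq_enable()/disable() being the I-pipe hard-IRQ helpers):

	static union vfp_state *vfp_reread_hw_state(void)
	{
		/* The sequence mirrors the assembly above. */
		hard_local_irq_enable();   /* pending head-domain IRQs run here */
		hard_local_irq_disable();  /* back to atomic context */
		return vfp_current_hw_state[raw_smp_processor_id()];
	}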
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c | |
index 5dfbb0b..2f1562c 100644 | |
--- a/arch/arm/vfp/vfpmodule.c | |
+++ b/arch/arm/vfp/vfpmodule.c | |
@@ -92,6 +92,7 @@ static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) | |
static void vfp_thread_flush(struct thread_info *thread) | |
{ | |
union vfp_state *vfp = &thread->vfpstate; | |
+ unsigned long flags; | |
unsigned int cpu; | |
/* | |
@@ -102,11 +103,11 @@ static void vfp_thread_flush(struct thread_info *thread) | |
* Do this first to ensure that preemption won't overwrite our | |
* state saving should access to the VFP be enabled at this point. | |
*/ | |
- cpu = get_cpu(); | |
+ cpu = __ipipe_get_cpu(flags); | |
if (vfp_current_hw_state[cpu] == vfp) | |
vfp_current_hw_state[cpu] = NULL; | |
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | |
- put_cpu(); | |
+ __ipipe_put_cpu(flags); | |
memset(vfp, 0, sizeof(union vfp_state)); | |
@@ -121,11 +122,12 @@ static void vfp_thread_exit(struct thread_info *thread) | |
{ | |
/* release case: Per-thread VFP cleanup. */ | |
union vfp_state *vfp = &thread->vfpstate; | |
- unsigned int cpu = get_cpu(); | |
+ unsigned long flags; | |
+ unsigned int cpu = __ipipe_get_cpu(flags); | |
if (vfp_current_hw_state[cpu] == vfp) | |
vfp_current_hw_state[cpu] = NULL; | |
- put_cpu(); | |
+ __ipipe_put_cpu(flags); | |
} | |
static void vfp_thread_copy(struct thread_info *thread) | |
@@ -165,6 +167,7 @@ static void vfp_thread_copy(struct thread_info *thread) | |
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |
{ | |
struct thread_info *thread = v; | |
+ unsigned long flags; | |
u32 fpexc; | |
#ifdef CONFIG_SMP | |
unsigned int cpu; | |
@@ -172,8 +175,9 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |
switch (cmd) { | |
case THREAD_NOTIFY_SWITCH: | |
- fpexc = fmrx(FPEXC); | |
+ flags = hard_cond_local_irq_save(); | |
+ fpexc = fmrx(FPEXC); | |
#ifdef CONFIG_SMP | |
cpu = thread->cpu; | |
@@ -191,6 +195,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |
* old state. | |
*/ | |
fmxr(FPEXC, fpexc & ~FPEXC_EN); | |
+ hard_cond_local_irq_restore(flags); | |
break; | |
case THREAD_NOTIFY_FLUSH: | |
@@ -334,7 +339,7 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) | |
*/ | |
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | |
{ | |
- u32 fpscr, orig_fpscr, fpsid, exceptions; | |
+ u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0; | |
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); | |
@@ -364,6 +369,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | |
/* | |
* Synchronous exception, emulate the trigger instruction | |
*/ | |
+ hard_cond_local_irq_enable(); | |
goto emulate; | |
} | |
@@ -376,7 +382,18 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | |
trigger = fmrx(FPINST); | |
regs->ARM_pc -= 4; | |
#endif | |
- } else if (!(fpexc & FPEXC_DEX)) { | |
+ if (fpexc & FPEXC_FP2V) { | |
+ /* | |
+ * The barrier() here prevents fpinst2 being read | |
+ * before the condition above. | |
+ */ | |
+ barrier(); | |
+ next_trigger = fmrx(FPINST2); | |
+ } | |
+ } | |
+ hard_cond_local_irq_enable(); | |
+ | |
+ if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) { | |
/* | |
* Illegal combination of bits. It can be caused by an | |
* unallocated VFP instruction but with FPSCR.IXE set and not | |
@@ -416,18 +433,14 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | |
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) | |
goto exit; | |
- /* | |
- * The barrier() here prevents fpinst2 being read | |
- * before the condition above. | |
- */ | |
- barrier(); | |
- trigger = fmrx(FPINST2); | |
+ trigger = next_trigger; | |
emulate: | |
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); | |
if (exceptions) | |
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | |
exit: | |
+ hard_cond_local_irq_enable(); | |
preempt_enable(); | |
} | |
@@ -514,7 +527,8 @@ static inline void vfp_pm_init(void) { } | |
*/ | |
void vfp_sync_hwstate(struct thread_info *thread) | |
{ | |
- unsigned int cpu = get_cpu(); | |
+ unsigned long flags; | |
+ unsigned int cpu = __ipipe_get_cpu(flags); | |
if (vfp_state_in_hw(cpu, thread)) { | |
u32 fpexc = fmrx(FPEXC); | |
@@ -527,17 +541,18 @@ void vfp_sync_hwstate(struct thread_info *thread) | |
fmxr(FPEXC, fpexc); | |
} | |
- put_cpu(); | |
+ __ipipe_put_cpu(flags); | |
} | |
/* Ensure that the thread reloads the hardware VFP state on the next use. */ | |
void vfp_flush_hwstate(struct thread_info *thread) | |
{ | |
- unsigned int cpu = get_cpu(); | |
+ unsigned long flags; | |
+ unsigned int cpu = __ipipe_get_cpu(flags); | |
vfp_force_reload(cpu, thread); | |
- put_cpu(); | |
+ __ipipe_put_cpu(flags); | |
} | |
/* | |
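The recurring substitution in vfpmodule.c above is get_cpu()/put_cpu() becoming __ipipe_get_cpu(flags)/__ipipe_put_cpu(flags). The assumed difference: rather than merely disabling preemption, the I-pipe variants hard-disable interrupts, so not even a head-domain IRQ can switch tasks while the VFP registers are being manipulated. A sketch of their assumed shape, not the real ipipe header:

	#define __ipipe_get_cpu(flags)				\
		({						\
			(flags) = hard_local_irq_save();	\
			raw_smp_processor_id();			\
		})

	#define __ipipe_put_cpu(flags)	\
		hard_local_irq_restore(flags)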
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c | |
index a2b2541..c650d46 100644 | |
--- a/drivers/clocksource/arm_arch_timer.c | |
+++ b/drivers/clocksource/arm_arch_timer.c | |
@@ -17,6 +17,8 @@ | |
#include <linux/interrupt.h> | |
#include <linux/of_irq.h> | |
#include <linux/io.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include <asm/arch_timer.h> | |
#include <asm/virt.h> | |
@@ -43,14 +45,54 @@ static bool arch_timer_use_virtual = true; | |
* Architected system timer support. | |
*/ | |
-static inline irqreturn_t timer_handler(const int access, | |
- struct clock_event_device *evt) | |
+static int arch_timer_ack(const int access) | |
{ | |
unsigned long ctrl; | |
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); | |
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { | |
ctrl |= ARCH_TIMER_CTRL_IT_MASK; | |
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); | |
+ return 1; | |
+ } | |
+ return 0; | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+static DEFINE_PER_CPU(struct ipipe_timer, arch_itimer); | |
+static struct __ipipe_tscinfo tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING_ARCH, | |
+ .u = { | |
+ { | |
+ .mask = 0xffffffffffffffff, | |
+ }, | |
+ }, | |
+}; | |
+ | |
+static void arch_itimer_ack_phys(void) | |
+{ | |
+ arch_timer_ack(ARCH_TIMER_PHYS_ACCESS); | |
+} | |
+ | |
+static void arch_itimer_ack_virt(void) | |
+{ | |
+ arch_timer_ack(ARCH_TIMER_VIRT_ACCESS); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+static inline irqreturn_t timer_handler(int irq, const int access, | |
+ struct clock_event_device *evt) | |
+{ | |
+ if (clockevent_ipipe_stolen(evt)) | |
+ goto stolen; | |
+ | |
+ if (arch_timer_ack(access)) { | |
+#ifdef CONFIG_IPIPE | |
+ struct ipipe_timer *itimer = __this_cpu_ptr(&arch_itimer); | |
+ if (itimer->irq != irq) | |
+ itimer->irq = irq; | |
+#endif /* CONFIG_IPIPE */ | |
+ stolen: | |
+ __ipipe_tsc_update(); | |
evt->event_handler(evt); | |
return IRQ_HANDLED; | |
} | |
@@ -62,14 +104,14 @@ static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = dev_id; | |
- return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); | |
+ return timer_handler(irq, ARCH_TIMER_VIRT_ACCESS, evt); | |
} | |
static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = dev_id; | |
- return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); | |
+ return timer_handler(irq, ARCH_TIMER_PHYS_ACCESS, evt); | |
} | |
static inline void timer_set_mode(const int access, int mode) | |
@@ -142,6 +184,30 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) | |
clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); | |
+#ifdef CONFIG_IPIPE | |
+ clk->ipipe_timer = __this_cpu_ptr(&arch_itimer); | |
+ if (arch_timer_use_virtual) { | |
+ clk->ipipe_timer->irq = arch_timer_ppi[VIRT_PPI]; | |
+ clk->ipipe_timer->ack = arch_itimer_ack_virt; | |
+ } else { | |
+ clk->ipipe_timer->irq = arch_timer_ppi[PHYS_SECURE_PPI]; | |
+ clk->ipipe_timer->ack = arch_itimer_ack_phys; | |
+ } | |
+ clk->ipipe_timer->freq = arch_timer_rate; | |
+ | |
+ /* | |
+ * Change CNTKCTL to grant user-space access to the physical | |
+ * counter; this has to be done once on each core. | |
+ */ | |
+ { | |
+ unsigned ctl; | |
+ asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (ctl)); | |
+ ctl |= 1; /* PL0PCTEN */ | |
+ asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r"(ctl)); | |
+ isb(); | |
+ } | |
+#endif | |
+ | |
clockevents_config_and_register(clk, arch_timer_rate, | |
0xf, 0x7fffffff); | |
@@ -283,6 +349,11 @@ static int __init arch_timer_register(void) | |
goto out; | |
} | |
+#ifdef CONFIG_IPIPE | |
+ tsc_info.freq = arch_timer_rate; | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
clocksource_register_hz(&clocksource_counter, arch_timer_rate); | |
cyclecounter.mult = clocksource_counter.mult; | |
cyclecounter.shift = clocksource_counter.shift; | |
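On the CNTKCTL sequence above: bit 0 of that register is PL0PCTEN, which gates user (PL0) access to the physical counter, and that access is what the I-pipe user-space tsc support relies on. The same code with the bit named, purely for readability:

	#define CNTKCTL_PL0PCTEN	(1 << 0)  /* PL0 physical counter access */

	static void arch_timer_enable_user_pct(void)
	{
		u32 cntkctl;

		asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
		cntkctl |= CNTKCTL_PL0PCTEN;
		asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
		isb();
	}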
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c | |
index 02af420..9f42ddc 100644 | |
--- a/drivers/clocksource/mxs_timer.c | |
+++ b/drivers/clocksource/mxs_timer.c | |
@@ -22,6 +22,8 @@ | |
#include <linux/err.h> | |
#include <linux/interrupt.h> | |
+#include <linux/ipipe_tickdev.h> | |
+#include <linux/ipipe.h> | |
#include <linux/irq.h> | |
#include <linux/clockchips.h> | |
#include <linux/clk.h> | |
@@ -76,10 +78,17 @@ | |
#define BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL 0xb | |
#define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS 0xf | |
+#define IPIPE_DIV_ORDER 0 /* APBX clock prescaler order */ | |
+#define IPIPE_DIV (1 << IPIPE_DIV_ORDER) | |
+#define BV_TIMROTv2_TIMCTRLn_PRESCALE (1 << 4) | |
+ | |
static struct clock_event_device mxs_clockevent_device; | |
static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED; | |
static void __iomem *mxs_timrot_base; | |
+#ifdef CONFIG_IPIPE | |
+static unsigned long mxs_timrot_paddr; | |
+#endif /* CONFIG_IPIPE */ | |
static u32 timrot_major_version; | |
static inline void timrot_irq_disable(void) | |
@@ -128,7 +137,11 @@ static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id) | |
{ | |
struct clock_event_device *evt = dev_id; | |
- timrot_irq_acknowledge(); | |
+ if (!clockevent_ipipe_stolen(evt)) | |
+ timrot_irq_acknowledge(); | |
+ | |
+ __ipipe_tsc_update(); | |
+ | |
evt->event_handler(evt); | |
return IRQ_HANDLED; | |
@@ -193,21 +206,45 @@ static void mxs_set_mode(enum clock_event_mode mode, | |
} | |
} | |
+#ifdef CONFIG_IPIPE | |
+static struct ipipe_timer mxs_itimer = { | |
+ .ack = timrot_irq_acknowledge, | |
+}; | |
+ | |
+static struct __ipipe_tscinfo __maybe_unused tsc_info = { | |
+ .type = IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN, | |
+ .u = { | |
+ { | |
+ .mask = 0xffffffff, | |
+ }, | |
+ }, | |
+}; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static struct clock_event_device mxs_clockevent_device = { | |
.name = "mxs_timrot", | |
.features = CLOCK_EVT_FEAT_ONESHOT, | |
.set_mode = mxs_set_mode, | |
.set_next_event = timrotv2_set_next_event, | |
.rating = 200, | |
+#ifdef CONFIG_IPIPE | |
+ .ipipe_timer = &mxs_itimer, | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static int __init mxs_clockevent_init(struct clk *timer_clk) | |
{ | |
+ unsigned int c = clk_get_rate(timer_clk); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ c /= IPIPE_DIV; | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
if (timrot_is_v1()) | |
mxs_clockevent_device.set_next_event = timrotv1_set_next_event; | |
mxs_clockevent_device.cpumask = cpumask_of(0); | |
clockevents_config_and_register(&mxs_clockevent_device, | |
- clk_get_rate(timer_clk), | |
+ c, | |
timrot_is_v1() ? 0xf : 0x2, | |
timrot_is_v1() ? 0xfffe : 0xfffffffe); | |
@@ -231,11 +268,19 @@ static int __init mxs_clocksource_init(struct clk *timer_clk) | |
{ | |
unsigned int c = clk_get_rate(timer_clk); | |
- if (timrot_is_v1()) | |
+ if (timrot_is_v1()) { | |
clocksource_register_hz(&clocksource_mxs, c); | |
- else { | |
+ } else { | |
+#ifndef CONFIG_IPIPE | |
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1), | |
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down); | |
+#else /* CONFIG_IPIPE */ | |
+ c /= IPIPE_DIV; | |
+ tsc_info.freq = c; | |
+ tsc_info.counter_vaddr = (unsigned long)mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1); | |
+ tsc_info.u.counter_paddr = mxs_timrot_paddr + HW_TIMROT_RUNNING_COUNTn(1); | |
+ __ipipe_tsc_register(&tsc_info); | |
+#endif /* CONFIG_IPIPE */ | |
setup_sched_clock(mxs_read_sched_clock_v2, 32, c); | |
} | |
@@ -245,11 +290,23 @@ static int __init mxs_clocksource_init(struct clk *timer_clk) | |
static void __init mxs_timer_init(struct device_node *np) | |
{ | |
struct clk *timer_clk; | |
+ unsigned long xtal; | |
int irq; | |
mxs_timrot_base = of_iomap(np, 0); | |
WARN_ON(!mxs_timrot_base); | |
+#ifdef CONFIG_IPIPE | |
+ if (mxs_timrot_base) { | |
+ struct resource res; | |
+ | |
+ if (of_address_to_resource(np, 0, &res)) | |
+ res.start = 0; | |
+ | |
+ mxs_timrot_paddr = res.start; | |
+ } | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
timer_clk = of_clk_get(np, 0); | |
if (IS_ERR(timer_clk)) { | |
pr_err("%s: failed to get clk\n", __func__); | |
@@ -270,20 +327,26 @@ static void __init mxs_timer_init(struct device_node *np) | |
MX28_TIMROT_VERSION_OFFSET)); | |
timrot_major_version >>= BP_TIMROT_MAJOR_VERSION; | |
+ if (timrot_is_v1()) | |
+ xtal = BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL; | |
+ else { | |
+#ifndef CONFIG_IPIPE | |
+ xtal = BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS; | |
+#else | |
+ xtal = BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS | | |
+ (IPIPE_DIV_ORDER * BV_TIMROTv2_TIMCTRLn_PRESCALE); | |
+#endif | |
+ } | |
/* one for clock_event */ | |
- __raw_writel((timrot_is_v1() ? | |
- BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL : | |
- BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) | | |
- BM_TIMROT_TIMCTRLn_UPDATE | | |
- BM_TIMROT_TIMCTRLn_IRQ_EN, | |
- mxs_timrot_base + HW_TIMROT_TIMCTRLn(0)); | |
+ __raw_writel(xtal | | |
+ BM_TIMROT_TIMCTRLn_UPDATE | | |
+ BM_TIMROT_TIMCTRLn_IRQ_EN, | |
+ mxs_timrot_base + HW_TIMROT_TIMCTRLn(0)); | |
/* another for clocksource */ | |
- __raw_writel((timrot_is_v1() ? | |
- BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL : | |
- BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) | | |
- BM_TIMROT_TIMCTRLn_RELOAD, | |
- mxs_timrot_base + HW_TIMROT_TIMCTRLn(1)); | |
+ __raw_writel(xtal | | |
+ BM_TIMROT_TIMCTRLn_RELOAD, | |
+ mxs_timrot_base + HW_TIMROT_TIMCTRLn(1)); | |
/* set clocksource timer fixed count to the maximum */ | |
if (timrot_is_v1()) | |
@@ -300,5 +363,9 @@ static void __init mxs_timer_init(struct device_node *np) | |
/* Make irqs happen */ | |
irq = irq_of_parse_and_map(np, 0); | |
setup_irq(irq, &mxs_timer_irq); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ mxs_itimer.irq = irq; | |
+#endif | |
} | |
CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init); | |
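The IPIPE_DIV machinery above is a no-op as configured (order 0, divider 1); it is presumably there so the timrot prescaler can be raised when the free-running counter wraps too quickly for tsc arithmetic. A worked example with a hypothetical order of 4:

	/*
	 * IPIPE_DIV_ORDER = 4 (hypothetical, not the configured value):
	 *
	 *   IPIPE_DIV           = 1 << 4 = 16
	 *   clockevent/tsc rate = clk_get_rate(timer_clk) / 16
	 *   TIMCTRLn            |= 4 * BV_TIMROTv2_TIMCTRLn_PRESCALE
	 *
	 * i.e. the hardware divides the source clock by 2^4 while the
	 * kernel divides the rate it reports by the same factor.
	 */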
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c | |
index 7176743..755325c 100644 | |
--- a/drivers/gpio/gpio-mxc.c | |
+++ b/drivers/gpio/gpio-mxc.c | |
@@ -32,6 +32,7 @@ | |
#include <linux/of.h> | |
#include <linux/of_device.h> | |
#include <linux/module.h> | |
+#include <linux/ipipe.h> | |
#include <asm-generic/bug.h> | |
enum mxc_gpio_hwtype { | |
@@ -65,6 +66,9 @@ struct mxc_gpio_port { | |
struct irq_domain *domain; | |
struct bgpio_chip bgc; | |
u32 both_edges; | |
+#ifdef CONFIG_IPIPE | |
+ unsigned nonroot; | |
+#endif /* CONFIG_IPIPE */ | |
}; | |
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = { | |
@@ -264,7 +268,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) | |
if (port->both_edges & (1 << irqoffset)) | |
mxc_flip_edge(port, irqoffset); | |
- generic_handle_irq(irq_find_mapping(port->domain, irqoffset)); | |
+ ipipe_handle_demuxed_irq(irq_find_mapping(port->domain, irqoffset)); | |
irq_stat &= ~(1 << irqoffset); | |
} | |
@@ -290,8 +294,11 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |
static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |
{ | |
u32 irq_msk, irq_stat; | |
+ struct irq_chip *chip = irq_get_chip(irq); | |
struct mxc_gpio_port *port; | |
+ chained_irq_enter(chip, desc); | |
+ | |
/* walk through all interrupt status registers */ | |
list_for_each_entry(port, &mxc_gpio_ports, node) { | |
irq_msk = readl(port->base + GPIO_IMR); | |
@@ -302,6 +309,8 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |
if (irq_stat) | |
mxc_gpio_irq_handler(port, irq_stat); | |
} | |
+ | |
+ chained_irq_exit(chip, desc); | |
} | |
/* | |
@@ -518,6 +527,120 @@ static struct platform_driver mxc_gpio_driver = { | |
.id_table = mxc_gpio_devtype, | |
}; | |
+#if defined(CONFIG_IPIPE) && \ | |
+ (defined(CONFIG_MXC_TZIC) || defined(CONFIG_SOC_IMX6Q)) | |
+extern void tzic_set_irq_prio(int irq, int hi); | |
+extern void tzic_mute_pic(void); | |
+extern void tzic_unmute_pic(void); | |
+extern void gic_mute(void); | |
+extern void gic_unmute(void); | |
+extern void gic_set_irq_prio(int irq, int hi); | |
+ | |
+#ifdef CONFIG_MXC_TZIC | |
+static unsigned is_mx5; | |
+#endif /* CONFIG_MXC_TZIC */ | |
+#ifdef CONFIG_SOC_IMX6Q | |
+static unsigned is_mx6; | |
+#endif /* CONFIG_SOC_IMX6Q */ | |
+ | |
+static void mxc_set_irq_prio(int irq, int hi) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ | |
+#ifdef CONFIG_SOC_IMX6Q | |
+ if (is_mx6) | |
+ gic_set_irq_prio(idata->hwirq, hi); | |
+#endif /* CONFIG_SOC_IMX6Q */ | |
+ | |
+#ifdef CONFIG_MXC_TZIC | |
+ if (is_mx5) | |
+ tzic_set_irq_prio(idata->hwirq, hi); | |
+#endif /* CONFIG_MXC_TZIC */ | |
+} | |
+ | |
+static void mxc_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ struct irq_chip *chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip->irq_set_type == gpio_set_irq_type) { | |
+ /* It is a gpio. */ | |
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(idata); | |
+ struct mxc_gpio_port *port = gc->private; | |
+ | |
+ if (ipd == &ipipe_root) { | |
+ port->nonroot &= ~(1 << idata->hwirq); | |
+ if (port->nonroot == 0) { | |
+ mxc_set_irq_prio(port->irq, 0); | |
+ if (port->irq_high > 0) | |
+ mxc_set_irq_prio(port->irq_high, 0); | |
+ } | |
+ } else { | |
+ port->nonroot |= (1 << idata->hwirq); | |
+ if (port->nonroot == (1 << idata->hwirq)) { | |
+ mxc_set_irq_prio(port->irq, 1); | |
+ if (port->irq_high > 0) | |
+ mxc_set_irq_prio(port->irq_high, 1); | |
+ } | |
+ } | |
+ } else | |
+ mxc_set_irq_prio(irq, ipd != &ipipe_root); | |
+} | |
+ | |
+static void mxc_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ struct irq_chip *chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip->irq_set_type == gpio_set_irq_type) { | |
+ /* It is a gpio. */ | |
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(idata); | |
+ struct mxc_gpio_port *port = gc->private; | |
+ | |
+ if (ipd != &ipipe_root) { | |
+ port->nonroot &= ~(1 << idata->hwirq); | |
+ if (port->nonroot == 0) { | |
+ mxc_set_irq_prio(port->irq, 0); | |
+ if (port->irq_high > 0) | |
+ mxc_set_irq_prio(port->irq_high, 0); | |
+ } | |
+ } | |
+ } else if (ipd != &ipipe_root) | |
+ mxc_set_irq_prio(irq, 0); | |
+} | |
+ | |
+#ifdef CONFIG_MXC_TZIC | |
+void __init mxc_pic_muter_register(void) | |
+{ | |
+ struct ipipe_mach_pic_muter pic_muter = { | |
+ .enable_irqdesc = mxc_enable_irqdesc, | |
+ .disable_irqdesc = mxc_disable_irqdesc, | |
+ .mute = tzic_mute_pic, | |
+ .unmute = tzic_unmute_pic, | |
+ }; | |
+ | |
+ is_mx5 = 1; | |
+ ipipe_pic_muter_register(&pic_muter); | |
+} | |
+#endif /* CONFIG_MXC_TZIC */ | |
+ | |
+#ifdef CONFIG_SOC_IMX6Q | |
+void __init mx6_pic_muter_register(void) | |
+{ | |
+ struct ipipe_mach_pic_muter pic_muter = { | |
+ .enable_irqdesc = mxc_enable_irqdesc, | |
+ .disable_irqdesc = mxc_disable_irqdesc, | |
+ .mute = gic_mute, | |
+ .unmute = gic_unmute, | |
+ }; | |
+ | |
+ is_mx6 = 1; | |
+ ipipe_pic_muter_register(&pic_muter); | |
+} | |
+#endif /* CONFIG_SOC_IMX6Q */ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static int __init gpio_mxc_init(void) | |
{ | |
return platform_driver_register(&mxc_gpio_driver); | |
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c | |
index f8e6af2..42c4e5a 100644 | |
--- a/drivers/gpio/gpio-mxs.c | |
+++ b/drivers/gpio/gpio-mxs.c | |
@@ -34,6 +34,7 @@ | |
#include <linux/slab.h> | |
#include <linux/basic_mmio_gpio.h> | |
#include <linux/module.h> | |
+#include <linux/ipipe.h> | |
#define MXS_SET 0x4 | |
#define MXS_CLR 0x8 | |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c | |
index 5405212..1485a7a 100644 | |
--- a/drivers/gpio/gpio-omap.c | |
+++ b/drivers/gpio/gpio-omap.c | |
@@ -28,6 +28,7 @@ | |
#include <linux/irqchip/chained_irq.h> | |
#include <linux/gpio.h> | |
#include <linux/platform_data/gpio-omap.h> | |
+#include <linux/ipipe.h> | |
#define OFF_MODE 1 | |
@@ -59,7 +60,7 @@ struct gpio_bank { | |
u32 saved_datain; | |
u32 level_mask; | |
u32 toggle_mask; | |
- spinlock_t lock; | |
+ ipipe_spinlock_t lock; | |
struct gpio_chip chip; | |
struct clk *dbck; | |
u32 mod_usage; | |
@@ -81,6 +82,10 @@ struct gpio_bank { | |
int (*get_context_loss_count)(struct device *dev); | |
struct omap_gpio_reg_offs *regs; | |
+#ifdef CONFIG_IPIPE | |
+ unsigned nonroot; | |
+ unsigned muted; | |
+#endif | |
}; | |
#define GPIO_INDEX(bank, gpio) (gpio % bank->width) | |
@@ -378,8 +383,8 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) | |
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {} | |
#endif | |
-static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, | |
- unsigned trigger) | |
+static inline int _set_gpio_triggering(struct gpio_bank *bank, int gpio, | |
+ unsigned trigger) | |
{ | |
void __iomem *reg = bank->base; | |
void __iomem *base = bank->base; | |
@@ -525,7 +530,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) | |
return retval; | |
} | |
-static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
+static inline void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
{ | |
void __iomem *reg = bank->base; | |
@@ -561,7 +566,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) | |
return l; | |
} | |
-static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
+static inline void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
{ | |
void __iomem *reg = bank->base; | |
u32 l; | |
@@ -583,7 +588,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
__raw_writel(l, reg); | |
} | |
-static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
+static inline void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) | |
{ | |
void __iomem *reg = bank->base; | |
u32 l; | |
@@ -644,7 +649,7 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) | |
return 0; | |
} | |
-static void _reset_gpio(struct gpio_bank *bank, int gpio) | |
+static inline void _reset_gpio(struct gpio_bank *bank, int gpio) | |
{ | |
_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1); | |
_set_gpio_irqenable(bank, gpio, 0); | |
@@ -730,7 +735,10 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |
bank = irq_get_handler_data(irq); | |
isr_reg = bank->base + bank->regs->irqstatus; | |
+ | |
+#ifndef CONFIG_IPIPE | |
pm_runtime_get_sync(bank->dev); | |
+#endif | |
if (WARN_ON(!isr_reg)) | |
goto exit; | |
@@ -776,7 +784,8 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |
if (bank->toggle_mask & (1 << bit)) | |
_toggle_gpio_edge_triggering(bank, bit); | |
- generic_handle_irq(irq_find_mapping(bank->domain, bit)); | |
+ ipipe_handle_demuxed_irq | |
+ (irq_find_mapping(bank->domain, bit)); | |
} | |
} | |
/* if bank has any level sensitive GPIO pin interrupt | |
@@ -786,7 +795,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |
exit: | |
if (!unmasked) | |
chained_irq_exit(chip, desc); | |
+#ifndef CONFIG_IPIPE | |
pm_runtime_put(bank->dev); | |
+#endif | |
} | |
static void gpio_irq_shutdown(struct irq_data *d) | |
@@ -830,6 +841,19 @@ static void gpio_mask_irq(struct irq_data *d) | |
spin_unlock_irqrestore(&bank->lock, flags); | |
} | |
+static void gpio_mask_ack_irq(struct irq_data *d) | |
+{ | |
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d); | |
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq); | |
+ unsigned long flags; | |
+ | |
+ spin_lock_irqsave(&bank->lock, flags); | |
+ _set_gpio_irqenable(bank, gpio, 0); | |
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); | |
+ spin_unlock_irqrestore(&bank->lock, flags); | |
+ _clear_gpio_irqstatus(bank, gpio); | |
+} | |
+ | |
static void gpio_unmask_irq(struct irq_data *d) | |
{ | |
struct gpio_bank *bank = irq_data_get_irq_chip_data(d); | |
@@ -858,6 +882,7 @@ static struct irq_chip gpio_irq_chip = { | |
.irq_shutdown = gpio_irq_shutdown, | |
.irq_ack = gpio_ack_irq, | |
.irq_mask = gpio_mask_irq, | |
+ .irq_mask_ack = gpio_mask_ack_irq, | |
.irq_unmask = gpio_unmask_irq, | |
.irq_set_type = gpio_irq_type, | |
.irq_set_wake = gpio_wake_enable, | |
@@ -1124,6 +1149,7 @@ static void omap_gpio_chip_init(struct gpio_bank *bank) | |
set_irq_flags(irq, IRQF_VALID); | |
} | |
} | |
+ | |
irq_set_chained_handler(bank->irq, gpio_irq_handler); | |
irq_set_handler_data(bank->irq, bank); | |
} | |
@@ -1256,6 +1282,164 @@ static int omap_gpio_probe(struct platform_device *pdev) | |
#ifdef CONFIG_ARCH_OMAP2PLUS | |
+#if defined(CONFIG_IPIPE) | |
+extern void omap3_intc_mute(void); | |
+extern void omap3_intc_unmute(void); | |
+extern void omap3_intc_set_irq_prio(int irq, int hi); | |
+extern void gic_mute(void); | |
+extern void gic_unmute(void); | |
+extern void gic_set_irq_prio(int irq, int hi); | |
+static unsigned ipipe_mach_omap; | |
+ | |
+static inline void omap2plus_pic_set_irq_prio(int irq, int hi) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ | |
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) | |
+	if (ipipe_mach_omap == 3) | |
+ omap3_intc_set_irq_prio(idata->hwirq, hi); | |
+#endif /* omap3 */ | |
+#ifdef CONFIG_ARM_GIC | |
+ if (ipipe_mach_omap == 4) | |
+ gic_set_irq_prio(idata->hwirq, hi); | |
+#endif /* gic */ | |
+} | |
+ | |
+static void omap2plus_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ struct irq_chip *chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip == &gpio_irq_chip) { | |
+ /* It is a gpio. */ | |
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(idata); | |
+ | |
+ if (ipd == &ipipe_root) { | |
+ bank->nonroot &= ~(1 << idata->hwirq); | |
+ if (bank->nonroot == 0) | |
+ omap2plus_pic_set_irq_prio(bank->irq, 0); | |
+ } else { | |
+ bank->nonroot |= (1 << idata->hwirq); | |
+ if (bank->nonroot == (1 << idata->hwirq)) | |
+ omap2plus_pic_set_irq_prio(bank->irq, 1); | |
+ } | |
+ } else | |
+ omap2plus_pic_set_irq_prio(irq, ipd != &ipipe_root); | |
+} | |
+ | |
+static void omap2plus_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ struct irq_data *idata = irq_desc_get_irq_data(desc); | |
+ struct irq_chip *chip = irq_data_get_irq_chip(idata); | |
+ | |
+ if (chip == &gpio_irq_chip) { | |
+ /* It is a gpio. */ | |
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(idata); | |
+ | |
+ if (ipd != &ipipe_root) { | |
+ bank->nonroot &= ~(1 << idata->hwirq); | |
+ if (bank->nonroot == 0) | |
+ omap2plus_pic_set_irq_prio(bank->irq, 0); | |
+ } | |
+ } else if (ipd != &ipipe_root) | |
+ omap2plus_pic_set_irq_prio(irq, 0); | |
+} | |
+ | |
+static inline void omap2plus_mute_gpio(void) | |
+{ | |
+ struct gpio_bank *bank; | |
+ unsigned muted; | |
+ | |
+ list_for_each_entry(bank, &omap_gpio_list, node) { | |
+ if (bank->nonroot == 0) | |
+ continue; | |
+ | |
+ muted = ~bank->nonroot; | |
+ if (muted) | |
+ muted &= _get_gpio_irqbank_mask(bank); | |
+ bank->muted = muted; | |
+ if (muted) | |
+ _disable_gpio_irqbank(bank, muted); | |
+ } | |
+} | |
+ | |
+static inline void omap2plus_unmute_gpio(void) | |
+{ | |
+ struct gpio_bank *bank; | |
+ unsigned muted; | |
+ | |
+ list_for_each_entry(bank, &omap_gpio_list, node) { | |
+ if (bank->nonroot == 0) | |
+ continue; | |
+ | |
+ muted = bank->muted; | |
+ if (muted) | |
+ _enable_gpio_irqbank(bank, muted); | |
+ } | |
+} | |
+ | |
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) | |
+static void omap3_mute_pic(void) | |
+{ | |
+ omap3_intc_mute(); | |
+ | |
+ omap2plus_mute_gpio(); | |
+} | |
+ | |
+static void omap3_unmute_pic(void) | |
+{ | |
+ omap2plus_unmute_gpio(); | |
+ | |
+ omap3_intc_unmute(); | |
+} | |
+ | |
+void __init omap3_pic_muter_register(void) | |
+{ | |
+ struct ipipe_mach_pic_muter muter = { | |
+ .enable_irqdesc = omap2plus_enable_irqdesc, | |
+ .disable_irqdesc = omap2plus_disable_irqdesc, | |
+ .mute = omap3_mute_pic, | |
+ .unmute = omap3_unmute_pic, | |
+ }; | |
+ | |
+ ipipe_pic_muter_register(&muter); | |
+ ipipe_mach_omap = 3; | |
+} | |
+#endif /* omap3 */ | |
+ | |
+#ifdef CONFIG_ARM_GIC | |
+static void omap4_mute_pic(void) | |
+{ | |
+ gic_mute(); | |
+ | |
+ omap2plus_mute_gpio(); | |
+} | |
+ | |
+static void omap4_unmute_pic(void) | |
+{ | |
+ omap2plus_unmute_gpio(); | |
+ | |
+ gic_unmute(); | |
+} | |
+ | |
+void __init omap4_pic_muter_register(void) | |
+{ | |
+ struct ipipe_mach_pic_muter muter = { | |
+ .enable_irqdesc = omap2plus_enable_irqdesc, | |
+ .disable_irqdesc = omap2plus_disable_irqdesc, | |
+ .mute = omap4_mute_pic, | |
+ .unmute = omap4_unmute_pic, | |
+ }; | |
+ | |
+ ipipe_pic_muter_register(&muter); | |
+ ipipe_mach_omap = 4; | |
+} | |
+#endif /* GIC */ | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
#if defined(CONFIG_PM_RUNTIME) | |
static void omap_gpio_restore_context(struct gpio_bank *bank); | |
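Note on the muter hunks above: each GPIO bank keeps a nonroot bitmask of lines handed to the head (real-time) domain. The bank's parent interrupt is promoted to high priority when the first such bit appears and demoted when the last one clears; muting then masks only the root-owned lines recorded in muted. A minimal standalone sketch of that bookkeeping (names are illustrative, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    struct bank {
    	uint32_t nonroot;	/* lines owned by the head domain */
    	uint32_t muted;		/* root-owned lines masked while muted */
    };

    /* Called when line 'bit' is granted to (head=1) or released from
     * (head=0) a non-root domain. */
    static void update_prio(struct bank *b, unsigned bit, int head)
    {
    	uint32_t old = b->nonroot;

    	if (head)
    		b->nonroot |= 1u << bit;
    	else
    		b->nonroot &= ~(1u << bit);

    	if (old == 0 && b->nonroot != 0)
    		printf("raise parent IRQ priority\n");
    	else if (old != 0 && b->nonroot == 0)
    		printf("restore parent IRQ priority\n");
    }

    /* Mute: mask every enabled line the head domain does not own. */
    static uint32_t mute(struct bank *b, uint32_t enabled)
    {
    	b->muted = ~b->nonroot & enabled;
    	return b->muted;	/* these would get _disable_gpio_irqbank() */
    }

    int main(void)
    {
    	struct bank b = { 0, 0 };
    	update_prio(&b, 3, 1);			/* first head line -> raise */
    	printf("muted=%#x\n", mute(&b, 0x0f));	/* 0x07: all but line 3 */
    	update_prio(&b, 3, 0);			/* last head line -> restore */
    	return 0;
    }

Tracking only the 0 -> nonzero and nonzero -> 0 transitions avoids reprogramming the interrupt controller on every request.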
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c | |
index df2199d..8795ea5f 100644 | |
--- a/drivers/gpio/gpio-pxa.c | |
+++ b/drivers/gpio/gpio-pxa.c | |
@@ -26,6 +26,7 @@ | |
#include <linux/platform_device.h> | |
#include <linux/syscore_ops.h> | |
#include <linux/slab.h> | |
+#include <linux/ipipe.h> | |
#include <mach/irqs.h> | |
@@ -100,7 +101,7 @@ struct pxa_gpio_id { | |
int gpio_nums; | |
}; | |
-static DEFINE_SPINLOCK(gpio_lock); | |
+static IPIPE_DEFINE_SPINLOCK(gpio_lock); | |
static struct pxa_gpio_chip *pxa_gpio_chips; | |
static enum pxa_gpio_type gpio_type; | |
static void __iomem *gpio_reg_base; | |
@@ -413,7 +414,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) | |
for_each_set_bit(n, &gedr, BITS_PER_LONG) { | |
loop = 1; | |
- generic_handle_irq(gpio_to_irq(gpio_base + n)); | |
+ ipipe_handle_demuxed_irq(gpio_to_irq(gpio_base + n)); | |
} | |
} | |
} while (loop); | |
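As in the OMAP and PXA hunks above, every chained demux handler in this patch swaps generic_handle_irq() for ipipe_handle_demuxed_irq(); the fallback in include/linux/ipipe.h (further below) maps it straight back to generic_handle_irq() when CONFIG_IPIPE is off. The demux loop itself is untouched; here is a standalone model of the pattern:

    #include <stdio.h>

    /* One parent IRQ fans out to the set bits of a status register;
     * the dispatch call is the only thing the I-pipe patch swaps. */
    static void dispatch(int irq) { printf("dispatch child irq %d\n", irq); }

    static void demux(unsigned status, int irq_base)
    {
    	while (status) {
    		int bit = __builtin_ctz(status);	/* like ffs() - 1 */
    		status &= status - 1;			/* clear lowest set bit */
    		dispatch(irq_base + bit);
    	}
    }

    int main(void)
    {
    	demux(0x22, 64);	/* children 65 and 69 pending */
    	return 0;
    }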
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c | |
index 8ea3b33..1079825 100644 | |
--- a/drivers/gpio/gpio-sa1100.c | |
+++ b/drivers/gpio/gpio-sa1100.c | |
@@ -31,9 +31,9 @@ static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset) | |
{ | |
unsigned long flags; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
GPDR &= ~GPIO_GPIO(offset); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return 0; | |
} | |
@@ -41,10 +41,10 @@ static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int | |
{ | |
unsigned long flags; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
sa1100_gpio_set(chip, offset, value); | |
GPDR |= GPIO_GPIO(offset); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return 0; | |
} | |
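The local_irq_save() -> hard_local_irq_save() substitutions in gpio-sa1100 (and in the asm-generic hunks later on) are the core I-pipe idiom: once the pipeline is in, the plain helpers only set a per-CPU virtual stall bit for the root domain, while the hard variants really toggle the CPU interrupt flag. A standalone sketch of the virtual-masking side (simplified; not the patch's actual implementation):

    #include <stdio.h>

    static unsigned long root_status;	/* per-CPU in the real thing */
    #define STALL_BIT 0x1UL

    /* Virtual masking: records that root-domain IRQ handlers must not
     * run, but leaves the CPU's interrupt flag untouched. */
    static unsigned long virt_irq_save(void)
    {
    	unsigned long old = root_status & STALL_BIT;
    	root_status |= STALL_BIT;
    	return old;
    }

    static void virt_irq_restore(unsigned long flags)
    {
    	if (!flags)
    		root_status &= ~STALL_BIT;
    	/* pending root IRQs would be replayed here */
    }

    int main(void)
    {
    	unsigned long f = virt_irq_save();
    	/* Head-domain IRQs could still preempt this section, which is
    	 * why GPIO direction updates and emulated atomics switch to
    	 * hard_local_irq_save() in the patch. */
    	virt_irq_restore(f);
    	printf("stalled=%lu\n", root_status & STALL_BIT);
    	return 0;
    }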
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c | |
index 19ceaa6..d56f20a 100644 | |
--- a/drivers/irqchip/irq-gic.c | |
+++ b/drivers/irqchip/irq-gic.c | |
@@ -41,6 +41,7 @@ | |
#include <linux/slab.h> | |
#include <linux/irqchip/chained_irq.h> | |
#include <linux/irqchip/arm-gic.h> | |
+#include <linux/ipipe.h> | |
#include <asm/irq.h> | |
#include <asm/exception.h> | |
@@ -70,7 +71,7 @@ struct gic_chip_data { | |
#endif | |
}; | |
-static DEFINE_RAW_SPINLOCK(irq_controller_lock); | |
+static IPIPE_DEFINE_RAW_SPINLOCK(irq_controller_lock); | |
/* | |
* The GIC mapping of CPU interfaces does not necessarily match | |
@@ -153,37 +154,74 @@ static inline unsigned int gic_irq(struct irq_data *d) | |
*/ | |
static void gic_mask_irq(struct irq_data *d) | |
{ | |
- u32 mask = 1 << (gic_irq(d) % 32); | |
+ u32 mask = 1 << (d->irq % 32); | |
+ unsigned long flags; | |
- raw_spin_lock(&irq_controller_lock); | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
+ ipipe_lock_irq(d->irq); | |
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | |
if (gic_arch_extn.irq_mask) | |
gic_arch_extn.irq_mask(d); | |
- raw_spin_unlock(&irq_controller_lock); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
} | |
static void gic_unmask_irq(struct irq_data *d) | |
{ | |
- u32 mask = 1 << (gic_irq(d) % 32); | |
+ u32 mask = 1 << (d->irq % 32); | |
+ unsigned long flags; | |
- raw_spin_lock(&irq_controller_lock); | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
if (gic_arch_extn.irq_unmask) | |
gic_arch_extn.irq_unmask(d); | |
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | |
- raw_spin_unlock(&irq_controller_lock); | |
+ ipipe_unlock_irq(d->irq); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
} | |
static void gic_eoi_irq(struct irq_data *d) | |
{ | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
if (gic_arch_extn.irq_eoi) { | |
- raw_spin_lock(&irq_controller_lock); | |
gic_arch_extn.irq_eoi(d); | |
- raw_spin_unlock(&irq_controller_lock); | |
} | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+static void gic_hold_irq(struct irq_data *d) | |
+{ | |
+ u32 mask = 1 << (d->irq % 32); | |
+ unsigned long flags; | |
+ | |
+	raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | |
+ if (gic_arch_extn.irq_mask) | |
+ gic_arch_extn.irq_mask(d); | |
+ if (gic_arch_extn.irq_eoi) { | |
+ gic_arch_extn.irq_eoi(d); | |
+ } | |
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
+} | |
+ | |
+static void gic_release_irq(struct irq_data *d) | |
+{ | |
+ u32 mask = 1 << (d->irq % 32); | |
+ unsigned long flags; | |
+ | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
+ if (gic_arch_extn.irq_unmask) | |
+ gic_arch_extn.irq_unmask(d); | |
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
} | |
+#endif | |
+ | |
static int gic_set_type(struct irq_data *d, unsigned int type) | |
{ | |
void __iomem *base = gic_dist_base(d); | |
@@ -193,6 +231,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |
u32 confmask = 0x2 << ((gicirq % 16) * 2); | |
u32 confoff = (gicirq / 16) * 4; | |
bool enabled = false; | |
+ unsigned long flags; | |
u32 val; | |
/* Interrupt configuration for SGIs can't be changed */ | |
@@ -202,7 +241,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |
if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) | |
return -EINVAL; | |
- raw_spin_lock(&irq_controller_lock); | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
if (gic_arch_extn.irq_set_type) | |
gic_arch_extn.irq_set_type(d, type); | |
@@ -227,7 +266,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |
if (enabled) | |
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); | |
- raw_spin_unlock(&irq_controller_lock); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
return 0; | |
} | |
@@ -248,6 +287,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | |
unsigned int shift = (gic_irq(d) % 4) * 8; | |
unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | |
+ unsigned long flags; | |
u32 val, mask, bit; | |
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | |
@@ -256,10 +296,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |
mask = 0xff << shift; | |
bit = gic_cpu_map[cpu] << shift; | |
- raw_spin_lock(&irq_controller_lock); | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
val = readl_relaxed(reg) & ~mask; | |
writel_relaxed(val | bit, reg); | |
- raw_spin_unlock(&irq_controller_lock); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
return IRQ_SET_MASK_OK; | |
} | |
@@ -292,13 +332,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs | |
if (likely(irqnr > 15 && irqnr < 1021)) { | |
irqnr = irq_find_mapping(gic->domain, irqnr); | |
- handle_IRQ(irqnr, regs); | |
+ ipipe_handle_multi_irq(irqnr, regs); | |
continue; | |
} | |
if (irqnr < 16) { | |
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | |
#ifdef CONFIG_SMP | |
- handle_IPI(irqnr, regs); | |
+ ipipe_handle_multi_ipi(irqnr, regs); | |
#endif | |
continue; | |
} | |
@@ -311,13 +351,13 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |
struct gic_chip_data *chip_data = irq_get_handler_data(irq); | |
struct irq_chip *chip = irq_get_chip(irq); | |
unsigned int cascade_irq, gic_irq; | |
- unsigned long status; | |
+ unsigned long status, flags; | |
chained_irq_enter(chip, desc); | |
- raw_spin_lock(&irq_controller_lock); | |
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags); | |
status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); | |
- raw_spin_unlock(&irq_controller_lock); | |
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags); | |
gic_irq = (status & 0x3ff); | |
if (gic_irq == 1023) | |
@@ -327,7 +367,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |
if (unlikely(gic_irq < 32 || gic_irq > 1020)) | |
handle_bad_irq(cascade_irq, desc); | |
else | |
- generic_handle_irq(cascade_irq); | |
+ ipipe_handle_demuxed_irq(cascade_irq); | |
out: | |
chained_irq_exit(chip, desc); | |
@@ -338,6 +378,10 @@ static struct irq_chip gic_chip = { | |
.irq_mask = gic_mask_irq, | |
.irq_unmask = gic_unmask_irq, | |
.irq_eoi = gic_eoi_irq, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_hold = gic_hold_irq, | |
+ .irq_release = gic_release_irq, | |
+#endif | |
.irq_set_type = gic_set_type, | |
.irq_retrigger = gic_retrigger, | |
#ifdef CONFIG_SMP | |
@@ -374,6 +418,37 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic) | |
return mask; | |
} | |
+#if defined(CONFIG_IPIPE) | |
+void gic_mute(void) | |
+{ | |
+ writel_relaxed(0x90, gic_data_cpu_base(&gic_data[0]) + GIC_CPU_PRIMASK); | |
+} | |
+ | |
+void gic_unmute(void) | |
+{ | |
+ writel_relaxed(0xf0, gic_data_cpu_base(&gic_data[0]) + GIC_CPU_PRIMASK); | |
+} | |
+ | |
+void gic_set_irq_prio(int irq, int hi) | |
+{ | |
+ void __iomem *dist_base; | |
+ unsigned gic_irqs; | |
+ | |
+ if (irq < 32) /* The IPIs always are high priority */ | |
+ return; | |
+ | |
+	dist_base = gic_data_dist_base(&gic_data[0]); | |
+ gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f; | |
+ gic_irqs = (gic_irqs + 1) * 32; | |
+ if (gic_irqs > 1020) | |
+ gic_irqs = 1020; | |
+ if (irq >= gic_irqs) | |
+ return; | |
+ | |
+ writeb_relaxed(hi ? 0x10 : 0xa0, dist_base + GIC_DIST_PRI + irq); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
static void __init gic_dist_init(struct gic_chip_data *gic) | |
{ | |
unsigned int i; | |
@@ -447,7 +522,11 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) | |
* Set priority on PPI and SGI interrupts | |
*/ | |
for (i = 0; i < 32; i += 4) | |
+#if !defined(CONFIG_IPIPE) | |
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); | |
+#else /* IPIPE */ | |
+ writel_relaxed(0x10101010, dist_base + GIC_DIST_PRI + i * 4 / 4); | |
+#endif /* IPIPE */ | |
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); | |
writel_relaxed(1, base + GIC_CPU_CTRL); | |
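The magic numbers in the GIC hunks implement muting via the CPU interface priority mask: gic_set_irq_prio() programs head-domain IRQs at 0x10 and root-domain IRQs at 0xa0 (with 0x10101010 as the IPIPE-mode reset value for SGIs/PPIs), and gic_mute() lowers GIC_CPU_PRIMASK from 0xf0 to 0x90. The GIC delivers an interrupt only when its priority value is strictly lower than the mask, so muting silences root IRQs while head IRQs still fire. A quick standalone check of that arithmetic:

    #include <stdio.h>

    #define PRIO_HEAD 0x10	/* gic_set_irq_prio(irq, 1) */
    #define PRIO_ROOT 0xa0	/* gic_set_irq_prio(irq, 0) */

    /* Lower value = higher priority; delivered iff prio < primask. */
    static int delivered(unsigned prio, unsigned primask)
    {
    	return prio < primask;
    }

    int main(void)
    {
    	printf("unmuted (0xf0): head=%d root=%d\n",
    	       delivered(PRIO_HEAD, 0xf0), delivered(PRIO_ROOT, 0xf0));
    	printf("muted   (0x90): head=%d root=%d\n",
    	       delivered(PRIO_HEAD, 0x90), delivered(PRIO_ROOT, 0x90));
    	return 0;	/* prints head=1 root=1, then head=1 root=0 */
    }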
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c | |
index 63b3d4e..acbf4e7 100644 | |
--- a/drivers/irqchip/irq-mxs.c | |
+++ b/drivers/irqchip/irq-mxs.c | |
@@ -60,6 +60,16 @@ static void icoll_mask_irq(struct irq_data *d) | |
icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); | |
} | |
+#ifdef CONFIG_IPIPE | |
+static void icoll_mask_ack_irq(struct irq_data *d) | |
+{ | |
+ __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, | |
+ icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); | |
+ __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, | |
+ icoll_base + HW_ICOLL_LEVELACK); | |
+} | |
+#endif | |
+ | |
static void icoll_unmask_irq(struct irq_data *d) | |
{ | |
__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, | |
@@ -69,6 +79,9 @@ static void icoll_unmask_irq(struct irq_data *d) | |
static struct irq_chip mxs_icoll_chip = { | |
.irq_ack = icoll_ack_irq, | |
.irq_mask = icoll_mask_irq, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_mask_ack = icoll_mask_ack_irq, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_unmask = icoll_unmask_irq, | |
}; | |
@@ -79,7 +92,7 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) | |
irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); | |
__raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); | |
irqnr = irq_find_mapping(icoll_domain, irqnr); | |
- handle_IRQ(irqnr, regs); | |
+ ipipe_handle_multi_irq(irqnr, regs); | |
} | |
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, | |
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c | |
index bbcc944..28a8fd1 100644 | |
--- a/drivers/irqchip/irq-s3c24xx.c | |
+++ b/drivers/irqchip/irq-s3c24xx.c | |
@@ -5,6 +5,8 @@ | |
* Ben Dooks <[email protected]> | |
* Copyright (c) 2012 Heiko Stuebner <[email protected]> | |
* | |
+ * Copyright (C) 2006, 2007 Sebastian Smolorz <[email protected]>, emlix GmbH | |
+ * | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License as published by | |
* the Free Software Foundation; either version 2 of the License, or | |
@@ -24,6 +26,7 @@ | |
#include <linux/interrupt.h> | |
#include <linux/ioport.h> | |
#include <linux/device.h> | |
+#include <linux/ipipe.h> | |
#include <linux/irqdomain.h> | |
#include <linux/irqchip/chained_irq.h> | |
#include <linux/of.h> | |
@@ -328,7 +331,7 @@ static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc) | |
n = __ffs(src); | |
src &= ~(1 << n); | |
irq = irq_find_mapping(sub_intc->domain, offset + n); | |
- generic_handle_irq(irq); | |
+ ipipe_handle_demuxed_irq(irq); | |
} | |
chained_irq_exit(chip, desc); | |
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c | |
index 47a52ab..f7c6fc5 100644 | |
--- a/drivers/irqchip/irq-versatile-fpga.c | |
+++ b/drivers/irqchip/irq-versatile-fpga.c | |
@@ -74,7 +74,7 @@ static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc) | |
do { | |
irq = ffs(status) - 1; | |
status &= ~(1 << irq); | |
- generic_handle_irq(irq_find_mapping(f->domain, irq)); | |
+ ipipe_handle_demuxed_irq(irq_find_mapping(f->domain, irq)); | |
} while (status); | |
} | |
@@ -91,7 +91,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) | |
while ((status = readl(f->base + IRQ_STATUS))) { | |
irq = ffs(status) - 1; | |
- handle_IRQ(irq_find_mapping(f->domain, irq), regs); | |
+ ipipe_handle_multi_irq(irq_find_mapping(f->domain, irq), regs); | |
handled = 1; | |
} | |
@@ -147,6 +147,9 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, | |
f->chip.name = name; | |
f->chip.irq_ack = fpga_irq_mask; | |
f->chip.irq_mask = fpga_irq_mask; | |
+#ifdef CONFIG_IPIPE | |
+ f->chip.irq_mask_ack = fpga_irq_mask; | |
+#endif | |
f->chip.irq_unmask = fpga_irq_unmask; | |
f->valid = valid; | |
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c | |
index 2bbb004..0cbc3e7 100644 | |
--- a/drivers/irqchip/irq-vic.c | |
+++ b/drivers/irqchip/irq-vic.c | |
@@ -32,6 +32,7 @@ | |
#include <linux/device.h> | |
#include <linux/amba/bus.h> | |
#include <linux/irqchip/arm-vic.h> | |
+#include <linux/ipipe.h> | |
#include <asm/exception.h> | |
#include <asm/irq.h> | |
@@ -310,7 +311,7 @@ static void vic_unmask_irq(struct irq_data *d) | |
#if defined(CONFIG_PM) | |
static struct vic_device *vic_from_irq(unsigned int irq) | |
{ | |
- struct vic_device *v = vic_devices; | |
+ struct vic_device *v = vic_devices; | |
unsigned int base_irq = irq & ~31; | |
int id; | |
@@ -349,6 +350,9 @@ static struct irq_chip vic_chip = { | |
.name = "VIC", | |
.irq_ack = vic_ack_irq, | |
.irq_mask = vic_mask_irq, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_mask_ack = vic_ack_irq, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_unmask = vic_unmask_irq, | |
.irq_set_wake = vic_set_wake, | |
}; | |
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c | |
index 8527743..3824c23 100644 | |
--- a/drivers/irqchip/spear-shirq.c | |
+++ b/drivers/irqchip/spear-shirq.c | |
@@ -27,7 +27,7 @@ | |
#include "irqchip.h" | |
-static DEFINE_SPINLOCK(lock); | |
+static IPIPE_DEFINE_SPINLOCK(lock); | |
/* spear300 shared irq registers offsets and masks */ | |
#define SPEAR300_INT_ENB_MASK_REG 0x54 | |
@@ -191,6 +191,9 @@ static struct irq_chip shirq_chip = { | |
.name = "spear-shirq", | |
.irq_ack = shirq_irq_mask, | |
.irq_mask = shirq_irq_mask, | |
+#ifdef CONFIG_IPIPE | |
+ .irq_mask_ack = shirq_irq_mask, | |
+#endif /* CONFIG_IPIPE */ | |
.irq_unmask = shirq_irq_unmask, | |
}; | |
@@ -213,7 +216,7 @@ static void shirq_handler(unsigned irq, struct irq_desc *desc) | |
if (!(j & val)) | |
continue; | |
- generic_handle_irq(shirq->irq_base + i); | |
+ ipipe_handle_demuxed_irq(shirq->irq_base + i); | |
/* clear interrupt */ | |
if (shirq->regs.clear_reg == -1) | |
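Several chips in this patch (VIC, Versatile FPGA, spear-shirq, mxs icoll, OMAP GPIO) gain an .irq_mask_ack callback under CONFIG_IPIPE. The pipeline masks and acknowledges a deferred IRQ at hard-IRQ entry before logging it for later replay, so a combined callback halves the register traffic on that hot path. A standalone model of the fallback logic (the real genirq/I-pipe code paths differ in detail):

    #include <stdio.h>

    struct chip_ops {			/* stand-in, not the kernel's irq_chip */
    	void (*mask)(int irq);
    	void (*ack)(int irq);
    	void (*mask_ack)(int irq);	/* optional combined op */
    };

    /* What a pipeline core conceptually does on entry: one combined
     * callback when available, otherwise the two-step fallback. */
    static void hold_irq(const struct chip_ops *chip, int irq)
    {
    	if (chip->mask_ack) {
    		chip->mask_ack(irq);
    	} else {
    		chip->mask(irq);
    		chip->ack(irq);
    	}
    }

    static void mask_op(int irq)     { printf("mask %d\n", irq); }
    static void ack_op(int irq)      { printf("ack %d\n", irq); }
    static void mask_ack_op(int irq) { printf("mask+ack %d\n", irq); }

    int main(void)
    {
    	struct chip_ops two_step = { mask_op, ack_op, 0 };
    	struct chip_ops combined = { mask_op, ack_op, mask_ack_op };

    	hold_irq(&two_step, 5);	/* two device accesses */
    	hold_irq(&combined, 5);	/* one */
    	return 0;
    }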
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c | |
index a5f9888..a1d2aab 100644 | |
--- a/drivers/mfd/twl4030-irq.c | |
+++ b/drivers/mfd/twl4030-irq.c | |
@@ -35,6 +35,7 @@ | |
#include <linux/of.h> | |
#include <linux/irqdomain.h> | |
#include <linux/i2c/twl.h> | |
+#include <linux/ipipe.h> | |
#include "twl-core.h" | |
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c | |
index 277a8db..beddc7e 100644 | |
--- a/drivers/mfd/twl6030-irq.c | |
+++ b/drivers/mfd/twl6030-irq.c | |
@@ -182,7 +182,14 @@ static int twl6030_irq_thread(void *data) | |
if (sts.int_sts & 0x1) { | |
int module_irq = twl6030_irq_base + | |
twl6030_interrupt_mapping[i]; | |
+#ifndef CONFIG_IPIPE | |
generic_handle_irq(module_irq); | |
+#else | |
+ { | |
+ struct irq_desc *d = irq_to_desc(module_irq); | |
+ d->ipipe_ack(module_irq, d); | |
+ } | |
+#endif | |
} | |
local_irq_enable(); | |
@@ -443,4 +450,3 @@ int twl6030_exit_irq(void) | |
} | |
return 0; | |
} | |
- | |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig | |
index c002d86..8f88d33 100644 | |
--- a/drivers/misc/Kconfig | |
+++ b/drivers/misc/Kconfig | |
@@ -70,7 +70,7 @@ config ATMEL_TCLIB | |
config ATMEL_TCB_CLKSRC | |
bool "TC Block Clocksource" | |
- depends on ATMEL_TCLIB | |
+ depends on ATMEL_TCLIB && !IPIPE | |
default y | |
help | |
Select this to get a high precision clocksource based on a | |
@@ -354,14 +354,14 @@ config SENSORS_BH1780 | |
will be called bh1780gli. | |
config SENSORS_BH1770 | |
- tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" | |
- depends on I2C | |
- ---help--- | |
- Say Y here if you want to build a driver for BH1770GLC (ROHM) or | |
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" | |
+ depends on I2C | |
+ ---help--- | |
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or | |
SFH7770 (Osram) combined ambient light and proximity sensor chip. | |
- To compile this driver as a module, choose M here: the | |
- module will be called bh1770glc. If unsure, say N here. | |
+ To compile this driver as a module, choose M here: the | |
+ module will be called bh1770glc. If unsure, say N here. | |
config SENSORS_APDS990X | |
tristate "APDS990X combined als and proximity sensors" | |
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c | |
index 0127601..d076d92 100644 | |
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c | |
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c | |
@@ -808,7 +808,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs) | |
irq_base = ipu->irq_start + regs[i] * 32; | |
for_each_set_bit(bit, &status, 32) | |
- generic_handle_irq(irq_base + bit); | |
+ ipipe_handle_demuxed_irq(irq_base + bit); | |
} | |
} | |
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h b/drivers/staging/imx-drm/ipu-v3/ipu-prv.h | |
index 5518028..e5d8b19 100644 | |
--- a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h | |
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-prv.h | |
@@ -157,7 +157,7 @@ struct ipu_soc { | |
struct device *dev; | |
const struct ipu_devtype *devtype; | |
enum ipuv3_type ipu_type; | |
- spinlock_t lock; | |
+ ipipe_spinlock_t lock; | |
struct mutex channel_lock; | |
void __iomem *cm_reg; | |
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig | |
index c4cc27e..fdc993a 100644 | |
--- a/drivers/cpuidle/Kconfig | |
+++ b/drivers/cpuidle/Kconfig | |
@@ -2,6 +2,7 @@ | |
config CPU_IDLE | |
bool "CPU idle PM support" | |
default y if ACPI || PPC_PSERIES | |
+ depends on !(ARCH_OMAP4 && IPIPE) | |
help | |
CPU idle is a generic framework for supporting software-controlled | |
idle processor power management. It includes modular cross-platform | |
diff --git a/fs/exec.c b/fs/exec.c | |
index 1f44670..4efa21a 100644 | |
--- a/fs/exec.c | |
+++ b/fs/exec.c | |
@@ -815,6 +815,7 @@ static int exec_mmap(struct mm_struct *mm) | |
{ | |
struct task_struct *tsk; | |
struct mm_struct * old_mm, *active_mm; | |
+ unsigned long flags; | |
/* Notify parent that we're no longer interested in the old VM */ | |
tsk = current; | |
@@ -838,8 +839,10 @@ static int exec_mmap(struct mm_struct *mm) | |
task_lock(tsk); | |
active_mm = tsk->active_mm; | |
tsk->mm = mm; | |
+ ipipe_mm_switch_protect(flags); | |
tsk->active_mm = mm; | |
activate_mm(active_mm, mm); | |
+ ipipe_mm_switch_unprotect(flags); | |
task_unlock(tsk); | |
arch_pick_mmap_layout(mm); | |
if (old_mm) { | |
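exec_mmap() now brackets the active_mm update with ipipe_mm_switch_protect()/ipipe_mm_switch_unprotect() so the head domain never observes a half-switched context (this pairs with the IPIPE_WANT_ACTIVE_MM option selected for ARM earlier in the patch). When the pipeline is compiled out these presumably decay to no-ops; a plausible fallback definition, stated as an assumption rather than quoted from the patch:

    #ifndef CONFIG_IPIPE
    /* Assumed fallback: nothing to serialize against without a head domain. */
    #define ipipe_mm_switch_protect(flags)		\
    	do { (void)(flags); } while (0)
    #define ipipe_mm_switch_unprotect(flags)	\
    	do { (void)(flags); } while (0)
    #endif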
diff --git a/fs/proc/array.c b/fs/proc/array.c | |
index cbd0f1b..44680c7 100644 | |
--- a/fs/proc/array.c | |
+++ b/fs/proc/array.c | |
@@ -144,6 +144,10 @@ static const char * const task_state_array[] = { | |
"K (wakekill)", /* 128 */ | |
"W (waking)", /* 256 */ | |
"P (parked)", /* 512 */ | |
+#ifdef CONFIG_IPIPE | |
+ "H (hardening)", /* 1024 */ | |
+ "N (wakeup disabled)", /* 2048 */ | |
+#endif | |
}; | |
static inline const char *get_task_state(struct task_struct *tsk) | |
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h | |
index 33bd2de..f752d41 100644 | |
--- a/include/asm-generic/atomic.h | |
+++ b/include/asm-generic/atomic.h | |
@@ -68,11 +68,11 @@ static inline int atomic_add_return(int i, atomic_t *v) | |
unsigned long flags; | |
int temp; | |
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ | |
+ flags = hard_local_irq_save(); /* Don't trace it in an irqsoff handler */ | |
temp = v->counter; | |
temp += i; | |
v->counter = temp; | |
- raw_local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return temp; | |
} | |
@@ -91,11 +91,11 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |
unsigned long flags; | |
int temp; | |
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ | |
+ flags = hard_local_irq_save(); | |
temp = v->counter; | |
temp -= i; | |
v->counter = temp; | |
- raw_local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return temp; | |
} | |
@@ -158,9 +158,9 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) | |
unsigned long flags; | |
mask = ~mask; | |
- raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ | |
+ flags = hard_local_irq_save(); | |
v->counter &= mask; | |
- raw_local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
} | |
#endif | |
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h | |
index 9ae6c34..3d4535a 100644 | |
--- a/include/asm-generic/bitops/atomic.h | |
+++ b/include/asm-generic/bitops/atomic.h | |
@@ -21,20 +21,20 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | |
* this is the substitute */ | |
#define _atomic_spin_lock_irqsave(l,f) do { \ | |
arch_spinlock_t *s = ATOMIC_HASH(l); \ | |
- local_irq_save(f); \ | |
+ (f) = hard_local_irq_save(); \ | |
arch_spin_lock(s); \ | |
} while(0) | |
#define _atomic_spin_unlock_irqrestore(l,f) do { \ | |
arch_spinlock_t *s = ATOMIC_HASH(l); \ | |
arch_spin_unlock(s); \ | |
- local_irq_restore(f); \ | |
+ hard_local_irq_restore(f); \ | |
} while(0) | |
#else | |
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) | |
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) | |
+# define _atomic_spin_lock_irqsave(l,f) do { (f) = hard_local_irq_save(); } while (0) | |
+# define _atomic_spin_unlock_irqrestore(l,f) do { hard_local_irq_restore(f); } while (0) | |
#endif | |
/* | |
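The _atomic_spin_lock_irqsave() helpers above (SMP case) pick a lock from a small hash keyed on the atomic variable's address, so unrelated atomics rarely contend; the I-pipe change merely swaps the flag operations for their hard variants. A toy standalone version of the address-to-lock hashing (the real ATOMIC_HASH shifts by the cache-line size):

    #include <stdio.h>
    #include <stdint.h>

    #define ATOMIC_HASH_SIZE 4
    static int locks[ATOMIC_HASH_SIZE];	/* stand-ins for arch_spinlock_t */

    /* Hash an address to one of the locks, like ATOMIC_HASH() does. */
    static int *atomic_hash(const volatile void *v)
    {
    	uintptr_t a = (uintptr_t)v;
    	return &locks[(a >> 4) % ATOMIC_HASH_SIZE];
    }

    int main(void)
    {
    	int x, y;
    	printf("x -> lock %td\n", atomic_hash(&x) - locks);
    	printf("y -> lock %td\n", atomic_hash(&y) - locks);
    	return 0;
    }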
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h | |
index d8d4c89..6559e2d 100644 | |
--- a/include/asm-generic/cmpxchg-local.h | |
+++ b/include/asm-generic/cmpxchg-local.h | |
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, | |
if (size == 8 && sizeof(unsigned long) != 8) | |
wrong_size_cmpxchg(ptr); | |
- raw_local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
switch (size) { | |
case 1: prev = *(u8 *)ptr; | |
if (prev == old) | |
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, | |
default: | |
wrong_size_cmpxchg(ptr); | |
} | |
- raw_local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return prev; | |
} | |
@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr, | |
u64 prev; | |
unsigned long flags; | |
- raw_local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
prev = *(u64 *)ptr; | |
if (prev == old) | |
*(u64 *)ptr = new; | |
- raw_local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return prev; | |
} | |
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h | |
index d17784e..d8e2912 100644 | |
--- a/include/asm-generic/percpu.h | |
+++ b/include/asm-generic/percpu.h | |
@@ -67,6 +67,22 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |
#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | |
#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var))) | |
+#ifdef CONFIG_IPIPE | |
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP) | |
+extern int __ipipe_check_percpu_access(void); | |
+#define __ipipe_cpu_offset \ | |
+ ({ \ | |
+ WARN_ON_ONCE(__ipipe_check_percpu_access()); \ | |
+ __my_cpu_offset; \ | |
+ }) | |
+#else | |
+#define __ipipe_cpu_offset __my_cpu_offset | |
+#endif | |
+#ifndef __ipipe_this_cpu_ptr | |
+#define __ipipe_this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __ipipe_cpu_offset) | |
+#endif | |
+#define __ipipe_this_cpu_read(var) (*__ipipe_this_cpu_ptr(&(var))) | |
+#endif /* CONFIG_IPIPE */ | |
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA | |
extern void setup_per_cpu_areas(void); | |
@@ -82,6 +98,8 @@ extern void setup_per_cpu_areas(void); | |
#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var)))) | |
#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | |
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var))) | |
+#define __ipipe_this_cpu_ptr(ptr) VERIFY_PERCPU_PTR(ptr) | |
+#define __ipipe_this_cpu_read(var) (*__ipipe_this_cpu_ptr(&(var))) | |
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | |
#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | |
diff --git a/include/ipipe/setup.h b/include/ipipe/setup.h | |
new file mode 100644 | |
index 0000000..c2bc521 | |
--- /dev/null | |
+++ b/include/ipipe/setup.h | |
@@ -0,0 +1,10 @@ | |
+#ifndef _IPIPE_SETUP_H | |
+#define _IPIPE_SETUP_H | |
+ | |
+/* | |
+ * Placeholders for setup hooks defined by client domains. | |
+ */ | |
+ | |
+static inline void __ipipe_early_client_setup(void) { } | |
+ | |
+#endif /* !_IPIPE_SETUP_H */ | |
diff --git a/include/ipipe/thread_info.h b/include/ipipe/thread_info.h | |
new file mode 100644 | |
index 0000000..1f6e9c3 | |
--- /dev/null | |
+++ b/include/ipipe/thread_info.h | |
@@ -0,0 +1,14 @@ | |
+#ifndef _IPIPE_THREAD_INFO_H | |
+#define _IPIPE_THREAD_INFO_H | |
+ | |
+/* | |
+ * Placeholder for private thread information defined by client | |
+ * domains. | |
+ */ | |
+ | |
+struct ipipe_threadinfo { | |
+}; | |
+ | |
+static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p) { } | |
+ | |
+#endif /* !_IPIPE_THREAD_INFO_H */ | |
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h | |
index 963d714..cbb49fc 100644 | |
--- a/include/linux/clockchips.h | |
+++ b/include/linux/clockchips.h | |
@@ -112,6 +112,15 @@ struct clock_event_device { | |
int irq; | |
const struct cpumask *cpumask; | |
struct list_head list; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ struct ipipe_timer *ipipe_timer; | |
+ unsigned ipipe_stolen; | |
+ | |
+#define clockevent_ipipe_stolen(evt) ((evt)->ipipe_stolen) | |
+#else | |
+#define clockevent_ipipe_stolen(evt) (0) | |
+#endif /* !CONFIG_IPIPE */ | |
} ____cacheline_aligned; | |
/* | |
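The new ipipe_stolen field flags a clock event device whose hardware timer the head domain has taken over; clockevent_ipipe_stolen() evaluates to 0 without CONFIG_IPIPE, so drivers may test it unconditionally. A hypothetical timer ISR fragment in kernel style (illustrative only; my_timer_ack() is a made-up device helper):

    /* Illustrative fragment: guard a device-specific ack with
     * clockevent_ipipe_stolen(). */
    static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
    {
    	struct clock_event_device *evt = dev_id;

    	if (!clockevent_ipipe_stolen(evt))
    		my_timer_ack();	/* head domain already acked otherwise */

    	evt->event_handler(evt);
    	return IRQ_HANDLED;
    }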
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h | |
index 7279b94..d55a23f 100644 | |
--- a/include/linux/clocksource.h | |
+++ b/include/linux/clocksource.h | |
@@ -195,6 +195,10 @@ struct clocksource { | |
cycle_t cs_last; | |
cycle_t wd_last; | |
#endif | |
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE | |
+ cycle_t (*ipipe_read)(struct clocksource *cs); | |
+#endif /* CONFIG_IPIPE_WANT_CLOCKSOURCE */ | |
+ | |
} ____cacheline_aligned; | |
/* | |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h | |
index 99d0fbc..3650c8a 100644 | |
--- a/include/linux/ftrace.h | |
+++ b/include/linux/ftrace.h | |
@@ -103,6 +103,7 @@ enum { | |
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, | |
FTRACE_OPS_FL_STUB = 1 << 7, | |
FTRACE_OPS_FL_INITIALIZED = 1 << 8, | |
+ FTRACE_OPS_FL_IPIPE_EXCLUSIVE = 1 << 9, | |
}; | |
struct ftrace_ops { | |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h | |
index c1d6555..a44befb 100644 | |
--- a/include/linux/hardirq.h | |
+++ b/include/linux/hardirq.h | |
@@ -178,6 +178,7 @@ extern void irq_exit(void); | |
#define nmi_enter() \ | |
do { \ | |
+ __ipipe_nmi_enter(); \ | |
lockdep_off(); \ | |
ftrace_nmi_enter(); \ | |
BUG_ON(in_nmi()); \ | |
@@ -194,6 +195,7 @@ extern void irq_exit(void); | |
sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ | |
ftrace_nmi_exit(); \ | |
lockdep_on(); \ | |
+ __ipipe_nmi_exit(); \ | |
} while (0) | |
#endif /* LINUX_HARDIRQ_H */ | |
diff --git a/include/linux/i8253.h b/include/linux/i8253.h | |
index e6bb36a..898a91a 100644 | |
--- a/include/linux/i8253.h | |
+++ b/include/linux/i8253.h | |
@@ -12,6 +12,7 @@ | |
#include <linux/param.h> | |
#include <linux/spinlock.h> | |
#include <linux/timex.h> | |
+#include <linux/ipipe_lock.h> | |
/* i8253A PIT registers */ | |
#define PIT_MODE 0x43 | |
@@ -20,7 +21,7 @@ | |
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) | |
-extern raw_spinlock_t i8253_lock; | |
+IPIPE_DECLARE_RAW_SPINLOCK(i8253_lock); | |
extern struct clock_event_device i8253_clockevent; | |
extern void clockevent_i8253_init(bool oneshot); | |
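IPIPE_DECLARE_RAW_SPINLOCK here, like IPIPE_DEFINE_SPINLOCK in the gpio-pxa and spear-shirq hunks, declares a lock both domains may take: its irqsave operations are assumed to hard-disable interrupts instead of merely stalling the root stage, otherwise a head-domain IRQ could spin on a lock the root domain holds. A rough kernel-style model of the assumed semantics (the real machinery lives in linux/ipipe_lock.h):

    /* Model only: assumed acquire/release semantics of a pipeline-aware
     * spinlock; SMP details and lockdep hooks are omitted. */
    static inline unsigned long ipipe_spin_lock_irqsave_model(arch_spinlock_t *l)
    {
    	unsigned long flags;

    	flags = hard_local_irq_save();	/* mask the CPU, not just the root stage */
    	arch_spin_lock(l);
    	return flags;
    }

    static inline void ipipe_spin_unlock_irqrestore_model(arch_spinlock_t *l,
    						      unsigned long flags)
    {
    	arch_spin_unlock(l);
    	hard_local_irq_restore(flags);
    }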
diff --git a/include/linux/ipipe.h b/include/linux/ipipe.h | |
new file mode 100644 | |
index 0000000..3b09626 | |
--- /dev/null | |
+++ b/include/linux/ipipe.h | |
@@ -0,0 +1,452 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe.h | |
+ * | |
+ * Copyright (C) 2002-2007 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_H | |
+#define __LINUX_IPIPE_H | |
+ | |
+#include <linux/spinlock.h> | |
+#include <linux/cache.h> | |
+#include <linux/percpu.h> | |
+#include <linux/irq.h> | |
+#include <linux/thread_info.h> | |
+#include <linux/ipipe_base.h> | |
+#include <linux/ipipe_debug.h> | |
+#include <asm/ptrace.h> | |
+#include <asm/ipipe.h> | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+#include <linux/ipipe_domain.h> | |
+ | |
+/* ipipe_set_hooks(..., enables) */ | |
+#define IPIPE_SYSCALL __IPIPE_SYSCALL_E | |
+#define IPIPE_TRAP __IPIPE_TRAP_E | |
+#define IPIPE_KEVENT __IPIPE_KEVENT_E | |
+ | |
+struct ipipe_sysinfo { | |
+ int sys_nr_cpus; /* Number of CPUs on board */ | |
+ int sys_hrtimer_irq; /* hrtimer device IRQ */ | |
+ u64 sys_hrtimer_freq; /* hrtimer device frequency */ | |
+ u64 sys_hrclock_freq; /* hrclock device frequency */ | |
+ u64 sys_cpu_freq; /* CPU frequency (Hz) */ | |
+ struct ipipe_arch_sysinfo arch; | |
+}; | |
+ | |
+struct ipipe_work_header { | |
+ size_t size; | |
+ void (*handler)(struct ipipe_work_header *work); | |
+}; | |
+ | |
+extern unsigned int __ipipe_printk_virq; | |
+ | |
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq); | |
+ | |
+void __ipipe_complete_domain_migration(void); | |
+ | |
+int __ipipe_switch_tail(void); | |
+ | |
+int __ipipe_migrate_head(void); | |
+ | |
+void __ipipe_reenter_root(void); | |
+ | |
+int __ipipe_disable_ondemand_mappings(struct task_struct *p); | |
+ | |
+int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma); | |
+ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ | |
+#define prepare_arch_switch(next) \ | |
+ do { \ | |
+ hard_local_irq_enable(); \ | |
+ __ipipe_report_schedule(current, next); \ | |
+ } while(0) | |
+ | |
+#ifndef ipipe_get_active_mm | |
+static inline struct mm_struct *ipipe_get_active_mm(void) | |
+{ | |
+ return __this_cpu_read(ipipe_percpu.active_mm); | |
+} | |
+#define ipipe_get_active_mm ipipe_get_active_mm | |
+#endif | |
+ | |
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+ | |
+#define prepare_arch_switch(next) \ | |
+ do { \ | |
+ __ipipe_report_schedule(current, next); \ | |
+ hard_local_irq_disable(); \ | |
+ } while(0) | |
+ | |
+#ifndef ipipe_get_active_mm | |
+#define ipipe_get_active_mm() (current->active_mm) | |
+#endif | |
+ | |
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ | |
+ | |
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE | |
+ | |
+extern unsigned long long __ipipe_cs_freq; | |
+ | |
+extern struct clocksource *__ipipe_cs; | |
+ | |
+#endif /* CONFIG_IPIPE_WANT_CLOCKSOURCE */ | |
+ | |
+static inline bool __ipipe_hrclock_ok(void) | |
+{ | |
+ return __ipipe_hrclock_freq != 0; | |
+} | |
+ | |
+static inline void __ipipe_nmi_enter(void) | |
+{ | |
+ __this_cpu_write(ipipe_percpu.nmi_state, __ipipe_root_status); | |
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status); | |
+ ipipe_save_context_nmi(); | |
+} | |
+ | |
+static inline void __ipipe_nmi_exit(void) | |
+{ | |
+ ipipe_restore_context_nmi(); | |
+ if (!test_bit(IPIPE_STALL_FLAG, __this_cpu_ptr(&ipipe_percpu.nmi_state))) | |
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_root_status); | |
+} | |
+ | |
+/* KVM-side calls, hw IRQs off. */ | |
+static inline void __ipipe_enter_vm(struct ipipe_vm_notifier *vmf) | |
+{ | |
+ struct ipipe_percpu_data *p; | |
+ | |
+ p = __this_cpu_ptr(&ipipe_percpu); | |
+ p->vm_notifier = vmf; | |
+ barrier(); | |
+} | |
+ | |
+static inline void __ipipe_exit_vm(void) | |
+{ | |
+ struct ipipe_percpu_data *p; | |
+ | |
+ p = __this_cpu_ptr(&ipipe_percpu); | |
+ p->vm_notifier = NULL; | |
+ barrier(); | |
+} | |
+ | |
+/* Client-side call, hw IRQs off. */ | |
+void __ipipe_notify_vm_preemption(void); | |
+ | |
+static inline void __ipipe_sync_pipeline(struct ipipe_domain *top) | |
+{ | |
+ if (__ipipe_current_domain != top) { | |
+ __ipipe_do_sync_pipeline(top); | |
+ return; | |
+ } | |
+ if (!test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(top)->status)) | |
+ __ipipe_sync_stage(); | |
+} | |
+ | |
+void ipipe_register_head(struct ipipe_domain *ipd, | |
+ const char *name); | |
+ | |
+void ipipe_unregister_head(struct ipipe_domain *ipd); | |
+ | |
+int ipipe_request_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq, | |
+ ipipe_irq_handler_t handler, | |
+ void *cookie, | |
+ ipipe_irq_ackfn_t ackfn); | |
+ | |
+void ipipe_free_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq); | |
+ | |
+void ipipe_raise_irq(unsigned int irq); | |
+ | |
+void ipipe_set_hooks(struct ipipe_domain *ipd, | |
+ int enables); | |
+ | |
+unsigned int ipipe_alloc_virq(void); | |
+ | |
+void ipipe_free_virq(unsigned int virq); | |
+ | |
+static inline void ipipe_post_irq_head(unsigned int irq) | |
+{ | |
+ __ipipe_set_irq_pending(ipipe_head_domain, irq); | |
+} | |
+ | |
+static inline void ipipe_post_irq_root(unsigned int irq) | |
+{ | |
+ __ipipe_set_irq_pending(&ipipe_root, irq); | |
+} | |
+ | |
+static inline void ipipe_stall_head(void) | |
+{ | |
+ hard_local_irq_disable(); | |
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status); | |
+} | |
+ | |
+static inline unsigned long ipipe_test_and_stall_head(void) | |
+{ | |
+ hard_local_irq_disable(); | |
+ return __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status); | |
+} | |
+ | |
+static inline unsigned long ipipe_test_head(void) | |
+{ | |
+ unsigned long flags, ret; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ ret = test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ return ret; | |
+} | |
+ | |
+void ipipe_unstall_head(void); | |
+ | |
+void __ipipe_restore_head(unsigned long x); | |
+ | |
+static inline void ipipe_restore_head(unsigned long x) | |
+{ | |
+ ipipe_check_irqoff(); | |
+ if ((x ^ test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status)) & 1) | |
+ __ipipe_restore_head(x); | |
+} | |
+ | |
+void __ipipe_post_work_root(struct ipipe_work_header *work); | |
+ | |
+#define ipipe_post_work_root(p, header) \ | |
+ do { \ | |
+ void header_not_at_start(void); \ | |
+ if (offsetof(typeof(*(p)), header)) { \ | |
+ header_not_at_start(); \ | |
+ } \ | |
+ __ipipe_post_work_root(&(p)->header); \ | |
+ } while (0) | |
+ | |
+int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo); | |
+ | |
+unsigned long ipipe_critical_enter(void (*syncfn)(void)); | |
+ | |
+void ipipe_critical_exit(unsigned long flags); | |
+ | |
+void ipipe_prepare_panic(void); | |
+ | |
+static inline void ipipe_set_foreign_stack(struct ipipe_domain *ipd) | |
+{ | |
+ /* Must be called hw interrupts off. */ | |
+ __set_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpu_context(ipd)->status); | |
+} | |
+ | |
+static inline void ipipe_clear_foreign_stack(struct ipipe_domain *ipd) | |
+{ | |
+ /* Must be called hw interrupts off. */ | |
+ __clear_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpu_context(ipd)->status); | |
+} | |
+ | |
+static inline int ipipe_test_foreign_stack(void) | |
+{ | |
+ /* Must be called hw interrupts off. */ | |
+ return test_bit(IPIPE_NOSTACK_FLAG, &__ipipe_current_context->status); | |
+} | |
+ | |
+#ifndef ipipe_safe_current | |
+#define ipipe_safe_current() \ | |
+ ({ \ | |
+ struct task_struct *__p__; \ | |
+ unsigned long __flags__; \ | |
+ __flags__ = hard_smp_local_irq_save(); \ | |
+ __p__ = ipipe_test_foreign_stack() ? &init_task : current; \ | |
+ hard_smp_local_irq_restore(__flags__); \ | |
+ __p__; \ | |
+ }) | |
+#endif | |
+ | |
+#ifdef CONFIG_SMP | |
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask); | |
+void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask); | |
+#else /* !CONFIG_SMP */ | |
+static inline | |
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask) { } | |
+static inline void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask) { } | |
+#endif /* CONFIG_SMP */ | |
+ | |
+static inline void ipipe_restore_root_nosync(unsigned long x) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ __ipipe_restore_root_nosync(x); | |
+ hard_smp_local_irq_restore(flags); | |
+} | |
+ | |
+/* Must be called hw IRQs off. */ | |
+static inline void ipipe_lock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_domain *ipd = __ipipe_current_domain; | |
+ if (ipd == ipipe_root_domain) | |
+ __ipipe_lock_irq(irq); | |
+} | |
+ | |
+/* Must be called hw IRQs off. */ | |
+static inline void ipipe_unlock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_domain *ipd = __ipipe_current_domain; | |
+ if (ipd == ipipe_root_domain) | |
+ __ipipe_unlock_irq(irq); | |
+} | |
+ | |
+static inline struct ipipe_threadinfo *ipipe_current_threadinfo(void) | |
+{ | |
+	return &current_thread_info()->ipipe_data; | |
+} | |
+ | |
+#define ipipe_task_threadinfo(p) (&task_thread_info(p)->ipipe_data) | |
+ | |
+static inline void ipipe_enable_irq(unsigned int irq) | |
+{ | |
+ struct irq_desc *desc; | |
+ struct irq_chip *chip; | |
+ | |
+ desc = irq_to_desc(irq); | |
+ if (desc == NULL) | |
+ return; | |
+ | |
+ chip = irq_desc_get_chip(desc); | |
+ | |
+ if (WARN_ON_ONCE(chip->irq_enable == NULL && chip->irq_unmask == NULL)) | |
+ return; | |
+ | |
+ if (chip->irq_enable) | |
+ chip->irq_enable(&desc->irq_data); | |
+ else | |
+ chip->irq_unmask(&desc->irq_data); | |
+} | |
+ | |
+static inline void ipipe_disable_irq(unsigned int irq) | |
+{ | |
+ struct irq_desc *desc; | |
+ struct irq_chip *chip; | |
+ | |
+ desc = irq_to_desc(irq); | |
+ if (desc == NULL) | |
+ return; | |
+ | |
+ chip = irq_desc_get_chip(desc); | |
+ | |
+ if (WARN_ON_ONCE(chip->irq_disable == NULL && chip->irq_mask == NULL)) | |
+ return; | |
+ | |
+ if (chip->irq_disable) | |
+ chip->irq_disable(&desc->irq_data); | |
+ else | |
+ chip->irq_mask(&desc->irq_data); | |
+} | |
+ | |
+static inline void ipipe_end_irq(unsigned int irq) | |
+{ | |
+ struct irq_desc *desc = irq_to_desc(irq); | |
+ | |
+ if (desc) | |
+ desc->ipipe_end(irq, desc); | |
+} | |
+ | |
+static inline int ipipe_chained_irq_p(struct irq_desc *desc) | |
+{ | |
+ void __ipipe_chained_irq(unsigned irq, struct irq_desc *desc); | |
+ | |
+ return desc->handle_irq == __ipipe_chained_irq; | |
+} | |
+ | |
+static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq) | |
+{ | |
+ ipipe_trace_irq_entry(cascade_irq); | |
+ __ipipe_dispatch_irq(cascade_irq, IPIPE_IRQF_NOSYNC); | |
+ ipipe_trace_irq_exit(cascade_irq); | |
+} | |
+ | |
+#define ipipe_enable_notifier(p) \ | |
+ do { \ | |
+ barrier(); \ | |
+ (p)->ipipe.flags |= PF_EVNOTIFY; \ | |
+ } while (0) | |
+ | |
+#define ipipe_disable_notifier(p) \ | |
+ do { \ | |
+ barrier(); \ | |
+ (p)->ipipe.flags &= ~(PF_EVNOTIFY|PF_MAYDAY); \ | |
+ } while (0) | |
+ | |
+#define ipipe_notifier_enabled_p(p) \ | |
+ (((p)->ipipe.flags) & PF_EVNOTIFY) | |
+ | |
+#define ipipe_raise_mayday(p) \ | |
+ do { \ | |
+ ipipe_check_irqoff(); \ | |
+ if (ipipe_notifier_enabled_p(p)) \ | |
+ (p)->ipipe.flags |= PF_MAYDAY; \ | |
+ } while (0) | |
+ | |
+extern bool __ipipe_probe_access; | |
+ | |
+long ipipe_probe_kernel_read(void *dst, void *src, size_t size); | |
+long ipipe_probe_kernel_write(void *dst, void *src, size_t size); | |
+ | |
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || defined(CONFIG_PROVE_LOCKING) || \ | |
+ defined(CONFIG_PREEMPT_VOLUNTARY) || defined(CONFIG_IPIPE_DEBUG_CONTEXT) | |
+extern void __ipipe_uaccess_might_fault(void); | |
+#else | |
+#define __ipipe_uaccess_might_fault() might_fault() | |
+#endif | |
+ | |
+#include <linux/ipipe_compat.h> | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+#define __ipipe_root_p 1 | |
+#define ipipe_root_p 1 | |
+ | |
+static inline void __ipipe_complete_domain_migration(void) { } | |
+ | |
+static inline int __ipipe_switch_tail(void) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+static inline void __ipipe_nmi_enter(void) { } | |
+ | |
+static inline void __ipipe_nmi_exit(void) { } | |
+ | |
+#define ipipe_safe_current() current | |
+#define ipipe_processor_id() smp_processor_id() | |
+ | |
+static inline int ipipe_test_foreign_stack(void) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+static inline void ipipe_lock_irq(unsigned int irq) { } | |
+ | |
+static inline void ipipe_unlock_irq(unsigned int irq) { } | |
+ | |
+#define ipipe_probe_kernel_read(d, s, sz) probe_kernel_read(d, s, sz) | |
+#define ipipe_probe_kernel_write(d, s, sz) probe_kernel_write(d, s, sz) | |
+#define __ipipe_uaccess_might_fault() might_fault() | |
+ | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+#endif /* !__LINUX_IPIPE_H */ | |
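A usage note on ipipe_post_work_root() above: the offsetof() check makes a misplaced ipipe_work_header fail the build by referencing the undefined header_not_at_start(), so the header must be the first member of the carrying structure. A hypothetical caller (payload and names invented; this assumes the descriptor is copied by __ipipe_post_work_root(), as the size field suggests):

    /* Hypothetical caller sketch. */
    struct my_relay_work {
    	struct ipipe_work_header head;	/* must stay the first member */
    	int payload;
    };

    static void my_relay_handler(struct ipipe_work_header *work)
    {
    	struct my_relay_work *rq =
    		container_of(work, struct my_relay_work, head);

    	/* Runs over the root domain, where Linux services are safe. */
    	printk(KERN_INFO "relayed payload=%d\n", rq->payload);
    }

    static void post_from_head_domain(int value)
    {
    	struct my_relay_work rq = {
    		.head = {
    			.size    = sizeof(rq),
    			.handler = my_relay_handler,
    		},
    		.payload = value,
    	};

    	/* Assuming the descriptor is copied into a per-CPU queue, a
    	 * stack object is fine here. */
    	ipipe_post_work_root(&rq, head);
    }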
diff --git a/include/linux/ipipe_base.h b/include/linux/ipipe_base.h | |
new file mode 100644 | |
index 0000000..fc88b2e | |
--- /dev/null | |
+++ b/include/linux/ipipe_base.h | |
@@ -0,0 +1,392 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_base.h | |
+ * | |
+ * Copyright (C) 2002-2012 Philippe Gerum. | |
+ * 2007 Jan Kiszka. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_BASE_H | |
+#define __LINUX_IPIPE_BASE_H | |
+ | |
+struct kvm_vcpu; | |
+struct ipipe_vm_notifier; | |
+struct irq_desc; | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+#define IPIPE_CORE_APIREV CONFIG_IPIPE_CORE_APIREV | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+void ipipe_root_only(void); | |
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+static inline void ipipe_root_only(void) { } | |
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ | |
+typedef void (*ipipe_irq_handler_t)(unsigned int irq, | |
+ void *cookie); | |
+ | |
+void ipipe_unstall_root(void); | |
+ | |
+void ipipe_restore_root(unsigned long x); | |
+ | |
+#include <asm/ipipe_base.h> | |
+#include <linux/compiler.h> | |
+ | |
+#ifndef IPIPE_NR_ROOT_IRQS | |
+#define IPIPE_NR_ROOT_IRQS NR_IRQS | |
+#endif /* !IPIPE_NR_ROOT_IRQS */ | |
+ | |
+#define __bpl_up(x) (((x)+(BITS_PER_LONG-1)) & ~(BITS_PER_LONG-1)) | |
+/* Number of virtual IRQs (must be a multiple of BITS_PER_LONG) */ | |
+#define IPIPE_NR_VIRQS BITS_PER_LONG | |
+/* First virtual IRQ # (must be aligned on BITS_PER_LONG) */ | |
+#define IPIPE_VIRQ_BASE __bpl_up(IPIPE_NR_XIRQS) | |
+/* Total number of IRQ slots */ | |
+#define IPIPE_NR_IRQS (IPIPE_VIRQ_BASE+IPIPE_NR_VIRQS) | |
+ | |
+static inline int ipipe_virtual_irq_p(unsigned int irq) | |
+{ | |
+ return irq >= IPIPE_VIRQ_BASE && irq < IPIPE_NR_IRQS; | |
+} | |
+ | |
+#define IPIPE_IRQ_LOMAPSZ (IPIPE_NR_IRQS / BITS_PER_LONG) | |
+#if IPIPE_IRQ_LOMAPSZ > BITS_PER_LONG | |
+/* | |
+ * We need a 3-level mapping. This allows us to handle up to 32k IRQ | |
+ * vectors on 32bit machines, 256k on 64bit ones. | |
+ */ | |
+#define __IPIPE_3LEVEL_IRQMAP 1 | |
+#define IPIPE_IRQ_MDMAPSZ (__bpl_up(IPIPE_IRQ_LOMAPSZ) / BITS_PER_LONG) | |
+#else | |
+/* | |
+ * 2-level mapping is enough. This allows us to handle up to 1024 IRQ | |
+ * vectors on 32bit machines, 4096 on 64bit ones. | |
+ */ | |
+#define __IPIPE_2LEVEL_IRQMAP 1 | |
+#endif | |
+ | |
+/* Per-cpu pipeline status */ | |
+#define IPIPE_STALL_FLAG 0 /* interrupts (virtually) disabled. */ | |
+#define IPIPE_NOSTACK_FLAG 1 /* running on foreign stack. */ | |
+#define IPIPE_STALL_MASK (1L << IPIPE_STALL_FLAG) | |
+#define IPIPE_NOSTACK_MASK (1L << IPIPE_NOSTACK_FLAG) | |
+ | |
+/* Interrupt control bits */ | |
+#define IPIPE_HANDLE_FLAG 0 | |
+#define IPIPE_STICKY_FLAG 1 | |
+#define IPIPE_LOCK_FLAG 2 | |
+#define IPIPE_HANDLE_MASK (1 << IPIPE_HANDLE_FLAG) | |
+#define IPIPE_STICKY_MASK (1 << IPIPE_STICKY_FLAG) | |
+#define IPIPE_LOCK_MASK (1 << IPIPE_LOCK_FLAG) | |
+ | |
+struct pt_regs; | |
+struct ipipe_domain; | |
+ | |
+struct ipipe_trap_data { | |
+ int exception; | |
+ struct pt_regs *regs; | |
+}; | |
+ | |
+#define IPIPE_KEVT_SCHEDULE 0 | |
+#define IPIPE_KEVT_SIGWAKE 1 | |
+#define IPIPE_KEVT_SETSCHED 2 | |
+#define IPIPE_KEVT_SETAFFINITY 3 | |
+#define IPIPE_KEVT_EXIT 4 | |
+#define IPIPE_KEVT_CLEANUP 5 | |
+#define IPIPE_KEVT_HOSTRT 6 | |
+ | |
+struct ipipe_vm_notifier { | |
+ void (*handler)(struct ipipe_vm_notifier *nfy); | |
+}; | |
+ | |
+void __ipipe_init_early(void); | |
+ | |
+void __ipipe_init(void); | |
+ | |
+#ifdef CONFIG_PROC_FS | |
+void __ipipe_init_proc(void); | |
+#ifdef CONFIG_IPIPE_TRACE | |
+void __ipipe_init_tracer(void); | |
+#else /* !CONFIG_IPIPE_TRACE */ | |
+static inline void __ipipe_init_tracer(void) { } | |
+#endif /* CONFIG_IPIPE_TRACE */ | |
+#else /* !CONFIG_PROC_FS */ | |
+static inline void __ipipe_init_proc(void) { } | |
+#endif /* CONFIG_PROC_FS */ | |
+ | |
+void __ipipe_restore_root_nosync(unsigned long x); | |
+ | |
+#define IPIPE_IRQF_NOACK 0x1 | |
+#define IPIPE_IRQF_NOSYNC 0x2 | |
+ | |
+void __ipipe_dispatch_irq(unsigned int irq, int flags); | |
+ | |
+void __ipipe_do_sync_stage(void); | |
+ | |
+void __ipipe_do_sync_pipeline(struct ipipe_domain *top); | |
+ | |
+void __ipipe_lock_irq(unsigned int irq); | |
+ | |
+void __ipipe_unlock_irq(unsigned int irq); | |
+ | |
+void __ipipe_do_critical_sync(unsigned int irq, void *cookie); | |
+ | |
+void __ipipe_ack_edge_irq(unsigned int irq, struct irq_desc *desc); | |
+ | |
+void __ipipe_nop_irq(unsigned int irq, struct irq_desc *desc); | |
+ | |
+static inline void __ipipe_idle(void) | |
+{ | |
+ ipipe_unstall_root(); | |
+} | |
+ | |
+#ifndef __ipipe_sync_check | |
+#define __ipipe_sync_check 1 | |
+#endif | |
+ | |
+static inline void __ipipe_sync_stage(void) | |
+{ | |
+ if (likely(__ipipe_sync_check)) | |
+ __ipipe_do_sync_stage(); | |
+} | |
+ | |
+#ifndef __ipipe_check_root_resched | |
+#ifdef CONFIG_PREEMPT | |
+#define __ipipe_check_root_resched() \ | |
+ (preempt_count() == 0 && need_resched()) | |
+#else | |
+#define __ipipe_check_root_resched() 0 | |
+#endif | |
+#endif | |
+ | |
+#ifndef __ipipe_run_irqtail | |
+#define __ipipe_run_irqtail(irq) do { } while(0) | |
+#endif | |
+ | |
+void __ipipe_flush_printk(unsigned int irq, void *cookie); | |
+ | |
+void __ipipe_pin_range_globally(unsigned long start, | |
+ unsigned long end); | |
+ | |
+#define hard_preempt_disable() \ | |
+ ({ \ | |
+ unsigned long __flags__; \ | |
+ __flags__ = hard_local_irq_save(); \ | |
+ if (__ipipe_root_p) \ | |
+ preempt_disable(); \ | |
+ __flags__; \ | |
+ }) | |
+ | |
+#define hard_preempt_enable(flags) \ | |
+ do { \ | |
+ if (__ipipe_root_p) { \ | |
+ preempt_enable_no_resched(); \ | |
+ hard_local_irq_restore(flags); \ | |
+ preempt_check_resched(); \ | |
+ } else \ | |
+ hard_local_irq_restore(flags); \ | |
+ } while (0) | |
+ | |
+#define __ipipe_get_cpu(flags) ({ (flags) = hard_preempt_disable(); ipipe_processor_id(); }) | |
+#define __ipipe_put_cpu(flags) hard_preempt_enable(flags) | |
+ | |
+int __ipipe_notify_syscall(struct pt_regs *regs); | |
+ | |
+int __ipipe_notify_trap(int exception, struct pt_regs *regs); | |
+ | |
+int __ipipe_notify_kevent(int event, void *data); | |
+ | |
+#define __ipipe_report_trap(exception, regs) \ | |
+ __ipipe_notify_trap(exception, regs) | |
+ | |
+#define __ipipe_report_sigwake(p) \ | |
+ do { \ | |
+ if (ipipe_notifier_enabled_p(p)) \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_SIGWAKE, p); \ | |
+ } while (0) | |
+ | |
+struct ipipe_cpu_migration_data { | |
+ struct task_struct *task; | |
+ int dest_cpu; | |
+}; | |
+ | |
+#define __ipipe_report_setaffinity(__p, __dest_cpu) \ | |
+ do { \ | |
+ struct ipipe_cpu_migration_data d = { \ | |
+ .task = (__p), \ | |
+ .dest_cpu = (__dest_cpu), \ | |
+ }; \ | |
+ if (ipipe_notifier_enabled_p(__p)) \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_SETAFFINITY, &d); \ | |
+ } while (0) | |
+ | |
+#define __ipipe_report_exit(p) \ | |
+ do { \ | |
+ if (ipipe_notifier_enabled_p(p)) \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_EXIT, p); \ | |
+ } while (0) | |
+ | |
+#define __ipipe_report_setsched(p) \ | |
+ do { \ | |
+ if (ipipe_notifier_enabled_p(p)) \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_SETSCHED, p); \ | |
+ } while (0) | |
+ | |
+#define __ipipe_report_schedule(prev, next) \ | |
+do { \ | |
+ if ((ipipe_notifier_enabled_p(next) || \ | |
+ ipipe_notifier_enabled_p(prev))) { \ | |
+ __this_cpu_write(ipipe_percpu.rqlock_owner, prev); \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_SCHEDULE, next); \ | |
+ } \ | |
+} while (0) | |
+ | |
+#define __ipipe_report_cleanup(mm) \ | |
+ __ipipe_notify_kevent(IPIPE_KEVT_CLEANUP, mm) | |
+ | |
+void __ipipe_notify_vm_preemption(void); | |
+ | |
+#define hard_cond_local_irq_enable() hard_local_irq_enable() | |
+#define hard_cond_local_irq_disable() hard_local_irq_disable() | |
+#define hard_cond_local_irq_save() hard_local_irq_save() | |
+#define hard_cond_local_irq_restore(flags) hard_local_irq_restore(flags) | |
+ | |
+struct ipipe_task_info { | |
+ unsigned long flags; | |
+}; | |
+ | |
+#ifdef CONFIG_IPIPE_LEGACY | |
+ | |
+#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS | |
+#define IPIPE_EVENT_SCHEDULE IPIPE_FIRST_EVENT | |
+#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 1) | |
+#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 2) | |
+#define IPIPE_EVENT_SETAFFINITY (IPIPE_FIRST_EVENT + 3) | |
+#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 4) | |
+#define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 5) | |
+#define IPIPE_EVENT_HOSTRT (IPIPE_FIRST_EVENT + 6) | |
+#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT + 7) | |
+#define IPIPE_LAST_EVENT IPIPE_EVENT_SYSCALL | |
+#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1) | |
+ | |
+typedef int (*ipipe_event_handler_t)(unsigned int event, | |
+ struct ipipe_domain *from, | |
+ void *data); | |
+struct ipipe_legacy_context { | |
+ unsigned int domid; | |
+ int priority; | |
+ void *pdd; | |
+ ipipe_event_handler_t handlers[IPIPE_NR_EVENTS]; | |
+}; | |
+ | |
+#define __ipipe_init_taskinfo(p) \ | |
+ do { \ | |
+ __ipipe_clear_taskflags(p); \ | |
+ memset(p->ptd, 0, sizeof(p->ptd)); \ | |
+ } while (0) | |
+ | |
+#else /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+struct ipipe_legacy_context { | |
+}; | |
+ | |
+#define __ipipe_init_taskinfo(p) \ | |
+ do { \ | |
+ __ipipe_clear_taskflags(p); \ | |
+ } while (0) | |
+ | |
+#endif /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+#define __ipipe_clear_taskflags(p) \ | |
+ do { \ | |
+ (p)->ipipe.flags = 0; \ | |
+ } while (0) | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+struct task_struct; | |
+struct mm_struct; | |
+ | |
+struct ipipe_task_info { | |
+}; | |
+ | |
+static inline void __ipipe_init_early(void) { } | |
+ | |
+static inline void __ipipe_init(void) { } | |
+ | |
+static inline void __ipipe_init_proc(void) { } | |
+ | |
+static inline void __ipipe_idle(void) { } | |
+ | |
+static inline void __ipipe_report_sigwake(struct task_struct *p) { } | |
+ | |
+static inline void __ipipe_report_setaffinity(struct task_struct *p, | |
+ int dest_cpu) { } | |
+ | |
+static inline void __ipipe_report_setsched(struct task_struct *p) { } | |
+ | |
+static inline void __ipipe_report_exit(struct task_struct *p) { } | |
+ | |
+static inline void __ipipe_report_cleanup(struct mm_struct *mm) { } | |
+ | |
+#define __ipipe_report_trap(exception, regs) 0 | |
+ | |
+static inline void __ipipe_init_taskinfo(struct task_struct *p) { } | |
+ | |
+static inline void __ipipe_clear_taskflags(struct task_struct *p) { } | |
+ | |
+static inline void __ipipe_pin_range_globally(unsigned long start, | |
+ unsigned long end) | |
+{ } | |
+ | |
+#define hard_preempt_disable() ({ preempt_disable(); 0; }) | |
+#define hard_preempt_enable(flags) ({ preempt_enable(); (void)(flags); }) | |
+ | |
+#define __ipipe_get_cpu(flags) ({ (void)(flags); get_cpu(); }) | |
+#define __ipipe_put_cpu(flags) \ | |
+ do { \ | |
+ (void)(flags); \ | |
+ put_cpu(); \ | |
+ } while (0) | |
+ | |
+#define __ipipe_root_tick_p(regs) 1 | |
+ | |
+#define ipipe_handle_demuxed_irq(irq) generic_handle_irq(irq) | |
+ | |
+#define __ipipe_serial_debug(fmt, args...) do { } while (0) | |
+ | |
+#define __ipipe_enter_vm(vmf) do { } while (0) | |
+ | |
+static inline void __ipipe_exit_vm(void) { } | |
+ | |
+static inline void __ipipe_notify_vm_preemption(void) { } | |
+ | |
+static inline void ipipe_root_only(void) { } | |
+ | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+static inline void ipipe_preempt_root_only(void) | |
+{ | |
+#if defined(CONFIG_IPIPE_DEBUG_CONTEXT) && \ | |
+ defined(CONFIG_IPIPE_LEGACY) && \ | |
+ !defined(CONFIG_IPIPE_HAVE_SAFE_THREAD_INFO) | |
+ ipipe_root_only(); | |
+#endif | |
+} | |
+ | |
+#endif /* !__LINUX_IPIPE_BASE_H */ | |
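The hard_preempt_disable()/hard_preempt_enable() pair defined above also backs
__ipipe_get_cpu()/__ipipe_put_cpu(): CPU migration is fenced by hard interrupt
masking, not merely by the root kernel's preemption counter. A minimal usage
sketch, assuming a hypothetical per-CPU counter that is not part of the patch:

    static DEFINE_PER_CPU(int, foo_hits);	/* hypothetical */

    static void foo_count_local(void)
    {
            unsigned long flags;
            int cpu;

            /* Hard IRQs off; preemption also off when on the root domain. */
            cpu = __ipipe_get_cpu(flags);
            per_cpu(foo_hits, cpu)++;
            /* Restore both, rechecking for rescheduling on root. */
            __ipipe_put_cpu(flags);
    }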
diff --git a/include/linux/ipipe_compat.h b/include/linux/ipipe_compat.h | |
new file mode 100644 | |
index 0000000..fab8f45 | |
--- /dev/null | |
+++ b/include/linux/ipipe_compat.h | |
@@ -0,0 +1,284 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_compat.h | |
+ * | |
+ * Copyright (C) 2012 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_COMPAT_H | |
+#define __LINUX_IPIPE_COMPAT_H | |
+ | |
+#ifndef __LINUX_IPIPE_H | |
+#error "Do not include this file directly, use linux/ipipe.h instead" | |
+#endif | |
+ | |
+#ifdef CONFIG_IPIPE_LEGACY | |
+ | |
+#define IPIPE_HEAD_PRIORITY (-1) | |
+#define IPIPE_ROOT_PRIO 100 | |
+#define IPIPE_ROOT_ID 0 | |
+#define IPIPE_ROOT_NPTDKEYS 4 | |
+ | |
+#define IPIPE_DUMMY_FLAG 31 | |
+#define IPIPE_WIRED_FLAG IPIPE_HANDLE_FLAG | |
+#define IPIPE_WIRED_MASK (1 << IPIPE_WIRED_FLAG) | |
+#define IPIPE_PASS_FLAG IPIPE_DUMMY_FLAG | |
+#define IPIPE_PASS_MASK (1 << IPIPE_PASS_FLAG) | |
+#define IPIPE_DYNAMIC_FLAG IPIPE_HANDLE_FLAG | |
+#define IPIPE_DYNAMIC_MASK (1 << IPIPE_DYNAMIC_FLAG) | |
+#define IPIPE_SYSTEM_FLAG IPIPE_DUMMY_FLAG | |
+#define IPIPE_SYSTEM_MASK (1 << IPIPE_SYSTEM_FLAG) | |
+#define IPIPE_EXCLUSIVE_FLAG IPIPE_DUMMY_FLAG | |
+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG) | |
+ | |
+#define IPIPE_NR_CPUS NR_CPUS | |
+ | |
+#define IPIPE_EVENT_SELF 0x80000000 | |
+#define IPIPE_EVENT_RETURN IPIPE_TRAP_MAYDAY | |
+ | |
+#define TASK_ATOMICSWITCH TASK_HARDENING | |
+ | |
+struct ipipe_domain_attr { | |
+ unsigned int domid; | |
+ const char *name; | |
+ int priority; | |
+ void (*entry) (void); | |
+ void *pdd; | |
+}; | |
+ | |
+void ipipe_init_attr(struct ipipe_domain_attr *attr); | |
+ | |
+int ipipe_register_domain(struct ipipe_domain *ipd, | |
+ struct ipipe_domain_attr *attr); | |
+ | |
+int ipipe_unregister_domain(struct ipipe_domain *ipd); | |
+ | |
+int ipipe_alloc_ptdkey(void); | |
+ | |
+int ipipe_free_ptdkey(int key); | |
+ | |
+int ipipe_set_ptd(int key, void *value); | |
+ | |
+void *ipipe_get_ptd(int key); | |
+ | |
+int ipipe_virtualize_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq, | |
+ ipipe_irq_handler_t handler, | |
+ void *cookie, | |
+ ipipe_irq_ackfn_t ackfn, | |
+ unsigned int modemask); | |
+ | |
+ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd, | |
+ unsigned int event, | |
+ ipipe_event_handler_t handler); | |
+ | |
+int ipipe_setscheduler_root(struct task_struct *p, | |
+ int policy, | |
+ int prio); | |
+ | |
+static inline void ipipe_check_context(struct ipipe_domain *border_ipd) | |
+{ | |
+ ipipe_root_only(); | |
+} | |
+ | |
+static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd) | |
+{ | |
+ ipipe_prepare_panic(); | |
+} | |
+ | |
+static inline void __ipipe_propagate_irq(unsigned int irq) | |
+{ | |
+ ipipe_post_irq_root(irq); | |
+} | |
+ | |
+static inline void __ipipe_schedule_irq_head(unsigned int irq) | |
+{ | |
+ ipipe_post_irq_head(irq); | |
+} | |
+ | |
+static inline void __ipipe_schedule_irq_root(unsigned int irq) | |
+{ | |
+ ipipe_post_irq_root(irq); | |
+} | |
+ | |
+static inline int ipipe_trigger_irq(unsigned int irq) | |
+{ | |
+ ipipe_raise_irq(irq); | |
+ return 1; | |
+} | |
+ | |
+static inline void ipipe_stall_pipeline_from(struct ipipe_domain *ipd) | |
+{ | |
+ if (ipd != ipipe_root_domain) | |
+ ipipe_stall_head(); | |
+ else | |
+ ipipe_stall_root(); | |
+} | |
+ | |
+static inline | |
+unsigned long ipipe_test_and_stall_pipeline_from(struct ipipe_domain *ipd) | |
+{ | |
+ if (ipd != ipipe_root_domain) | |
+ return ipipe_test_and_stall_head(); | |
+ | |
+ return ipipe_test_and_stall_root(); | |
+} | |
+ | |
+static inline | |
+void ipipe_unstall_pipeline_from(struct ipipe_domain *ipd) | |
+{ | |
+ if (ipd != ipipe_root_domain) | |
+ ipipe_unstall_head(); | |
+ else | |
+ ipipe_unstall_root(); | |
+} | |
+ | |
+static inline | |
+void ipipe_restore_pipeline_from(struct ipipe_domain *ipd, | |
+ unsigned long x) | |
+{ | |
+ if (ipd != ipipe_root_domain) | |
+ ipipe_restore_head(x); | |
+ else | |
+ ipipe_restore_root(x); | |
+} | |
+ | |
+static inline | |
+unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd) | |
+{ | |
+ return test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(ipd)->status); | |
+} | |
+ | |
+static inline void ipipe_stall_pipeline_head(void) | |
+{ | |
+ ipipe_stall_head(); | |
+} | |
+ | |
+static inline unsigned long ipipe_test_and_stall_pipeline_head(void) | |
+{ | |
+ return ipipe_test_and_stall_head(); | |
+} | |
+ | |
+static inline void ipipe_unstall_pipeline_head(void) | |
+{ | |
+ ipipe_unstall_head(); | |
+} | |
+ | |
+static inline void ipipe_restore_pipeline_head(unsigned long x) | |
+{ | |
+ ipipe_restore_head(x); | |
+} | |
+ | |
+static inline int ipipe_disable_ondemand_mappings(struct task_struct *p) | |
+{ | |
+ return __ipipe_disable_ondemand_mappings(p); | |
+} | |
+ | |
+static inline int ipipe_reenter_root(struct task_struct *prev, | |
+ int policy, | |
+ int prio) | |
+{ | |
+ __ipipe_reenter_root(); | |
+ return 0; | |
+} | |
+ | |
+static inline void ipipe_root_preempt_notify(void) | |
+{ | |
+ ipipe_notify_root_preemption(); | |
+} | |
+ | |
+#define ipipe_return_notify(p) ipipe_raise_mayday(p) | |
+ | |
+/* | |
+ * Keep the following as a macro, so that client code can check for | |
+ * support of the invariant pipeline head optimization. | |
+ */ | |
+#define __ipipe_pipeline_head() ipipe_head_domain | |
+ | |
+static inline int irqs_disabled_hw(void) | |
+{ | |
+ return hard_irqs_disabled(); | |
+} | |
+ | |
+static inline void local_irq_disable_hw(void) | |
+{ | |
+ hard_local_irq_disable(); | |
+} | |
+ | |
+static inline void local_irq_enable_hw(void) | |
+{ | |
+ hard_local_irq_enable(); | |
+} | |
+ | |
+#define local_irq_save_hw(flags) \ | |
+ do { \ | |
+ (flags) = hard_local_irq_save(); \ | |
+ } while (0) | |
+ | |
+static inline void local_irq_restore_hw(unsigned long flags) | |
+{ | |
+ hard_local_irq_restore(flags); | |
+} | |
+ | |
+#define local_save_flags_hw(flags) \ | |
+ do { \ | |
+ (flags) = hard_local_save_flags(); \ | |
+ } while (0) | |
+ | |
+#define local_irq_save_hw_smp(flags) \ | |
+ do { \ | |
+ (flags) = hard_smp_local_irq_save(); \ | |
+ } while (0) | |
+#define local_irq_restore_hw_smp(flags) hard_smp_local_irq_restore(flags) | |
+ | |
+#define local_irq_save_hw_cond(flags) \ | |
+ do { \ | |
+ (flags) = hard_cond_local_irq_save(); \ | |
+ } while (0) | |
+#define local_irq_restore_hw_cond(flags) hard_cond_local_irq_restore(flags) | |
+ | |
+void __ipipe_legacy_init_stage(struct ipipe_domain *ipd); | |
+ | |
+/* | |
+ * These values have no real meaning from a versioning standpoint; | |
+ * they are simply guaranteed to look more recent than any legacy | |
+ * patch release published to date. | |
+ */ | |
+#define IPIPE_MAJOR_NUMBER 3 | |
+#define IPIPE_MINOR_NUMBER 0 | |
+#define IPIPE_PATCH_NUMBER 0 | |
+ | |
+#define __IPIPE_FEATURE_REQUEST_TICKDEV 1 | |
+#define __IPIPE_FEATURE_FASTPEND_IRQ 1 | |
+#define __IPIPE_FEATURE_TRACE_EVENT 1 | |
+#define __IPIPE_FEATURE_ENABLE_NOTIFIER 1 | |
+#define __IPIPE_FEATURE_PREPARE_PANIC 1 | |
+#define __IPIPE_FEATURE_SYSINFO_V2 1 | |
+#define __IPIPE_FEATURE_PIC_MUTE 1 | |
+#ifdef CONFIG_IPIPE_HAVE_VM_NOTIFIER | |
+#define __IPIPE_FEATURE_ROOTPREEMPT_NOTIFIER 1 | |
+#endif | |
+ | |
+#else /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+static inline void __ipipe_legacy_init_stage(struct ipipe_domain *ipd) | |
+{ | |
+} | |
+ | |
+#endif /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+#endif /* !__LINUX_IPIPE_COMPAT_H */ | |
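Every wrapper in this compat header is a thin veneer over the current API, so
code written against older I-pipe releases keeps building when
CONFIG_IPIPE_LEGACY is enabled. A sketch of the mapping for a head-domain
critical section (illustrative fragment only):

    unsigned long flags;

    /* The legacy spelling ... */
    flags = ipipe_test_and_stall_pipeline_head();
    /* ... critical section with the head stage stalled ... */
    ipipe_restore_pipeline_head(flags);

    /* ... resolves to the current spelling: */
    flags = ipipe_test_and_stall_head();
    /* ... */
    ipipe_restore_head(flags);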
diff --git a/include/linux/ipipe_debug.h b/include/linux/ipipe_debug.h | |
new file mode 100644 | |
index 0000000..b6bf8c6 | |
--- /dev/null | |
+++ b/include/linux/ipipe_debug.h | |
@@ -0,0 +1,98 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_debug.h | |
+ * | |
+ * Copyright (C) 2012 Philippe Gerum <[email protected]>. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_DEBUG_H | |
+#define __LINUX_IPIPE_DEBUG_H | |
+ | |
+#include <linux/ipipe_domain.h> | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+ | |
+#include <asm/bug.h> | |
+ | |
+static inline int ipipe_disable_context_check(void) | |
+{ | |
+ return xchg(__this_cpu_ptr(&ipipe_percpu.context_check), 0); | |
+} | |
+ | |
+static inline void ipipe_restore_context_check(int old_state) | |
+{ | |
+ __this_cpu_write(ipipe_percpu.context_check, old_state); | |
+} | |
+ | |
+static inline void ipipe_context_check_off(void) | |
+{ | |
+ int cpu; | |
+ for_each_online_cpu(cpu) | |
+ per_cpu(ipipe_percpu, cpu).context_check = 0; | |
+} | |
+ | |
+static inline void ipipe_save_context_nmi(void) | |
+{ | |
+ int state = ipipe_disable_context_check(); | |
+ __this_cpu_write(ipipe_percpu.context_check_saved, state); | |
+} | |
+ | |
+static inline void ipipe_restore_context_nmi(void) | |
+{ | |
+ ipipe_restore_context_check(__this_cpu_read(ipipe_percpu.context_check_saved)); | |
+} | |
+ | |
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ | |
+static inline int ipipe_disable_context_check(void) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+static inline void ipipe_restore_context_check(int old_state) { } | |
+ | |
+static inline void ipipe_context_check_off(void) { } | |
+ | |
+static inline void ipipe_save_context_nmi(void) { } | |
+ | |
+static inline void ipipe_restore_context_nmi(void) { } | |
+ | |
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG | |
+ | |
+#define ipipe_check_irqoff() \ | |
+ do { \ | |
+ if (WARN_ON_ONCE(!hard_irqs_disabled())) \ | |
+ hard_local_irq_disable(); \ | |
+ } while (0) | |
+ | |
+#else /* !CONFIG_IPIPE_DEBUG */ | |
+ | |
+static inline void ipipe_check_irqoff(void) { } | |
+ | |
+#endif /* !CONFIG_IPIPE_DEBUG */ | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL | |
+#define IPIPE_WARN(c) WARN_ON(c) | |
+#define IPIPE_WARN_ONCE(c) WARN_ON_ONCE(c) | |
+#else | |
+#define IPIPE_WARN(c) do { (void)(c); } while (0) | |
+#define IPIPE_WARN_ONCE(c) do { (void)(c); } while (0) | |
+#endif | |
+ | |
+#endif /* !__LINUX_IPIPE_DEBUG_H */ | |
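IPIPE_WARN()/IPIPE_WARN_ONCE() still evaluate their argument when
CONFIG_IPIPE_DEBUG_INTERNAL is off, so conditions with side effects behave the
same in both configurations. A sketch of the intended pattern together with
ipipe_check_irqoff(), at a hypothetical call site:

    static void __ipipe_handle_foo(unsigned int irq)	/* hypothetical */
    {
            ipipe_check_irqoff();	/* hard IRQs must already be masked */
            IPIPE_WARN_ONCE(irq >= IPIPE_NR_IRQS);
            /* ... */
    }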
diff --git a/include/linux/ipipe_domain.h b/include/linux/ipipe_domain.h | |
new file mode 100644 | |
index 0000000..1da5a32 | |
--- /dev/null | |
+++ b/include/linux/ipipe_domain.h | |
@@ -0,0 +1,311 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_domain.h | |
+ * | |
+ * Copyright (C) 2007-2012 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_DOMAIN_H | |
+#define __LINUX_IPIPE_DOMAIN_H | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+#include <linux/mutex.h> | |
+#include <linux/percpu.h> | |
+#include <asm/ptrace.h> | |
+ | |
+struct task_struct; | |
+struct mm_struct; | |
+struct irq_desc; | |
+struct ipipe_vm_notifier; | |
+ | |
+#define __IPIPE_SYSCALL_P 0 | |
+#define __IPIPE_TRAP_P 1 | |
+#define __IPIPE_KEVENT_P 2 | |
+#define __IPIPE_SYSCALL_E (1 << __IPIPE_SYSCALL_P) | |
+#define __IPIPE_TRAP_E (1 << __IPIPE_TRAP_P) | |
+#define __IPIPE_KEVENT_E (1 << __IPIPE_KEVENT_P) | |
+#define __IPIPE_ALL_E 0x7 | |
+#define __IPIPE_SYSCALL_R (8 << __IPIPE_SYSCALL_P) | |
+#define __IPIPE_TRAP_R (8 << __IPIPE_TRAP_P) | |
+#define __IPIPE_KEVENT_R (8 << __IPIPE_KEVENT_P) | |
+#define __IPIPE_SHIFT_R 3 | |
+#define __IPIPE_ALL_R (__IPIPE_ALL_E << __IPIPE_SHIFT_R) | |
+ | |
+typedef void (*ipipe_irq_ackfn_t)(unsigned int irq, struct irq_desc *desc); | |
+ | |
+struct ipipe_domain { | |
+ int context_offset; | |
+ struct ipipe_irqdesc { | |
+ unsigned long control; | |
+ ipipe_irq_ackfn_t ackfn; | |
+ ipipe_irq_handler_t handler; | |
+ void *cookie; | |
+ } ____cacheline_aligned irqs[IPIPE_NR_IRQS]; | |
+ const char *name; | |
+ struct mutex mutex; | |
+ struct ipipe_legacy_context legacy; | |
+}; | |
+ | |
+static inline void * | |
+__ipipe_irq_cookie(struct ipipe_domain *ipd, unsigned int irq) | |
+{ | |
+ return ipd->irqs[irq].cookie; | |
+} | |
+ | |
+static inline ipipe_irq_handler_t | |
+__ipipe_irq_handler(struct ipipe_domain *ipd, unsigned int irq) | |
+{ | |
+ return ipd->irqs[irq].handler; | |
+} | |
+ | |
+extern struct ipipe_domain ipipe_root; | |
+ | |
+#define ipipe_root_domain (&ipipe_root) | |
+ | |
+extern struct ipipe_domain *ipipe_head_domain; | |
+ | |
+struct ipipe_percpu_domain_data { | |
+ unsigned long status; /* <= Must be first in struct. */ | |
+ unsigned long irqpend_himap; | |
+#ifdef __IPIPE_3LEVEL_IRQMAP | |
+ unsigned long irqpend_mdmap[IPIPE_IRQ_MDMAPSZ]; | |
+#endif | |
+ unsigned long irqpend_lomap[IPIPE_IRQ_LOMAPSZ]; | |
+ unsigned long irqheld_map[IPIPE_IRQ_LOMAPSZ]; | |
+ unsigned long irqall[IPIPE_NR_IRQS]; | |
+ struct ipipe_domain *domain; | |
+ int coflags; | |
+}; | |
+ | |
+struct ipipe_percpu_data { | |
+ struct ipipe_percpu_domain_data root; | |
+ struct ipipe_percpu_domain_data head; | |
+ struct ipipe_percpu_domain_data *curr; | |
+ struct pt_regs tick_regs; | |
+ int hrtimer_irq; | |
+ struct task_struct *task_hijacked; | |
+ struct task_struct *rqlock_owner; | |
+ struct ipipe_vm_notifier *vm_notifier; | |
+ unsigned long nmi_state; | |
+#ifdef CONFIG_IPIPE_WANT_ACTIVE_MM | |
+ struct mm_struct *active_mm; | |
+#endif | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+ int context_check; | |
+ int context_check_saved; | |
+#endif | |
+}; | |
+ | |
+/* | |
+ * CAREFUL: all accessors based on __ipipe_this_cpu_ptr() found in | |
+ * this file must be used only while hw interrupts are off, to | |
+ * prevent CPU migration regardless of the running domain. | |
+ */ | |
+DECLARE_PER_CPU(struct ipipe_percpu_data, ipipe_percpu); | |
+ | |
+static inline struct ipipe_percpu_domain_data * | |
+__context_of(struct ipipe_percpu_data *p, struct ipipe_domain *ipd) | |
+{ | |
+ return (void *)p + ipd->context_offset; | |
+} | |
+ | |
+/** | |
+ * ipipe_percpu_context - return the address of the pipeline context | |
+ * data for a domain on a given CPU. | |
+ * | |
+ * NOTE: this is the slowest accessor, use it carefully. Prefer | |
+ * ipipe_this_cpu_context() for requests targeted at the current | |
+ * CPU. Additionally, if the target domain is known at build time, | |
+ * consider ipipe_this_cpu_{root, head}_context(). | |
+ */ | |
+static inline struct ipipe_percpu_domain_data * | |
+ipipe_percpu_context(struct ipipe_domain *ipd, int cpu) | |
+{ | |
+ return __context_of(&per_cpu(ipipe_percpu, cpu), ipd); | |
+} | |
+ | |
+/** | |
+ * ipipe_this_cpu_context - return the address of the pipeline context | |
+ * data for a domain on the current CPU. hw IRQs must be off. | |
+ * | |
+ * NOTE: this accessor is a bit faster, but since we don't know which | |
+ * one of "root" or "head" ipd refers to, we still need to compute the | |
+ * context address from its offset. | |
+ */ | |
+static inline struct ipipe_percpu_domain_data * | |
+ipipe_this_cpu_context(struct ipipe_domain *ipd) | |
+{ | |
+ return __context_of(__ipipe_this_cpu_ptr(&ipipe_percpu), ipd); | |
+} | |
+ | |
+/** | |
+ * ipipe_this_cpu_root_context - return the address of the pipeline | |
+ * context data for the root domain on the current CPU. hw IRQs must | |
+ * be off. | |
+ * | |
+ * NOTE: this accessor is recommended when the domain we refer to is | |
+ * known at build time to be the root one. | |
+ */ | |
+static inline struct ipipe_percpu_domain_data * | |
+ipipe_this_cpu_root_context(void) | |
+{ | |
+ return __ipipe_this_cpu_ptr(&ipipe_percpu.root); | |
+} | |
+ | |
+/** | |
+ * ipipe_this_cpu_head_context - return the address of the pipeline | |
+ * context data for the registered head domain on the current CPU. hw | |
+ * IRQs must be off. | |
+ * | |
+ * NOTE: this accessor is recommended when the domain we refer to is | |
+ * known at build time to be the registered head domain. This address | |
+ * is always different from the context data of the root domain, even | |
+ * in the absence of a registered head domain. To get the address of | |
+ * the context data for the domain leading the pipeline at the time of | |
+ * the call (which may be root if no head domain is registered), use | |
+ * ipipe_this_cpu_leading_context() instead. | |
+ */ | |
+static inline struct ipipe_percpu_domain_data * | |
+ipipe_this_cpu_head_context(void) | |
+{ | |
+ return __ipipe_this_cpu_ptr(&ipipe_percpu.head); | |
+} | |
+ | |
+/** | |
+ * ipipe_this_cpu_leading_context - return the address of the pipeline | |
+ * context data for the domain leading the pipeline on the current | |
+ * CPU. hw IRQs must be off. | |
+ * | |
+ * NOTE: this accessor is required when either root or a registered | |
+ * head domain may be the final target of this call, depending on | |
+ * whether the high priority domain was installed via | |
+ * ipipe_register_head(). | |
+ */ | |
+static inline struct ipipe_percpu_domain_data * | |
+ipipe_this_cpu_leading_context(void) | |
+{ | |
+ return ipipe_this_cpu_context(ipipe_head_domain); | |
+} | |
+ | |
+/** | |
+ * __ipipe_get_current_context() - return the address of the pipeline | |
+ * context data of the domain running on the current CPU. hw IRQs must | |
+ * be off. | |
+ */ | |
+static inline struct ipipe_percpu_domain_data *__ipipe_get_current_context(void) | |
+{ | |
+ return __ipipe_this_cpu_read(ipipe_percpu.curr); | |
+} | |
+ | |
+#define __ipipe_current_context __ipipe_get_current_context() | |
+ | |
+/** | |
+ * __ipipe_set_current_context() - switch the current CPU to the | |
+ * specified domain context. hw IRQs must be off. | |
+ * | |
+ * NOTE: this is the only way to change the current domain for the | |
+ * current CPU. Don't bypass. | |
+ */ | |
+static inline | |
+void __ipipe_set_current_context(struct ipipe_percpu_domain_data *pd) | |
+{ | |
+ struct ipipe_percpu_data *p; | |
+ p = __ipipe_this_cpu_ptr(&ipipe_percpu); | |
+ p->curr = pd; | |
+} | |
+ | |
+/** | |
+ * __ipipe_set_current_domain() - switch the current CPU to the | |
+ * specified domain. This is equivalent to calling | |
+ * __ipipe_set_current_context() with the context data of that | |
+ * domain. hw IRQs must be off. | |
+ */ | |
+static inline void __ipipe_set_current_domain(struct ipipe_domain *ipd) | |
+{ | |
+ struct ipipe_percpu_data *p; | |
+ p = __ipipe_this_cpu_ptr(&ipipe_percpu); | |
+ p->curr = __context_of(p, ipd); | |
+} | |
+ | |
+static inline struct ipipe_percpu_domain_data *ipipe_current_context(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *pd; | |
+ unsigned long flags; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ pd = __ipipe_get_current_context(); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ return pd; | |
+} | |
+ | |
+static inline struct ipipe_domain *__ipipe_get_current_domain(void) | |
+{ | |
+ return __ipipe_get_current_context()->domain; | |
+} | |
+ | |
+#define __ipipe_current_domain __ipipe_get_current_domain() | |
+ | |
+/** | |
+ * ipipe_get_current_domain() - return the address of the pipeline | |
+ * domain running on the current CPU. hw IRQs may be on or off. | |
+ */ | |
+static inline struct ipipe_domain *ipipe_get_current_domain(void) | |
+{ | |
+ struct ipipe_domain *ipd; | |
+ unsigned long flags; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ ipd = __ipipe_get_current_domain(); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ return ipd; | |
+} | |
+ | |
+#define ipipe_current_domain ipipe_get_current_domain() | |
+ | |
+#define __ipipe_root_p (__ipipe_current_domain == ipipe_root_domain) | |
+#define ipipe_root_p (ipipe_current_domain == ipipe_root_domain) | |
+ | |
+#ifdef CONFIG_SMP | |
+#define __ipipe_root_status (ipipe_this_cpu_root_context()->status) | |
+#else | |
+extern unsigned long __ipipe_root_status; | |
+#endif | |
+ | |
+#define __ipipe_head_status (ipipe_this_cpu_head_context()->status) | |
+ | |
+/** | |
+ * __ipipe_ipending_p() - Whether we have interrupts pending | |
+ * (i.e. logged) for the given domain context on the current CPU. hw | |
+ * IRQs must be off. | |
+ */ | |
+static inline int __ipipe_ipending_p(struct ipipe_percpu_domain_data *pd) | |
+{ | |
+ return pd->irqpend_himap != 0; | |
+} | |
+ | |
+static inline unsigned long | |
+__ipipe_cpudata_irq_hits(struct ipipe_domain *ipd, int cpu, unsigned int irq) | |
+{ | |
+ return ipipe_percpu_context(ipd, cpu)->irqall[irq]; | |
+} | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
+#endif /* !__LINUX_IPIPE_DOMAIN_H */ | |
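The accessors above come in pairs: the __-prefixed flavors assume hard IRQs
are already off, while the unprefixed ones mask hard IRQs around the access
themselves. A sketch contrasting the two (illustrative):

    static void foo_path_select(void)	/* hypothetical */
    {
            struct ipipe_domain *ipd;

            /* Arbitrary context; the accessor fences migration itself. */
            ipd = ipipe_get_current_domain();
            if (ipd == ipipe_head_domain) {
                    /* out-of-band (head) path */
            }
    }

    /* With hard IRQs known to be off, the cheaper form is safe: */
    if (__ipipe_current_domain == ipipe_root_domain) {
            /* in-band (root) path */
    }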
diff --git a/include/linux/ipipe_lock.h b/include/linux/ipipe_lock.h | |
new file mode 100644 | |
index 0000000..7440412 | |
--- /dev/null | |
+++ b/include/linux/ipipe_lock.h | |
@@ -0,0 +1,257 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_lock.h | |
+ * | |
+ * Copyright (C) 2009 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_LOCK_H | |
+#define __LINUX_IPIPE_LOCK_H | |
+ | |
+typedef struct { | |
+ arch_spinlock_t arch_lock; | |
+} __ipipe_spinlock_t; | |
+ | |
+#define ipipe_spinlock_p(lock) \ | |
+ __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *) | |
+ | |
+#define std_spinlock_raw_p(lock) \ | |
+ __builtin_types_compatible_p(typeof(lock), raw_spinlock_t *) | |
+ | |
+#define std_spinlock_p(lock) \ | |
+ __builtin_types_compatible_p(typeof(lock), spinlock_t *) | |
+ | |
+#define ipipe_spinlock(lock) ((__ipipe_spinlock_t *)(lock)) | |
+#define std_spinlock_raw(lock) ((raw_spinlock_t *)(lock)) | |
+#define std_spinlock(lock) ((spinlock_t *)(lock)) | |
+ | |
+#define PICK_SPINLOCK_IRQSAVE(lock, flags) \ | |
+ do { \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ (flags) = __ipipe_spin_lock_irqsave(ipipe_spinlock(lock)); \ | |
+ else if (std_spinlock_raw_p(lock)) \ | |
+ __real_raw_spin_lock_irqsave(std_spinlock_raw(lock), flags); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __real_raw_spin_lock_irqsave(&std_spinlock(lock)->rlock, flags); \ | |
+ else __bad_lock_type(); \ | |
+ } while (0) | |
+ | |
+#define PICK_SPINTRYLOCK_IRQSAVE(lock, flags) \ | |
+ ({ \ | |
+ int __ret__; \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ __ret__ = __ipipe_spin_trylock_irqsave(ipipe_spinlock(lock), &(flags)); \ | |
+ else if (std_spinlock_raw_p(lock)) \ | |
+ __ret__ = __real_raw_spin_trylock_irqsave(std_spinlock_raw(lock), flags); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __ret__ = __real_raw_spin_trylock_irqsave(&std_spinlock(lock)->rlock, flags); \ | |
+ else __bad_lock_type(); \ | |
+ __ret__; \ | |
+ }) | |
+ | |
+#define PICK_SPINTRYLOCK_IRQ(lock) \ | |
+ ({ \ | |
+ int __ret__; \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ __ret__ = __ipipe_spin_trylock_irq(ipipe_spinlock(lock)); \ | |
+ else if (std_spinlock_raw_p(lock)) \ | |
+ __ret__ = __real_raw_spin_trylock_irq(std_spinlock_raw(lock)); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __ret__ = __real_raw_spin_trylock_irq(&std_spinlock(lock)->rlock); \ | |
+ else __bad_lock_type(); \ | |
+ __ret__; \ | |
+ }) | |
+ | |
+#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags) \ | |
+ do { \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \ | |
+ else { \ | |
+ __ipipe_spin_unlock_debug(flags); \ | |
+ if (std_spinlock_raw_p(lock)) \ | |
+ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \ | |
+ } \ | |
+ } while (0) | |
+ | |
+#define PICK_SPINOP(op, lock) \ | |
+ ({ \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \ | |
+ else if (std_spinlock_raw_p(lock)) \ | |
+ __real_raw_spin##op(std_spinlock_raw(lock)); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __real_raw_spin##op(&std_spinlock(lock)->rlock); \ | |
+ else __bad_lock_type(); \ | |
+ (void)0; \ | |
+ }) | |
+ | |
+#define PICK_SPINOP_RET(op, lock, type) \ | |
+ ({ \ | |
+ type __ret__; \ | |
+ if (ipipe_spinlock_p(lock)) \ | |
+ __ret__ = arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \ | |
+ else if (std_spinlock_raw_p(lock)) \ | |
+ __ret__ = __real_raw_spin##op(std_spinlock_raw(lock)); \ | |
+ else if (std_spinlock_p(lock)) \ | |
+ __ret__ = __real_raw_spin##op(&std_spinlock(lock)->rlock); \ | |
+ else { __ret__ = -1; __bad_lock_type(); } \ | |
+ __ret__; \ | |
+ }) | |
+ | |
+#define arch_spin_lock_init(lock) \ | |
+ do { \ | |
+ IPIPE_DEFINE_SPINLOCK(__lock__); \ | |
+ *((ipipe_spinlock_t *)lock) = __lock__; \ | |
+ } while (0) | |
+ | |
+#define arch_spin_lock_irq(lock) \ | |
+ do { \ | |
+ hard_local_irq_disable(); \ | |
+ arch_spin_lock(lock); \ | |
+ } while (0) | |
+ | |
+#define arch_spin_unlock_irq(lock) \ | |
+ do { \ | |
+ arch_spin_unlock(lock); \ | |
+ hard_local_irq_enable(); \ | |
+ } while (0) | |
+ | |
+typedef struct { | |
+ arch_rwlock_t arch_lock; | |
+} __ipipe_rwlock_t; | |
+ | |
+#define ipipe_rwlock_p(lock) \ | |
+ __builtin_types_compatible_p(typeof(lock), __ipipe_rwlock_t *) | |
+ | |
+#define std_rwlock_p(lock) \ | |
+ __builtin_types_compatible_p(typeof(lock), rwlock_t *) | |
+ | |
+#define ipipe_rwlock(lock) ((__ipipe_rwlock_t *)(lock)) | |
+#define std_rwlock(lock) ((rwlock_t *)(lock)) | |
+ | |
+#define PICK_RWOP(op, lock) \ | |
+ do { \ | |
+ if (ipipe_rwlock_p(lock)) \ | |
+ arch##op(&ipipe_rwlock(lock)->arch_lock); \ | |
+ else if (std_rwlock_p(lock)) \ | |
+ _raw##op(std_rwlock(lock)); \ | |
+ else __bad_lock_type(); \ | |
+ } while (0) | |
+ | |
+extern int __bad_lock_type(void); | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+#define ipipe_spinlock_t __ipipe_spinlock_t | |
+#define IPIPE_DEFINE_RAW_SPINLOCK(x) ipipe_spinlock_t x = IPIPE_SPIN_LOCK_UNLOCKED | |
+#define IPIPE_DECLARE_RAW_SPINLOCK(x) extern ipipe_spinlock_t x | |
+#define IPIPE_DEFINE_SPINLOCK(x) IPIPE_DEFINE_RAW_SPINLOCK(x) | |
+#define IPIPE_DECLARE_SPINLOCK(x) IPIPE_DECLARE_RAW_SPINLOCK(x) | |
+ | |
+#define IPIPE_SPIN_LOCK_UNLOCKED \ | |
+ (__ipipe_spinlock_t) { .arch_lock = __ARCH_SPIN_LOCK_UNLOCKED } | |
+ | |
+#define spin_lock_irqsave_cond(lock, flags) \ | |
+ spin_lock_irqsave(lock, flags) | |
+ | |
+#define spin_unlock_irqrestore_cond(lock, flags) \ | |
+ spin_unlock_irqrestore(lock, flags) | |
+ | |
+#define raw_spin_lock_irqsave_cond(lock, flags) \ | |
+ raw_spin_lock_irqsave(lock, flags) | |
+ | |
+#define raw_spin_unlock_irqrestore_cond(lock, flags) \ | |
+ raw_spin_unlock_irqrestore(lock, flags) | |
+ | |
+void __ipipe_spin_lock_irq(ipipe_spinlock_t *lock); | |
+ | |
+int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock); | |
+ | |
+void __ipipe_spin_unlock_irq(ipipe_spinlock_t *lock); | |
+ | |
+unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock); | |
+ | |
+int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock, | |
+ unsigned long *x); | |
+ | |
+void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock, | |
+ unsigned long x); | |
+ | |
+void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock); | |
+ | |
+void __ipipe_spin_unlock_irqcomplete(unsigned long x); | |
+ | |
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP) | |
+void __ipipe_spin_unlock_debug(unsigned long flags); | |
+#else | |
+#define __ipipe_spin_unlock_debug(flags) do { } while (0) | |
+#endif | |
+ | |
+#define ipipe_rwlock_t __ipipe_rwlock_t | |
+#define IPIPE_DEFINE_RWLOCK(x) ipipe_rwlock_t x = IPIPE_RW_LOCK_UNLOCKED | |
+#define IPIPE_DECLARE_RWLOCK(x) extern ipipe_rwlock_t x | |
+ | |
+#define IPIPE_RW_LOCK_UNLOCKED \ | |
+ (__ipipe_rwlock_t) { .arch_lock = __ARCH_RW_LOCK_UNLOCKED } | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+#define ipipe_spinlock_t spinlock_t | |
+#define IPIPE_DEFINE_SPINLOCK(x) DEFINE_SPINLOCK(x) | |
+#define IPIPE_DECLARE_SPINLOCK(x) extern spinlock_t x | |
+#define IPIPE_SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(unknown) | |
+#define IPIPE_DEFINE_RAW_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x) | |
+#define IPIPE_DECLARE_RAW_SPINLOCK(x) extern raw_spinlock_t x | |
+ | |
+#define spin_lock_irqsave_cond(lock, flags) \ | |
+ do { \ | |
+ (void)(flags); \ | |
+ spin_lock(lock); \ | |
+ } while (0) | |
+ | |
+#define spin_unlock_irqrestore_cond(lock, flags) \ | |
+ spin_unlock(lock) | |
+ | |
+#define raw_spin_lock_irqsave_cond(lock, flags) \ | |
+ do { \ | |
+ (void)(flags); \ | |
+ raw_spin_lock(lock); \ | |
+ } while (0) | |
+ | |
+#define raw_spin_unlock_irqrestore_cond(lock, flags) \ | |
+ raw_spin_unlock(lock) | |
+ | |
+#define __ipipe_spin_lock_irq(lock) do { } while (0) | |
+#define __ipipe_spin_unlock_irq(lock) do { } while (0) | |
+#define __ipipe_spin_lock_irqsave(lock) 0 | |
+#define __ipipe_spin_trylock_irq(lock) 1 | |
+#define __ipipe_spin_trylock_irqsave(lock, x) ({ (void)(x); 1; }) | |
+#define __ipipe_spin_unlock_irqrestore(lock, x) do { (void)(x); } while (0) | |
+#define __ipipe_spin_unlock_irqbegin(lock) do { } while (0) | |
+#define __ipipe_spin_unlock_irqcomplete(x) do { (void)(x); } while (0) | |
+#define __ipipe_spin_unlock_debug(flags) do { } while (0) | |
+ | |
+#define ipipe_rwlock_t rwlock_t | |
+#define IPIPE_DEFINE_RWLOCK(x) DEFINE_RWLOCK(x) | |
+#define IPIPE_DECLARE_RWLOCK(x) extern rwlock_t x | |
+#define IPIPE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED | |
+ | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+#endif /* !__LINUX_IPIPE_LOCK_H */ | |
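The PICK_* helpers dispatch at compile time on the lock's static type via
__builtin_types_compatible_p(), so the usual raw_spin_lock() spellings
transparently serve both regular kernel locks and hard, pipeline-safe locks.
A minimal sketch with a hypothetical hard lock:

    static IPIPE_DEFINE_SPINLOCK(foo_hard_lock);	/* hypothetical */

    void foo_update(void)
    {
            unsigned long flags;

            /* Resolves to __ipipe_spin_lock_irqsave(): hard IRQs masked. */
            raw_spin_lock_irqsave(&foo_hard_lock, flags);
            /* ... state shared between root and head domains ... */
            raw_spin_unlock_irqrestore(&foo_hard_lock, flags);
    }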
diff --git a/include/linux/ipipe_tickdev.h b/include/linux/ipipe_tickdev.h | |
new file mode 100644 | |
index 0000000..5b4adbf | |
--- /dev/null | |
+++ b/include/linux/ipipe_tickdev.h | |
@@ -0,0 +1,145 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_tickdev.h | |
+ * | |
+ * Copyright (C) 2007 Philippe Gerum. | |
+ * Copyright (C) 2012 Gilles Chanteperdrix | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef __LINUX_IPIPE_TICKDEV_H | |
+#define __LINUX_IPIPE_TICKDEV_H | |
+ | |
+#include <linux/list.h> | |
+#include <linux/cpumask.h> | |
+#include <linux/clockchips.h> | |
+#include <linux/ipipe_domain.h> | |
+#include <linux/clocksource.h> | |
+#include <linux/timekeeper_internal.h> | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+enum clock_event_mode; | |
+struct clock_event_device; | |
+ | |
+struct ipipe_hostrt_data { | |
+ short live; | |
+ seqcount_t seqcount; | |
+ time_t wall_time_sec; | |
+ u32 wall_time_nsec; | |
+ struct timespec wall_to_monotonic; | |
+ cycle_t cycle_last; | |
+ cycle_t mask; | |
+ u32 mult; | |
+ u32 shift; | |
+}; | |
+ | |
+struct ipipe_timer { | |
+ int irq; | |
+ void (*request)(struct ipipe_timer *timer, int steal); | |
+ int (*set)(unsigned long ticks, void *timer); | |
+ void (*ack)(void); | |
+ void (*release)(struct ipipe_timer *timer); | |
+ | |
+ /* Only if registering a timer directly */ | |
+ const char *name; | |
+ unsigned rating; | |
+ unsigned long freq; | |
+ unsigned min_delay_ticks; | |
+ const struct cpumask *cpumask; | |
+ | |
+ /* For internal use */ | |
+ void *timer_set; /* pointer passed to ->set() callback */ | |
+ struct clock_event_device *host_timer; | |
+ struct list_head link; | |
+ | |
+ /* Conversions between clock frequency and timer frequency */ | |
+ unsigned c2t_integ; | |
+ unsigned c2t_frac; | |
+ | |
+ /* For clockevent interception */ | |
+ u32 real_mult; | |
+ u32 real_shift; | |
+ void (*real_set_mode)(enum clock_event_mode mode, | |
+ struct clock_event_device *cdev); | |
+ int (*real_set_next_event)(unsigned long evt, | |
+ struct clock_event_device *cdev); | |
+}; | |
+ | |
+#define __ipipe_hrtimer_irq __ipipe_this_cpu_read(ipipe_percpu.hrtimer_irq) | |
+ | |
+extern unsigned long __ipipe_hrtimer_freq; | |
+ | |
+/* | |
+ * Called by clockevents_register_device() to register a piggybacked | |
+ * ipipe timer, if there is one. | |
+ */ | |
+void ipipe_host_timer_register(struct clock_event_device *clkevt); | |
+ | |
+/* | |
+ * Register a standalone ipipe timer | |
+ */ | |
+void ipipe_timer_register(struct ipipe_timer *timer); | |
+ | |
+/* | |
+ * Choose the best timer for each CPU and take over its handling. | |
+ */ | |
+int ipipe_select_timers(const struct cpumask *mask); | |
+ | |
+/* | |
+ * Release the per-cpu timers | |
+ */ | |
+void ipipe_timers_release(void); | |
+ | |
+/* | |
+ * Start handling the per-cpu timer irq and intercepting the Linux clockevent | |
+ * device callbacks. | |
+ */ | |
+int ipipe_timer_start(void (*tick_handler)(void), | |
+ void (*emumode)(enum clock_event_mode mode, | |
+ struct clock_event_device *cdev), | |
+ int (*emutick)(unsigned long evt, | |
+ struct clock_event_device *cdev), | |
+ unsigned cpu); | |
+ | |
+/* | |
+ * Stop handling a per-cpu timer | |
+ */ | |
+void ipipe_timer_stop(unsigned cpu); | |
+ | |
+/* | |
+ * Program the timer | |
+ */ | |
+void ipipe_timer_set(unsigned long delay); | |
+ | |
+const char *ipipe_timer_name(void); | |
+ | |
+unsigned ipipe_timer_ns2ticks(struct ipipe_timer *timer, unsigned ns); | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+#define ipipe_host_timer_register(clkevt) do { } while (0) | |
+ | |
+#endif /* !CONFIG_IPIPE */ | |
+ | |
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT | |
+void ipipe_update_hostrt(struct timekeeper *tk); | |
+#else | |
+static inline void | |
+ipipe_update_hostrt(struct timekeeper *tk) {} | |
+#endif | |
+ | |
+#endif /* __LINUX_IPIPE_TICKDEV_H */ | |
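A timer that is not piggybacked on a Linux clockevent device can be registered
standalone by filling in struct ipipe_timer directly. A minimal sketch; the
IRQ number, frequency and callback body below are assumptions, not values from
the patch:

    static int foo_timer_set(unsigned long ticks, void *timer)
    {
            /* program the one-shot hardware comparator here */
            return 0;
    }

    static struct ipipe_timer foo_ipipe_timer = {
            .irq             = 42,			/* hypothetical */
            .set             = foo_timer_set,
            .name            = "foo-timer",
            .rating          = 200,
            .freq            = 1000000,		/* 1 MHz, assumed */
            .min_delay_ticks = 2,
            .cpumask         = cpu_possible_mask,
    };

    /* at init time: */
    ipipe_timer_register(&foo_ipipe_timer);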
diff --git a/include/linux/ipipe_trace.h b/include/linux/ipipe_trace.h | |
new file mode 100644 | |
index 0000000..deb0a47 | |
--- /dev/null | |
+++ b/include/linux/ipipe_trace.h | |
@@ -0,0 +1,77 @@ | |
+/* -*- linux-c -*- | |
+ * include/linux/ipipe_trace.h | |
+ * | |
+ * Copyright (C) 2005 Luotao Fu. | |
+ * 2005-2007 Jan Kiszka. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#ifndef _LINUX_IPIPE_TRACE_H | |
+#define _LINUX_IPIPE_TRACE_H | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE | |
+ | |
+#include <linux/types.h> | |
+ | |
+#ifndef BROKEN_BUILTIN_RETURN_ADDRESS | |
+#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0)) | |
+#define __BUILTIN_RETURN_ADDRESS1 ((unsigned long)__builtin_return_address(1)) | |
+#endif /* !BROKEN_BUILTIN_RETURN_ADDRESS */ | |
+ | |
+void ipipe_trace_begin(unsigned long v); | |
+void ipipe_trace_end(unsigned long v); | |
+void ipipe_trace_freeze(unsigned long v); | |
+void ipipe_trace_special(unsigned char special_id, unsigned long v); | |
+void ipipe_trace_pid(pid_t pid, short prio); | |
+void ipipe_trace_event(unsigned char id, unsigned long delay_tsc); | |
+int ipipe_trace_max_reset(void); | |
+int ipipe_trace_frozen_reset(void); | |
+ | |
+#else /* !CONFIG_IPIPE_TRACE */ | |
+ | |
+#define ipipe_trace_begin(v) do { (void)(v); } while (0) | |
+#define ipipe_trace_end(v) do { (void)(v); } while (0) | |
+#define ipipe_trace_freeze(v) do { (void)(v); } while (0) | |
+#define ipipe_trace_special(id, v) do { (void)(id); (void)(v); } while (0) | |
+#define ipipe_trace_pid(pid, prio) do { (void)(pid); (void)(prio); } while (0) | |
+#define ipipe_trace_event(id, delay_tsc) do { (void)(id); (void)(delay_tsc); } while (0) | |
+#define ipipe_trace_max_reset() ({ 0; }) | |
+#define ipipe_trace_frozen_reset() ({ 0; }) | |
+ | |
+#endif /* !CONFIG_IPIPE_TRACE */ | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_PANIC | |
+void ipipe_trace_panic_freeze(void); | |
+void ipipe_trace_panic_dump(void); | |
+#else | |
+static inline void ipipe_trace_panic_freeze(void) { } | |
+static inline void ipipe_trace_panic_dump(void) { } | |
+#endif | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF | |
+#define ipipe_trace_irq_entry(irq) ipipe_trace_begin(irq) | |
+#define ipipe_trace_irq_exit(irq) ipipe_trace_end(irq) | |
+#define ipipe_trace_irqsoff() ipipe_trace_begin(0x80000000UL) | |
+#define ipipe_trace_irqson() ipipe_trace_end(0x80000000UL) | |
+#else | |
+#define ipipe_trace_irq_entry(irq) do { (void)(irq); } while (0) | |
+#define ipipe_trace_irq_exit(irq) do { (void)(irq); } while (0) | |
+#define ipipe_trace_irqsoff() do { } while (0) | |
+#define ipipe_trace_irqson() do { } while (0) | |
+#endif | |
+ | |
+#endif /* !_LINUX_IPIPE_TRACE_H */ | |
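When CONFIG_IPIPE_TRACE is off, the tracepoints above compile to
argument-evaluating no-ops, so instrumentation can stay in place permanently.
A sketch of bracketing a latency-sensitive section; the marker value and the
took_too_long flag are illustrative:

    ipipe_trace_begin(0x42);	/* arbitrary marker shown in the back-trace */
    /* ... section whose worst-case duration we want to capture ... */
    ipipe_trace_end(0x42);

    if (unlikely(took_too_long))
            ipipe_trace_freeze(__LINE__);	/* snapshot the trace buffer */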
diff --git a/include/linux/irq.h b/include/linux/irq.h | |
index bc4e066..fb4cf79 100644 | |
--- a/include/linux/irq.h | |
+++ b/include/linux/irq.h | |
@@ -317,6 +317,11 @@ struct irq_chip { | |
void (*irq_bus_lock)(struct irq_data *data); | |
void (*irq_bus_sync_unlock)(struct irq_data *data); | |
+#ifdef CONFIG_IPIPE | |
+ void (*irq_move)(struct irq_data *data); | |
+ void (*irq_hold)(struct irq_data *data); | |
+ void (*irq_release)(struct irq_data *data); | |
+#endif /* CONFIG_IPIPE */ | |
void (*irq_cpu_online)(struct irq_data *data); | |
void (*irq_cpu_offline)(struct irq_data *data); | |
@@ -533,7 +538,7 @@ extern int irq_set_irq_type(unsigned int irq, unsigned int type); | |
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); | |
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, | |
struct msi_desc *entry); | |
-extern struct irq_data *irq_get_irq_data(unsigned int irq); | |
+extern struct irq_data *irq_get_irq_data(unsigned int irq) __attribute__((const)); | |
static inline struct irq_chip *irq_get_chip(unsigned int irq) | |
{ | |
@@ -679,7 +684,11 @@ struct irq_chip_type { | |
* different flow mechanisms (level/edge) for it. | |
*/ | |
struct irq_chip_generic { | |
+#ifdef CONFIG_IPIPE | |
+ ipipe_spinlock_t lock; | |
+#else | |
raw_spinlock_t lock; | |
+#endif | |
void __iomem *reg_base; | |
unsigned int irq_base; | |
unsigned int irq_cnt; | |
@@ -737,18 +746,28 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | |
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | |
#ifdef CONFIG_SMP | |
-static inline void irq_gc_lock(struct irq_chip_generic *gc) | |
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc) | |
{ | |
- raw_spin_lock(&gc->lock); | |
+ unsigned long flags = 0; | |
+ raw_spin_lock_irqsave_cond(&gc->lock, flags); | |
+ return flags; | |
} | |
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) | |
+static inline void | |
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags) | |
{ | |
- raw_spin_unlock(&gc->lock); | |
+ raw_spin_unlock_irqrestore_cond(&gc->lock, flags); | |
} | |
#else | |
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | |
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | |
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc) | |
+{ | |
+ return hard_cond_local_irq_save(); | |
+} | |
+static inline void | |
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags) | |
+{ | |
+ hard_cond_local_irq_restore(flags); | |
+} | |
#endif | |
#else /* !CONFIG_GENERIC_HARDIRQS */ | |
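Note the signature change above: irq_gc_lock() now returns the saved flags
word and irq_gc_unlock() consumes it, so every generic-chip callback must be
converted to the two-step form. A sketch of a typical call-site update, with
an illustrative register offset:

    /* before: */
    irq_gc_lock(gc);
    writel(mask, gc->reg_base + offset);
    irq_gc_unlock(gc);

    /* after: */
    unsigned long flags = irq_gc_lock(gc);
    writel(mask, gc->reg_base + offset);
    irq_gc_unlock(gc, flags);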
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h | |
index 623325e..befb711 100644 | |
--- a/include/linux/irqdesc.h | |
+++ b/include/linux/irqdesc.h | |
@@ -40,6 +40,12 @@ struct irq_desc; | |
struct irq_desc { | |
struct irq_data irq_data; | |
unsigned int __percpu *kstat_irqs; | |
+#ifdef CONFIG_IPIPE | |
+ void (*ipipe_ack)(unsigned int irq, | |
+ struct irq_desc *desc); | |
+ void (*ipipe_end)(unsigned int irq, | |
+ struct irq_desc *desc); | |
+#endif /* CONFIG_IPIPE */ | |
irq_flow_handler_t handle_irq; | |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI | |
irq_preflow_handler_t preflow_handler; | |
@@ -123,6 +129,10 @@ static inline int irq_has_action(unsigned int irq) | |
return desc->action != NULL; | |
} | |
+irq_flow_handler_t | |
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, | |
+ int is_chained); | |
+ | |
/* caller has locked the irq_desc and both params are valid */ | |
static inline void __irq_set_handler_locked(unsigned int irq, | |
irq_flow_handler_t handler) | |
@@ -130,6 +140,7 @@ static inline void __irq_set_handler_locked(unsigned int irq, | |
struct irq_desc *desc; | |
desc = irq_to_desc(irq); | |
+ handler = __fixup_irq_handler(desc, handler, 0); | |
desc->handle_irq = handler; | |
} | |
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h | |
index 0a2dc46..854884f 100644 | |
--- a/include/linux/irqnr.h | |
+++ b/include/linux/irqnr.h | |
@@ -22,7 +22,11 @@ | |
#else /* CONFIG_GENERIC_HARDIRQS */ | |
extern int nr_irqs; | |
+#if !defined(CONFIG_IPIPE) || defined(CONFIG_SPARSE_IRQ) | |
extern struct irq_desc *irq_to_desc(unsigned int irq); | |
+#else | |
+#define irq_to_desc(irq) (&irq_desc[irq]) | |
+#endif | |
unsigned int irq_get_next_irq(unsigned int offset); | |
# define for_each_irq_desc(irq, desc) \ | |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h | |
index e9ef6d6..52e8e7b 100644 | |
--- a/include/linux/kernel.h | |
+++ b/include/linux/kernel.h | |
@@ -9,6 +9,7 @@ | |
#include <linux/compiler.h> | |
#include <linux/bitops.h> | |
#include <linux/log2.h> | |
+#include <linux/ipipe_base.h> | |
#include <linux/typecheck.h> | |
#include <linux/printk.h> | |
#include <linux/dynamic_debug.h> | |
@@ -143,9 +144,12 @@ struct user; | |
#ifdef CONFIG_PREEMPT_VOLUNTARY | |
extern int _cond_resched(void); | |
-# define might_resched() _cond_resched() | |
+# define might_resched() do { \ | |
+ ipipe_root_only(); \ | |
+ _cond_resched(); \ | |
+ } while (0) | |
#else | |
-# define might_resched() do { } while (0) | |
+# define might_resched() ipipe_root_only() | |
#endif | |
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP | |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h | |
index 8db53cf..378a6ab 100644 | |
--- a/include/linux/kvm_host.h | |
+++ b/include/linux/kvm_host.h | |
@@ -208,6 +208,9 @@ struct kvm_vcpu { | |
#ifdef CONFIG_PREEMPT_NOTIFIERS | |
struct preempt_notifier preempt_notifier; | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+ struct ipipe_vm_notifier ipipe_notifier; | |
+#endif | |
int cpu; | |
int vcpu_id; | |
int srcu_idx; | |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h | |
index f5d4723..01a3bb5 100644 | |
--- a/include/linux/preempt.h | |
+++ b/include/linux/preempt.h | |
@@ -9,13 +9,20 @@ | |
#include <linux/thread_info.h> | |
#include <linux/linkage.h> | |
#include <linux/list.h> | |
+#include <linux/ipipe_base.h> | |
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) | |
extern void add_preempt_count(int val); | |
extern void sub_preempt_count(int val); | |
#else | |
-# define add_preempt_count(val) do { preempt_count() += (val); } while (0) | |
-# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0) | |
+# define add_preempt_count(val) do { \ | |
+ ipipe_preempt_root_only(); \ | |
+ preempt_count() += (val); \ | |
+ } while (0) | |
+# define sub_preempt_count(val) do { \ | |
+ ipipe_preempt_root_only(); \ | |
+ preempt_count() -= (val); \ | |
+ } while (0) | |
#endif | |
#define inc_preempt_count() add_preempt_count(1) | |
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h | |
index bc2994e..5e2da8d 100644 | |
--- a/include/linux/rwlock.h | |
+++ b/include/linux/rwlock.h | |
@@ -61,8 +61,8 @@ do { \ | |
#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) | |
#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) | |
-#define write_lock(lock) _raw_write_lock(lock) | |
-#define read_lock(lock) _raw_read_lock(lock) | |
+#define write_lock(lock) PICK_RWOP(_write_lock, lock) | |
+#define read_lock(lock) PICK_RWOP(_read_lock, lock) | |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
@@ -96,8 +96,8 @@ do { \ | |
#define read_lock_bh(lock) _raw_read_lock_bh(lock) | |
#define write_lock_irq(lock) _raw_write_lock_irq(lock) | |
#define write_lock_bh(lock) _raw_write_lock_bh(lock) | |
-#define read_unlock(lock) _raw_read_unlock(lock) | |
-#define write_unlock(lock) _raw_write_unlock(lock) | |
+#define read_unlock(lock) PICK_RWOP(_read_unlock, lock) | |
+#define write_unlock(lock) PICK_RWOP(_write_unlock, lock) | |
#define read_unlock_irq(lock) _raw_read_unlock_irq(lock) | |
#define write_unlock_irq(lock) _raw_write_unlock_irq(lock) | |
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h | |
index 9c9f049..62c894150 100644 | |
--- a/include/linux/rwlock_api_smp.h | |
+++ b/include/linux/rwlock_api_smp.h | |
@@ -141,7 +141,9 @@ static inline int __raw_write_trylock(rwlock_t *lock) | |
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | |
* not re-enabled during lock-acquire (which the preempt-spin-ops do): | |
*/ | |
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | |
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \ | |
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \ | |
+ defined(CONFIG_IPIPE) | |
static inline void __raw_read_lock(rwlock_t *lock) | |
{ | |
diff --git a/include/linux/sched.h b/include/linux/sched.h | |
index 178a8d9..da5039f 100644 | |
--- a/include/linux/sched.h | |
+++ b/include/linux/sched.h | |
@@ -22,6 +22,7 @@ struct sched_param { | |
#include <linux/errno.h> | |
#include <linux/nodemask.h> | |
#include <linux/mm_types.h> | |
+#include <linux/ipipe.h> | |
#include <asm/page.h> | |
#include <asm/ptrace.h> | |
@@ -152,9 +153,17 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); | |
#define TASK_WAKEKILL 128 | |
#define TASK_WAKING 256 | |
#define TASK_PARKED 512 | |
+#ifdef CONFIG_IPIPE | |
+#define TASK_HARDENING 1024 | |
+#define TASK_NOWAKEUP 2048 | |
+#define TASK_STATE_MAX 4096 | |
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWPHN" | |
+#else /* !CONFIG_IPIPE */ | |
+#define TASK_HARDENING 0 | |
+#define TASK_NOWAKEUP 0 | |
#define TASK_STATE_MAX 1024 | |
- | |
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" | |
+#endif /* CONFIG_IPIPE */ | |
extern char ___assert_task_state[1 - 2*!!( | |
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; | |
@@ -267,6 +276,15 @@ extern void trap_init(void); | |
extern void update_process_times(int user); | |
extern void scheduler_tick(void); | |
+#ifdef CONFIG_IPIPE | |
+void update_root_process_times(struct pt_regs *regs); | |
+#else /* !CONFIG_IPIPE */ | |
+static inline void update_root_process_times(struct pt_regs *regs) | |
+{ | |
+ update_process_times(user_mode(regs)); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
extern void sched_show_task(struct task_struct *p); | |
#ifdef CONFIG_LOCKUP_DETECTOR | |
@@ -366,6 +384,9 @@ extern int get_dumpable(struct mm_struct *mm); | |
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ | |
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ | |
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ | |
+#ifdef CONFIG_IPIPE | |
+#define MMF_VM_PINNED 31 /* ondemand load up and COW disabled */ | |
+#endif | |
#define MMF_HAS_UPROBES 19 /* has uprobes */ | |
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ | |
@@ -1343,6 +1364,10 @@ struct task_struct { | |
#endif /* CONFIG_NUMA_BALANCING */ | |
struct rcu_head rcu; | |
+ struct ipipe_task_info ipipe; | |
+#ifdef CONFIG_IPIPE_LEGACY | |
+ void *ptd[IPIPE_ROOT_NPTDKEYS]; | |
+#endif | |
/* | |
* cache last used pipe for splice | |
@@ -1639,6 +1664,10 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, | |
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | |
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ | |
+/* p->ipipe.flags */ | |
+#define PF_MAYDAY 0x1 /* MAYDAY call is pending */ | |
+#define PF_EVNOTIFY 0x2 /* Notify head domain about kernel events */ | |
+ | |
/* | |
* Only the _current_ task can read/write to tsk->flags, but other | |
* tasks can access tsk->flags in readonly mode for example | |
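With CONFIG_IPIPE the host tick may be intercepted while the head domain runs,
so tick handlers are expected to call update_root_process_times() rather than
update_process_times(); the non-IPIPE fallback above simply forwards. A sketch
of the call site, with a hypothetical handler name:

    static void foo_timer_interrupt(struct pt_regs *regs)	/* hypothetical */
    {
            /* ... acknowledge the timer hardware ... */
            update_root_process_times(regs);	/* root-domain accounting */
    }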
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h | |
index 7d537ce..da7855c 100644 | |
--- a/include/linux/spinlock.h | |
+++ b/include/linux/spinlock.h | |
@@ -89,10 +89,12 @@ | |
# include <linux/spinlock_up.h> | |
#endif | |
+#include <linux/ipipe_lock.h> | |
+ | |
#ifdef CONFIG_DEBUG_SPINLOCK | |
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, | |
struct lock_class_key *key); | |
-# define raw_spin_lock_init(lock) \ | |
+# define __real_raw_spin_lock_init(lock) \ | |
do { \ | |
static struct lock_class_key __key; \ | |
\ | |
@@ -100,11 +102,14 @@ do { \ | |
} while (0) | |
#else | |
-# define raw_spin_lock_init(lock) \ | |
+# define __real_raw_spin_lock_init(lock) \ | |
do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) | |
#endif | |
+#define raw_spin_lock_init(lock) PICK_SPINOP(_lock_init, lock) | |
-#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) | |
+#define __real_raw_spin_is_locked(lock) \ | |
+ arch_spin_is_locked(&(lock)->raw_lock) | |
+#define raw_spin_is_locked(lock) PICK_SPINOP_RET(_is_locked, lock, int) | |
#ifdef CONFIG_GENERIC_LOCKBREAK | |
#define raw_spin_is_contended(lock) ((lock)->break_lock) | |
@@ -165,9 +170,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |
* various methods are defined as nops in the case they are not | |
* required. | |
*/ | |
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) | |
+#define __real_raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) | |
+#define raw_spin_trylock(lock) PICK_SPINOP_RET(_trylock, lock, int) | |
-#define raw_spin_lock(lock) _raw_spin_lock(lock) | |
+#define __real_raw_spin_lock(lock) _raw_spin_lock(lock) | |
+#define raw_spin_lock(lock) PICK_SPINOP(_lock, lock) | |
#ifdef CONFIG_DEBUG_LOCK_ALLOC | |
# define raw_spin_lock_nested(lock, subclass) \ | |
@@ -185,7 +192,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
-#define raw_spin_lock_irqsave(lock, flags) \ | |
+#define __real_raw_spin_lock_irqsave(lock, flags) \ | |
do { \ | |
typecheck(unsigned long, flags); \ | |
flags = _raw_spin_lock_irqsave(lock); \ | |
@@ -207,7 +214,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |
#else | |
-#define raw_spin_lock_irqsave(lock, flags) \ | |
+#define __real_raw_spin_lock_irqsave(lock, flags) \ | |
do { \ | |
typecheck(unsigned long, flags); \ | |
_raw_spin_lock_irqsave(lock, flags); \ | |
@@ -218,34 +225,46 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |
#endif | |
-#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) | |
+#define raw_spin_lock_irqsave(lock, flags) \ | |
+ PICK_SPINLOCK_IRQSAVE(lock, flags) | |
+ | |
+#define __real_raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) | |
+#define raw_spin_lock_irq(lock) PICK_SPINOP(_lock_irq, lock) | |
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) | |
-#define raw_spin_unlock(lock) _raw_spin_unlock(lock) | |
-#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) | |
+#define __real_raw_spin_unlock(lock) _raw_spin_unlock(lock) | |
+#define raw_spin_unlock(lock) PICK_SPINOP(_unlock, lock) | |
+#define __real_raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) | |
+#define raw_spin_unlock_irq(lock) PICK_SPINOP(_unlock_irq, lock) | |
-#define raw_spin_unlock_irqrestore(lock, flags) \ | |
+#define __real_raw_spin_unlock_irqrestore(lock, flags) \ | |
do { \ | |
typecheck(unsigned long, flags); \ | |
_raw_spin_unlock_irqrestore(lock, flags); \ | |
} while (0) | |
+#define raw_spin_unlock_irqrestore(lock, flags) \ | |
+ PICK_SPINUNLOCK_IRQRESTORE(lock, flags) | |
+ | |
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) | |
#define raw_spin_trylock_bh(lock) \ | |
__cond_lock(lock, _raw_spin_trylock_bh(lock)) | |
-#define raw_spin_trylock_irq(lock) \ | |
+#define __real_raw_spin_trylock_irq(lock) \ | |
({ \ | |
local_irq_disable(); \ | |
- raw_spin_trylock(lock) ? \ | |
+ __real_raw_spin_trylock(lock) ? \ | |
1 : ({ local_irq_enable(); 0; }); \ | |
}) | |
+#define raw_spin_trylock_irq(lock) PICK_SPINTRYLOCK_IRQ(lock) | |
-#define raw_spin_trylock_irqsave(lock, flags) \ | |
+#define __real_raw_spin_trylock_irqsave(lock, flags) \ | |
({ \ | |
local_irq_save(flags); \ | |
raw_spin_trylock(lock) ? \ | |
1 : ({ local_irq_restore(flags); 0; }); \ | |
}) | |
+#define raw_spin_trylock_irqsave(lock, flags) \ | |
+ PICK_SPINTRYLOCK_IRQSAVE(lock, flags) | |
/** | |
* raw_spin_can_lock - would raw_spin_trylock() succeed? | |
@@ -276,24 +295,17 @@ static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) | |
#define spin_lock_init(_lock) \ | |
do { \ | |
- spinlock_check(_lock); \ | |
- raw_spin_lock_init(&(_lock)->rlock); \ | |
+ raw_spin_lock_init(_lock); \ | |
} while (0) | |
-static inline void spin_lock(spinlock_t *lock) | |
-{ | |
- raw_spin_lock(&lock->rlock); | |
-} | |
+#define spin_lock(lock) raw_spin_lock(lock) | |
static inline void spin_lock_bh(spinlock_t *lock) | |
{ | |
raw_spin_lock_bh(&lock->rlock); | |
} | |
-static inline int spin_trylock(spinlock_t *lock) | |
-{ | |
- return raw_spin_trylock(&lock->rlock); | |
-} | |
+#define spin_trylock(lock) raw_spin_trylock(lock) | |
#define spin_lock_nested(lock, subclass) \ | |
do { \ | |
@@ -305,14 +317,11 @@ do { \ | |
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ | |
} while (0) | |
-static inline void spin_lock_irq(spinlock_t *lock) | |
-{ | |
- raw_spin_lock_irq(&lock->rlock); | |
-} | |
+#define spin_lock_irq(lock) raw_spin_lock_irq(lock) | |
#define spin_lock_irqsave(lock, flags) \ | |
do { \ | |
- raw_spin_lock_irqsave(spinlock_check(lock), flags); \ | |
+ raw_spin_lock_irqsave(lock, flags); \ | |
} while (0) | |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ | |
@@ -320,39 +329,28 @@ do { \ | |
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ | |
} while (0) | |
-static inline void spin_unlock(spinlock_t *lock) | |
-{ | |
- raw_spin_unlock(&lock->rlock); | |
-} | |
+#define spin_unlock(lock) raw_spin_unlock(lock) | |
static inline void spin_unlock_bh(spinlock_t *lock) | |
{ | |
raw_spin_unlock_bh(&lock->rlock); | |
} | |
-static inline void spin_unlock_irq(spinlock_t *lock) | |
-{ | |
- raw_spin_unlock_irq(&lock->rlock); | |
-} | |
+#define spin_unlock_irq(lock) raw_spin_unlock_irq(lock) | |
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |
-{ | |
- raw_spin_unlock_irqrestore(&lock->rlock, flags); | |
-} | |
+#define spin_unlock_irqrestore(lock, flags) \ | |
+ raw_spin_unlock_irqrestore(lock, flags) | |
static inline int spin_trylock_bh(spinlock_t *lock) | |
{ | |
return raw_spin_trylock_bh(&lock->rlock); | |
} | |
-static inline int spin_trylock_irq(spinlock_t *lock) | |
-{ | |
- return raw_spin_trylock_irq(&lock->rlock); | |
-} | |
+#define spin_trylock_irq(lock) raw_spin_trylock_irq(lock) | |
#define spin_trylock_irqsave(lock, flags) \ | |
({ \ | |
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ | |
+ raw_spin_trylock_irqsave(lock, flags); \ | |
}) | |
static inline void spin_unlock_wait(spinlock_t *lock) | |
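
The PICK_SPINOP() renames above let a single raw_spin_*() macro front two lock flavors: operations on a regular raw_spinlock_t fall through to the __real_raw_* implementations, while operations on an I-pipe aware lock are routed to pipeline primitives that mask interrupts at CPU level. A self-contained C11 sketch of that compile-time dispatch, using _Generic in place of the type test actually performed in <linux/ipipe_lock.h>:

#include <stdio.h>

typedef struct { int slock; } raw_spinlock_t;	/* regular kernel lock */
typedef struct { int slock; } ipipe_spinlock_t;	/* pipeline-aware lock */

static void __real_raw_spin_lock(raw_spinlock_t *lock)
{
	(void)lock;
	printf("regular lock: virtual irq masking is enough\n");
}

static void __ipipe_spin_lock_irq(ipipe_spinlock_t *lock)
{
	(void)lock;
	printf("pipeline lock: hard irqs off across the section\n");
}

/* model of PICK_SPINOP(_lock, lock): dispatch on the lock's static type */
#define spin_lock(lock) _Generic((lock),			\
	raw_spinlock_t *: __real_raw_spin_lock,			\
	ipipe_spinlock_t *: __ipipe_spin_lock_irq)(lock)

int main(void)
{
	raw_spinlock_t a = { 0 };
	ipipe_spinlock_t b = { 0 };

	spin_lock(&a);	/* -> regular lock */
	spin_lock(&b);	/* -> pipeline lock */
	return 0;
}
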
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h | |
index 51df117..7e7b9a7 100644 | |
--- a/include/linux/spinlock_api_smp.h | |
+++ b/include/linux/spinlock_api_smp.h | |
@@ -99,7 +99,9 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | |
* not re-enabled during lock-acquire (which the preempt-spin-ops do): | |
*/ | |
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | |
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \ | |
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \ | |
+ defined(CONFIG_IPIPE) | |
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) | |
{ | |
@@ -113,7 +115,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) | |
* do_raw_spin_lock_flags() code, because lockdep assumes | |
* that interrupts are not re-enabled during lock-acquire: | |
*/ | |
-#ifdef CONFIG_LOCKDEP | |
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_IPIPE) | |
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
#else | |
do_raw_spin_lock_flags(lock, &flags); | |
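
Both spinlock_api_smp.h changes force the trylock-based acquisition path whenever CONFIG_IPIPE is enabled: a pipelined kernel must not re-enable interrupts in the middle of a contended spin, since the stall state recorded for the current domain would then disagree with the real CPU mask. A toy model of the resulting pattern; the hard mask is a plain variable here, not kernel API:

#include <stdatomic.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;
static int cpu_masked;	/* stand-in for the hard CPU interrupt mask */

static unsigned long hard_local_irq_save_model(void)
{
	unsigned long flags = cpu_masked;

	cpu_masked = 1;
	return flags;
}

/* acquire with interrupts masked for the whole spin: there is no
   window between trylock attempts where an interrupt could slip in */
static unsigned long spin_lock_irqsave_model(void)
{
	unsigned long flags = hard_local_irq_save_model();

	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;	/* spin, never restoring flags here */
	return flags;
}

int main(void)
{
	unsigned long flags = spin_lock_irqsave_model();

	(void)flags;	/* ... critical section, unlock, restore ... */
	return 0;
}
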
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h | |
index e2369c1..7041c20 100644 | |
--- a/include/linux/spinlock_up.h | |
+++ b/include/linux/spinlock_up.h | |
@@ -55,16 +55,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |
lock->slock = 1; | |
} | |
-/* | |
- * Read-write spinlocks. No debug version. | |
- */ | |
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) | |
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) | |
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) | |
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) | |
- | |
#else /* DEBUG_SPINLOCK */ | |
#define arch_spin_is_locked(lock) ((void)(lock), 0) | |
/* for sched.c and kernel_lock.c: */ | |
@@ -74,6 +64,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
#endif /* DEBUG_SPINLOCK */ | |
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) | |
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) | |
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) | |
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) | |
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) | |
+ | |
#define arch_spin_is_contended(lock) (((void)(lock), 0)) | |
#define arch_read_can_lock(lock) (((void)(lock), 1)) | |
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h | |
index 4164529..43c8744 100644 | |
--- a/include/uapi/asm-generic/mman-common.h | |
+++ b/include/uapi/asm-generic/mman-common.h | |
@@ -19,6 +19,9 @@ | |
#define MAP_TYPE 0x0f /* Mask for type of mapping */ | |
#define MAP_FIXED 0x10 /* Interpret addr exactly */ | |
#define MAP_ANONYMOUS 0x20 /* don't use a file */ | |
+#ifndef MAP_BRK | |
+# define MAP_BRK 0 | |
+#endif | |
#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED | |
# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ | |
#else | |
diff --git a/include/uapi/asm-generic/resource.h b/include/uapi/asm-generic/resource.h | |
index f863428..7fab40c 100644 | |
--- a/include/uapi/asm-generic/resource.h | |
+++ b/include/uapi/asm-generic/resource.h | |
@@ -58,6 +58,14 @@ | |
#endif | |
/* | |
+ * Limit the stack to some sane default: root can always | |
+ * increase this limit if needed. 8MB seems reasonable. | |
+ */ | |
+#ifndef _STK_LIM | |
+# define _STK_LIM (8*1024*1024) | |
+#endif | |
+ | |
+/* | |
* RLIMIT_STACK default maximum - some architectures override it: | |
*/ | |
#ifndef _STK_LIM_MAX | |
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h | |
index e0ed284..c6db05c 100644 | |
--- a/include/uapi/linux/resource.h | |
+++ b/include/uapi/linux/resource.h | |
@@ -59,12 +59,6 @@ struct rlimit64 { | |
#define PRIO_USER 2 | |
/* | |
- * Limit the stack by to some sane default: root can always | |
- * increase this limit if needed.. 8MB seems reasonable. | |
- */ | |
-#define _STK_LIM (8*1024*1024) | |
- | |
-/* | |
* GPG2 wants 64kB of mlocked memory, to make sure pass phrases | |
* and other sensitive information are never written to disk. | |
*/ | |
diff --git a/init/Kconfig b/init/Kconfig | |
index 2d9b831..5978d85 100644 | |
--- a/init/Kconfig | |
+++ b/init/Kconfig | |
@@ -55,6 +55,7 @@ config CROSS_COMPILE | |
config LOCALVERSION | |
string "Local version - append to kernel release" | |
+ default "-ipipe" | |
help | |
Append an extra string to the end of your kernel version. | |
This will show up when you type uname, for example. | |
diff --git a/init/main.c b/init/main.c | |
index e83ac04..b536b83 100644 | |
--- a/init/main.c | |
+++ b/init/main.c | |
@@ -489,7 +489,7 @@ asmlinkage void __init start_kernel(void) | |
cgroup_init_early(); | |
- local_irq_disable(); | |
+ hard_local_irq_disable(); | |
early_boot_irqs_disabled = true; | |
/* | |
@@ -526,6 +526,7 @@ asmlinkage void __init start_kernel(void) | |
pidhash_init(); | |
vfs_caches_init_early(); | |
sort_main_extable(); | |
+ __ipipe_init_early(); | |
trap_init(); | |
mm_init(); | |
@@ -556,6 +557,11 @@ asmlinkage void __init start_kernel(void) | |
softirq_init(); | |
timekeeping_init(); | |
time_init(); | |
+ /* | |
+ * We need to wait for the interrupt and time subsystems to be | |
+ * initialized before enabling the pipeline. | |
+ */ | |
+ __ipipe_init(); | |
profile_init(); | |
call_function_init(); | |
WARN(!irqs_disabled(), "Interrupts were enabled early\n"); | |
@@ -775,6 +781,7 @@ static void __init do_basic_setup(void) | |
shmem_init(); | |
driver_init(); | |
init_irq_proc(); | |
+ __ipipe_init_proc(); | |
do_ctors(); | |
usermodehelper_enable(); | |
do_initcalls(); | |
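
The three init/main.c call sites stage the pipeline bring-up: __ipipe_init_early() runs before trap_init() while only the boot CPU is alive, __ipipe_init() engages the pipeline once the IRQ and time subsystems are ready, and __ipipe_init_proc() follows when procfs is usable. For !CONFIG_IPIPE builds these calls presumably vanish through the usual empty-stub convention, along these lines (a sketch, not the patch's actual <linux/ipipe.h> contents):

#ifdef CONFIG_IPIPE
void __ipipe_init_early(void);
void __ipipe_init(void);
void __ipipe_init_proc(void);
#else /* !CONFIG_IPIPE */
static inline void __ipipe_init_early(void) { }
static inline void __ipipe_init(void) { }
static inline void __ipipe_init_proc(void) { }
#endif /* !CONFIG_IPIPE */
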
diff --git a/kernel/Makefile b/kernel/Makefile | |
index 271fd31..6660d63 100644 | |
--- a/kernel/Makefile | |
+++ b/kernel/Makefile | |
@@ -87,6 +87,7 @@ obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | |
obj-$(CONFIG_TINY_RCU) += rcutiny.o | |
obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o | |
obj-$(CONFIG_RELAY) += relay.o | |
+obj-$(CONFIG_IPIPE) += ipipe/ | |
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | |
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | |
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o | |
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c | |
index 383f823..0699de2 100644 | |
--- a/kernel/context_tracking.c | |
+++ b/kernel/context_tracking.c | |
@@ -47,7 +47,7 @@ void user_enter(void) | |
* helpers are enough to protect RCU uses inside the exception. So | |
* just return immediately if we detect we are in an IRQ. | |
*/ | |
- if (in_interrupt()) | |
+ if (ipipe_root_p == 0 || in_interrupt()) | |
return; | |
/* Kernel threads aren't supposed to go to userspace */ | |
@@ -126,7 +126,7 @@ void user_exit(void) | |
{ | |
unsigned long flags; | |
- if (in_interrupt()) | |
+ if (ipipe_root_p == 0 || in_interrupt()) | |
return; | |
local_irq_save(flags); | |
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c | |
index 0506d44..db3875e 100644 | |
--- a/kernel/debug/debug_core.c | |
+++ b/kernel/debug/debug_core.c | |
@@ -114,8 +114,8 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { | |
*/ | |
atomic_t kgdb_active = ATOMIC_INIT(-1); | |
EXPORT_SYMBOL_GPL(kgdb_active); | |
-static DEFINE_RAW_SPINLOCK(dbg_master_lock); | |
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock); | |
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_master_lock); | |
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_slave_lock); | |
/* | |
* We use NR_CPUs not PERCPU, in case kgdb is used to debug early | |
@@ -165,19 +165,21 @@ int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) | |
{ | |
int err; | |
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, | |
- BREAK_INSTR_SIZE); | |
+ err = ipipe_probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, | |
+ BREAK_INSTR_SIZE); | |
if (err) | |
return err; | |
- err = probe_kernel_write((char *)bpt->bpt_addr, | |
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); | |
+ err = ipipe_probe_kernel_write((char *)bpt->bpt_addr, | |
+ arch_kgdb_ops.gdb_bpt_instr, | |
+ BREAK_INSTR_SIZE); | |
return err; | |
} | |
int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) | |
{ | |
- return probe_kernel_write((char *)bpt->bpt_addr, | |
- (char *)bpt->saved_instr, BREAK_INSTR_SIZE); | |
+ return ipipe_probe_kernel_write((char *)bpt->bpt_addr, | |
+ (char *)bpt->saved_instr, | |
+ BREAK_INSTR_SIZE); | |
} | |
int __weak kgdb_validate_break_address(unsigned long addr) | |
@@ -450,7 +452,9 @@ static int kgdb_reenter_check(struct kgdb_state *ks) | |
static void dbg_touch_watchdogs(void) | |
{ | |
touch_softlockup_watchdog_sync(); | |
+#ifndef CONFIG_IPIPE | |
clocksource_touch_watchdog(); | |
+#endif | |
rcu_cpu_stall_reset(); | |
} | |
@@ -480,7 +484,7 @@ acquirelock: | |
* Interrupts will be restored by the 'trap return' code, except when | |
* single stepping. | |
*/ | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
cpu = ks->cpu; | |
kgdb_info[cpu].debuggerinfo = regs; | |
@@ -529,7 +533,7 @@ return_normal: | |
smp_mb__before_atomic_dec(); | |
atomic_dec(&slaves_in_kgdb); | |
dbg_touch_watchdogs(); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return 0; | |
} | |
cpu_relax(); | |
@@ -547,7 +551,7 @@ return_normal: | |
atomic_set(&kgdb_active, -1); | |
raw_spin_unlock(&dbg_master_lock); | |
dbg_touch_watchdogs(); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
goto acquirelock; | |
} | |
@@ -656,7 +660,7 @@ kgdb_restore: | |
atomic_set(&kgdb_active, -1); | |
raw_spin_unlock(&dbg_master_lock); | |
dbg_touch_watchdogs(); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
return kgdb_info[cpu].ret_state; | |
} | |
@@ -742,9 +746,9 @@ static void kgdb_console_write(struct console *co, const char *s, | |
if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode) | |
return; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
gdbstub_msg_write(s, count); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
} | |
static struct console kgdbcons = { | |
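
The local_irq_save() to hard_local_irq_save() conversions are the crux of the kgdb changes: under I-pipe, local_irq_save() merely sets the root domain's virtual stall bit, while hard_local_irq_save() really masks the CPU, which is what a debugger freezing the whole machine needs. A toy model of the two masks, using plain variables instead of the real per-CPU state:

#include <stdio.h>

static int root_stalled;	/* virtual mask: root domain stall bit */
static int cpu_masked;		/* hard mask: actual CPU interrupt state */

static void local_irq_save_model(void)
{
	root_stalled = 1;	/* head-domain interrupts still flow */
}

static void hard_local_irq_save_model(void)
{
	cpu_masked = 1;		/* nothing preempts now, not even the head */
}

int main(void)
{
	local_irq_save_model();
	printf("virtual: stalled=%d masked=%d\n", root_stalled, cpu_masked);
	hard_local_irq_save_model();
	printf("hard:    stalled=%d masked=%d\n", root_stalled, cpu_masked);
	return 0;
}
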
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c | |
index 19d9a57..4e99085 100644 | |
--- a/kernel/debug/gdbstub.c | |
+++ b/kernel/debug/gdbstub.c | |
@@ -246,7 +246,7 @@ char *kgdb_mem2hex(char *mem, char *buf, int count) | |
*/ | |
tmp = buf + count; | |
- err = probe_kernel_read(tmp, mem, count); | |
+ err = ipipe_probe_kernel_read(tmp, mem, count); | |
if (err) | |
return NULL; | |
while (count > 0) { | |
@@ -282,7 +282,7 @@ int kgdb_hex2mem(char *buf, char *mem, int count) | |
*tmp_raw |= hex_to_bin(*tmp_hex--) << 4; | |
} | |
- return probe_kernel_write(mem, tmp_raw, count); | |
+ return ipipe_probe_kernel_write(mem, tmp_raw, count); | |
} | |
/* | |
@@ -334,7 +334,7 @@ static int kgdb_ebin2mem(char *buf, char *mem, int count) | |
size++; | |
} | |
- return probe_kernel_write(mem, c, size); | |
+ return ipipe_probe_kernel_write(mem, c, size); | |
} | |
#if DBG_MAX_REG_NUM > 0 | |
diff --git a/kernel/exit.c b/kernel/exit.c | |
index 7bb73f9..1de2495 100644 | |
--- a/kernel/exit.c | |
+++ b/kernel/exit.c | |
@@ -790,6 +790,7 @@ void do_exit(long code) | |
acct_process(); | |
trace_sched_process_exit(tsk); | |
+ __ipipe_report_exit(tsk); | |
exit_sem(tsk); | |
exit_shm(tsk); | |
exit_files(tsk); | |
diff --git a/kernel/fork.c b/kernel/fork.c | |
index 80d92e9..baa18a1 100644 | |
--- a/kernel/fork.c | |
+++ b/kernel/fork.c | |
@@ -313,6 +313,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |
tsk->stack = ti; | |
setup_thread_stack(tsk, orig); | |
+ __ipipe_init_threadinfo(&ti->ipipe_data); | |
clear_user_return_notifier(tsk); | |
clear_tsk_need_resched(tsk); | |
stackend = end_of_stack(tsk); | |
@@ -617,6 +618,7 @@ void mmput(struct mm_struct *mm) | |
ksm_exit(mm); | |
khugepaged_exit(mm); /* must run before exit_mmap */ | |
exit_mmap(mm); | |
+ __ipipe_report_cleanup(mm); | |
set_mm_exe_file(mm, NULL); | |
if (!list_empty(&mm->mmlist)) { | |
spin_lock(&mmlist_lock); | |
@@ -1083,6 +1085,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p) | |
new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); | |
new_flags |= PF_FORKNOEXEC; | |
p->flags = new_flags; | |
+ __ipipe_clear_taskflags(p); | |
} | |
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) | |
@@ -1483,6 +1486,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |
cgroup_post_fork(p); | |
if (clone_flags & CLONE_THREAD) | |
threadgroup_change_end(current); | |
+ __ipipe_init_taskinfo(p); | |
perf_event_fork(p); | |
trace_task_newtask(p, clone_flags); | |
diff --git a/kernel/ipipe/Kconfig b/kernel/ipipe/Kconfig | |
new file mode 100644 | |
index 0000000..da17b04 | |
--- /dev/null | |
+++ b/kernel/ipipe/Kconfig | |
@@ -0,0 +1,62 @@ | |
+config IPIPE | |
+ bool "Interrupt pipeline" | |
+ default y | |
+ ---help--- | |
+ Activate this option if you want the interrupt pipeline to be | |
+ compiled in. | |
+ | |
+config IPIPE_LEGACY | |
+ bool "I-pipe legacy interface" | |
+ depends on IPIPE | |
+ ---help--- | |
+ Activate this option if you want to control the interrupt | |
+ pipeline via the legacy interface. | |
+ | |
+config IPIPE_CORE | |
+ def_bool y if IPIPE | |
+ | |
+config IPIPE_WANT_CLOCKSOURCE | |
+ bool | |
+ | |
+config IPIPE_CORE_APIREV | |
+ int | |
+ depends on IPIPE | |
+ default 2 | |
+ ---help--- | |
+ The API revision level we implement. | |
+ | |
+config IPIPE_WANT_APIREV_1 | |
+ bool | |
+ | |
+config IPIPE_WANT_APIREV_2 | |
+ bool | |
+ | |
+config IPIPE_TARGET_APIREV | |
+ int | |
+ depends on IPIPE | |
+ default 1 if IPIPE_WANT_APIREV_1 | |
+ default 2 if IPIPE_WANT_APIREV_2 | |
+ default 1 if IPIPE_LEGACY | |
+ default IPIPE_CORE_APIREV | |
+ ---help--- | |
+ The API revision level we want (must be <= | |
+ IPIPE_CORE_APIREV). | |
+ | |
+config IPIPE_HAVE_HOSTRT | |
+ bool | |
+ | |
+config IPIPE_HAVE_PIC_MUTE | |
+ bool | |
+ | |
+config HAVE_IPIPE_HOSTRT | |
+ depends on IPIPE_LEGACY | |
+ bool | |
+ | |
+config IPIPE_DELAYED_ATOMICSW | |
+ def_bool y if IPIPE_LEGACY | |
+ | |
+config IPIPE_HAVE_SAFE_THREAD_INFO | |
+ bool | |
+ | |
+config IPIPE_HAVE_VM_NOTIFIER | |
+ bool | |
diff --git a/kernel/ipipe/Kconfig.debug b/kernel/ipipe/Kconfig.debug | |
new file mode 100644 | |
index 0000000..40c82a2 | |
--- /dev/null | |
+++ b/kernel/ipipe/Kconfig.debug | |
@@ -0,0 +1,95 @@ | |
+config IPIPE_DEBUG | |
+ bool "I-pipe debugging" | |
+ depends on IPIPE | |
+ | |
+config IPIPE_DEBUG_CONTEXT | |
+ bool "Check for illicit cross-domain calls" | |
+ depends on IPIPE_DEBUG | |
+ default y | |
+ ---help--- | |
+ Enable this feature to arm checkpoints in the kernel that | |
+ verify the correct invocation context. On entry to critical | |
+ Linux services, a warning is issued if the caller is not | |
+ running over the root domain. | |
+ | |
+config IPIPE_DEBUG_INTERNAL | |
+ bool "Enable internal debug checks" | |
+ depends on IPIPE_DEBUG | |
+ default y | |
+ ---help--- | |
+ When this feature is enabled, I-pipe will perform internal | |
+ consistency checks of its subsystems, e.g. on per-cpu variable | |
+ access. | |
+ | |
+config IPIPE_TRACE | |
+ bool "Latency tracing" | |
+ depends on IPIPE_DEBUG | |
+ select ARCH_WANT_FRAME_POINTERS if !ARM_UNWIND | |
+ select FRAME_POINTER if !ARM_UNWIND | |
+ select KALLSYMS | |
+ select PROC_FS | |
+ ---help--- | |
+ Activate this option if you want to use per-function tracing of | |
+ the kernel. The tracer will collect data via instrumentation | |
+ features like the one below or with the help of explicit calls | |
+ to ipipe_trace_xxx(). See include/linux/ipipe_trace.h for the | |
+ in-kernel tracing API. The collected data and runtime control | |
+ are available via /proc/ipipe/trace/*. | |
+ | |
+if IPIPE_TRACE | |
+ | |
+config IPIPE_TRACE_ENABLE | |
+ bool "Enable tracing on boot" | |
+ default y | |
+ ---help--- | |
+ Disable this option if you want to arm the tracer manually | |
+ after booting ("echo 1 > /proc/ipipe/trace/enable"). This can reduce | |
+ boot time on slow embedded devices due to the tracer overhead. | |
+ | |
+config IPIPE_TRACE_MCOUNT | |
+ bool "Instrument function entries" | |
+ default y | |
+ select FTRACE | |
+ select FUNCTION_TRACER | |
+ ---help--- | |
+ When enabled, records every kernel function entry in the tracer | |
+ log. While this slows down the system noticeably, it provides | |
+ the highest level of information about the flow of events. | |
+ However, it can be switched off in order to record only explicit | |
+ I-pipe trace points. | |
+ | |
+config IPIPE_TRACE_IRQSOFF | |
+ bool "Trace IRQs-off times" | |
+ default y | |
+ ---help--- | |
+ Activate this option if I-pipe shall trace the longest path | |
+ with hard-IRQs switched off. | |
+ | |
+config IPIPE_TRACE_SHIFT | |
+ int "Depth of trace log (14 => 16Kpoints, 15 => 32Kpoints)" | |
+ range 10 18 | |
+ default 14 | |
+ ---help--- | |
+ The number of trace points to hold tracing data for each | |
+ trace path, as a power of 2. | |
+ | |
+config IPIPE_TRACE_VMALLOC | |
+ bool "Use vmalloc'ed trace buffer" | |
+ default y if EMBEDDED | |
+ ---help--- | |
+ Instead of reserving static kernel data, the required buffer | |
+ is allocated via vmalloc during boot-up when this option is | |
+ enabled. This can help to start systems that are low on memory, | |
+ but it slightly degrades overall performance. Try this option | |
+ when a traced kernel hangs unexpectedly at boot time. | |
+ | |
+config IPIPE_TRACE_PANIC | |
+ bool "Enable panic back traces" | |
+ default y | |
+ ---help--- | |
+ Provides services to freeze and dump a back trace in panic | |
+ situations. This is used on IPIPE_DEBUG_CONTEXT exceptions | |
+ as well as ordinary kernel oopses. You can control the number | |
+ of printed back trace points via /proc/ipipe/trace. | |
+ | |
+endif | |
diff --git a/kernel/ipipe/Makefile b/kernel/ipipe/Makefile | |
new file mode 100644 | |
index 0000000..c3ffe63 | |
--- /dev/null | |
+++ b/kernel/ipipe/Makefile | |
@@ -0,0 +1,3 @@ | |
+obj-$(CONFIG_IPIPE) += core.o timer.o | |
+obj-$(CONFIG_IPIPE_TRACE) += tracer.o | |
+obj-$(CONFIG_IPIPE_LEGACY) += compat.o | |
diff --git a/kernel/ipipe/compat.c b/kernel/ipipe/compat.c | |
new file mode 100644 | |
index 0000000..1147bf4 | |
--- /dev/null | |
+++ b/kernel/ipipe/compat.c | |
@@ -0,0 +1,268 @@ | |
+/* -*- linux-c -*- | |
+ * linux/kernel/ipipe/compat.c | |
+ * | |
+ * Copyright (C) 2012 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ * | |
+ * I-pipe legacy interface. | |
+ */ | |
+#include <linux/version.h> | |
+#include <linux/module.h> | |
+#include <linux/init.h> | |
+#include <linux/kernel.h> | |
+#include <linux/sched.h> | |
+#include <linux/ipipe.h> | |
+ | |
+static int ptd_key_count; | |
+ | |
+static unsigned long ptd_key_map; | |
+ | |
+IPIPE_DECLARE_SPINLOCK(__ipipe_lock); | |
+ | |
+void ipipe_init_attr(struct ipipe_domain_attr *attr) | |
+{ | |
+ attr->name = "anon"; | |
+ attr->domid = 1; | |
+ attr->entry = NULL; | |
+ attr->priority = IPIPE_ROOT_PRIO; | |
+ attr->pdd = NULL; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_init_attr); | |
+ | |
+int ipipe_register_domain(struct ipipe_domain *ipd, | |
+ struct ipipe_domain_attr *attr) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long flags; | |
+ | |
+ BUG_ON(attr->priority != IPIPE_HEAD_PRIORITY); | |
+ | |
+ ipipe_register_head(ipd, attr->name); | |
+ ipd->legacy.domid = attr->domid; | |
+ ipd->legacy.pdd = attr->pdd; | |
+ ipd->legacy.priority = INT_MAX; | |
+ | |
+ if (attr->entry == NULL) | |
+ return 0; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ __ipipe_set_current_domain(ipd); | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ attr->entry(); | |
+ | |
+ flags = hard_local_irq_save(); | |
+ __ipipe_set_current_domain(ipipe_root_domain); | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (__ipipe_ipending_p(p) && | |
+ !test_bit(IPIPE_STALL_FLAG, &p->status)) | |
+ __ipipe_sync_stage(); | |
+ hard_local_irq_restore(flags); | |
+ | |
+ return 0; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_register_domain); | |
+ | |
+int ipipe_unregister_domain(struct ipipe_domain *ipd) | |
+{ | |
+ ipipe_unregister_head(ipd); | |
+ | |
+ return 0; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_unregister_domain); | |
+ | |
+int ipipe_alloc_ptdkey(void) | |
+{ | |
+ unsigned long flags; | |
+ int key = -1; | |
+ | |
+ spin_lock_irqsave(&__ipipe_lock,flags); | |
+ | |
+ if (ptd_key_count < IPIPE_ROOT_NPTDKEYS) { | |
+ key = ffz(ptd_key_map); | |
+ set_bit(key,&ptd_key_map); | |
+ ptd_key_count++; | |
+ } | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_lock,flags); | |
+ | |
+ return key; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_alloc_ptdkey); | |
+ | |
+int ipipe_free_ptdkey(int key) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS) | |
+ return -EINVAL; | |
+ | |
+ spin_lock_irqsave(&__ipipe_lock,flags); | |
+ | |
+ if (test_and_clear_bit(key,&ptd_key_map)) | |
+ ptd_key_count--; | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_lock,flags); | |
+ | |
+ return 0; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_free_ptdkey); | |
+ | |
+int ipipe_set_ptd(int key, void *value) | |
+{ | |
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS) | |
+ return -EINVAL; | |
+ | |
+ current->ptd[key] = value; | |
+ | |
+ return 0; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_set_ptd); | |
+ | |
+void *ipipe_get_ptd(int key) | |
+{ | |
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS) | |
+ return NULL; | |
+ | |
+ return current->ptd[key]; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_get_ptd); | |
+ | |
+int ipipe_virtualize_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq, | |
+ ipipe_irq_handler_t handler, | |
+ void *cookie, | |
+ ipipe_irq_ackfn_t ackfn, | |
+ unsigned int modemask) | |
+{ | |
+ if (handler == NULL) { | |
+ ipipe_free_irq(ipd, irq); | |
+ return 0; | |
+ } | |
+ | |
+ return ipipe_request_irq(ipd, irq, handler, cookie, ackfn); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_virtualize_irq); | |
+ | |
+static int null_handler(unsigned int event, | |
+ struct ipipe_domain *from, void *data) | |
+{ | |
+ /* | |
+ * Legacy mode users will trap all events, or at least the | |
+ * most frequent ones. Therefore it is actually faster to run a | |
+ * dummy handler once in a while rather than testing for a | |
+ * null handler pointer each time an event is fired. | |
+ */ | |
+ return 0; | |
+} | |
+ | |
+ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd, | |
+ unsigned int event, | |
+ ipipe_event_handler_t handler) | |
+{ | |
+ ipipe_event_handler_t oldhandler; | |
+ int n, enables = 0; | |
+ | |
+ if (event & IPIPE_EVENT_SELF) { | |
+ event &= ~IPIPE_EVENT_SELF; | |
+ IPIPE_WARN(event >= IPIPE_NR_FAULTS); | |
+ } | |
+ | |
+ if (event >= IPIPE_NR_EVENTS) | |
+ return NULL; | |
+ | |
+ /* | |
+ * It makes no sense to run a SETSCHED notification handler | |
+ * over the head domain, this introduces a useless domain | |
+ * switch for doing work which ought to be root specific. | |
+ * Unfortunately, some client domains using the legacy | |
+ * interface still ask for this, so we silently fix their | |
+ * request. This prevents ipipe_set_hooks() from yelling at us | |
+ * because of an attempt to enable kernel event notifications | |
+ * for the head domain. | |
+ */ | |
+ if (event == IPIPE_EVENT_SETSCHED) | |
+ ipd = ipipe_root_domain; | |
+ | |
+ oldhandler = ipd->legacy.handlers[event]; | |
+ ipd->legacy.handlers[event] = handler ?: null_handler; | |
+ | |
+ for (n = 0; n < IPIPE_NR_FAULTS; n++) { | |
+ if (ipd->legacy.handlers[n] != null_handler) { | |
+ enables |= __IPIPE_TRAP_E; | |
+ break; | |
+ } | |
+ } | |
+ | |
+ for (n = IPIPE_FIRST_EVENT; n < IPIPE_LAST_EVENT; n++) { | |
+ if (ipd->legacy.handlers[n] != null_handler) { | |
+ enables |= __IPIPE_KEVENT_E; | |
+ break; | |
+ } | |
+ } | |
+ | |
+ if (ipd->legacy.handlers[IPIPE_EVENT_SYSCALL] != null_handler) | |
+ enables |= __IPIPE_SYSCALL_E; | |
+ | |
+ ipipe_set_hooks(ipd, enables); | |
+ | |
+ return oldhandler == null_handler ? NULL : oldhandler; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_catch_event); | |
+ | |
+int ipipe_setscheduler_root(struct task_struct *p, int policy, int prio) | |
+{ | |
+ struct sched_param param = { .sched_priority = prio }; | |
+ return sched_setscheduler_nocheck(p, policy, ¶m); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_setscheduler_root); | |
+ | |
+int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs) | |
+{ | |
+ const int event = IPIPE_EVENT_SYSCALL; | |
+ return ipipe_current_domain->legacy.handlers[event](event, ipd, regs); | |
+} | |
+ | |
+int ipipe_trap_hook(struct ipipe_trap_data *data) | |
+{ | |
+ struct ipipe_domain *ipd = ipipe_head_domain; | |
+ struct pt_regs *regs = data->regs; | |
+ int ex = data->exception; | |
+ | |
+ return ipd->legacy.handlers[ex](ex, ipd, regs); | |
+} | |
+ | |
+int ipipe_kevent_hook(int kevent, void *data) | |
+{ | |
+ unsigned int event = IPIPE_FIRST_EVENT + kevent; | |
+ struct ipipe_domain *ipd = ipipe_root_domain; | |
+ | |
+ return ipd->legacy.handlers[event](event, ipd, data); | |
+} | |
+ | |
+void __ipipe_legacy_init_stage(struct ipipe_domain *ipd) | |
+{ | |
+ int n; | |
+ | |
+ for (n = 0; n < IPIPE_NR_EVENTS; n++) | |
+ ipd->legacy.handlers[n] = null_handler; | |
+ | |
+ if (ipd == &ipipe_root) { | |
+ ipd->legacy.domid = IPIPE_ROOT_ID; | |
+ ipd->legacy.priority = IPIPE_ROOT_PRIO; | |
+ } | |
+} | |
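
For reference, a legacy client would typically pair the PTD calls above as follows; stash_cookie() and my_cookie are hypothetical names, and error handling is trimmed to the essentials:

/* hypothetical legacy client: stash per-task data under a PTD key */
static int stash_cookie(void *my_cookie)
{
	int key = ipipe_alloc_ptdkey();

	if (key < 0)
		return -EAGAIN;		/* all keys in use */

	ipipe_set_ptd(key, my_cookie);	/* attach to the current task */
	/* ... later, from the same task: ipipe_get_ptd(key) ... */
	ipipe_free_ptdkey(key);

	return 0;
}
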
diff --git a/kernel/ipipe/core.c b/kernel/ipipe/core.c | |
new file mode 100644 | |
index 0000000..43bd79f | |
--- /dev/null | |
+++ b/kernel/ipipe/core.c | |
@@ -0,0 +1,1800 @@ | |
+/* -*- linux-c -*- | |
+ * linux/kernel/ipipe/core.c | |
+ * | |
+ * Copyright (C) 2002-2012 Philippe Gerum. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ * | |
+ * Architecture-independent I-PIPE core support. | |
+ */ | |
+#include <linux/version.h> | |
+#include <linux/module.h> | |
+#include <linux/kernel.h> | |
+#include <linux/sched.h> | |
+#include <linux/kallsyms.h> | |
+#include <linux/bitops.h> | |
+#include <linux/tick.h> | |
+#include <linux/interrupt.h> | |
+#ifdef CONFIG_PROC_FS | |
+#include <linux/proc_fs.h> | |
+#include <linux/seq_file.h> | |
+#endif /* CONFIG_PROC_FS */ | |
+#include <linux/ipipe_trace.h> | |
+#include <linux/ipipe.h> | |
+#include <ipipe/setup.h> | |
+ | |
+struct ipipe_domain ipipe_root; | |
+EXPORT_SYMBOL_GPL(ipipe_root); | |
+ | |
+struct ipipe_domain *ipipe_head_domain = &ipipe_root; | |
+EXPORT_SYMBOL_GPL(ipipe_head_domain); | |
+ | |
+#ifdef CONFIG_SMP | |
+static __initdata struct ipipe_percpu_domain_data bootup_context = { | |
+ .status = IPIPE_STALL_MASK, | |
+ .domain = &ipipe_root, | |
+}; | |
+#else | |
+#define bootup_context ipipe_percpu.root | |
+#endif /* !CONFIG_SMP */ | |
+ | |
+DEFINE_PER_CPU(struct ipipe_percpu_data, ipipe_percpu) = { | |
+ .root = { | |
+ .status = IPIPE_STALL_MASK, | |
+ .domain = &ipipe_root, | |
+ }, | |
+ .curr = &bootup_context, | |
+ .hrtimer_irq = -1, | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+ .context_check = 1, | |
+#endif | |
+}; | |
+EXPORT_PER_CPU_SYMBOL(ipipe_percpu); | |
+ | |
+/* Up to 2k of pending work data per CPU. */ | |
+#define WORKBUF_SIZE 2048 | |
+static DEFINE_PER_CPU_ALIGNED(unsigned char[WORKBUF_SIZE], work_buf); | |
+static DEFINE_PER_CPU(void *, work_tail); | |
+static unsigned int __ipipe_work_virq; | |
+ | |
+static void __ipipe_do_work(unsigned int virq, void *cookie); | |
+ | |
+#ifdef CONFIG_SMP | |
+ | |
+#define IPIPE_CRITICAL_TIMEOUT 1000000 | |
+static cpumask_t __ipipe_cpu_sync_map; | |
+static cpumask_t __ipipe_cpu_lock_map; | |
+static cpumask_t __ipipe_cpu_pass_map; | |
+static unsigned long __ipipe_critical_lock; | |
+static IPIPE_DEFINE_SPINLOCK(__ipipe_cpu_barrier); | |
+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0); | |
+static void (*__ipipe_cpu_sync) (void); | |
+ | |
+#else /* !CONFIG_SMP */ | |
+/* | |
+ * Create an alias to the unique root status, so that arch-dep code | |
+ * may get fast access to this percpu variable including from | |
+ * assembly. A hard-coded assumption is that root.status appears at | |
+ * offset #0 of the ipipe_percpu struct. | |
+ */ | |
+extern unsigned long __ipipe_root_status | |
+__attribute__((alias(__stringify(ipipe_percpu)))); | |
+EXPORT_SYMBOL_GPL(__ipipe_root_status); | |
+ | |
+#endif /* !CONFIG_SMP */ | |
+ | |
+IPIPE_DEFINE_SPINLOCK(__ipipe_lock); | |
+ | |
+static unsigned long __ipipe_virtual_irq_map; | |
+ | |
+#ifdef CONFIG_PRINTK | |
+unsigned int __ipipe_printk_virq; | |
+int __ipipe_printk_bypass; | |
+#endif /* CONFIG_PRINTK */ | |
+ | |
+#ifdef CONFIG_PROC_FS | |
+ | |
+struct proc_dir_entry *ipipe_proc_root; | |
+ | |
+static int __ipipe_version_info_show(struct seq_file *p, void *data) | |
+{ | |
+ seq_printf(p, "%d\n", IPIPE_CORE_RELEASE); | |
+ return 0; | |
+} | |
+ | |
+static int __ipipe_version_info_open(struct inode *inode, struct file *file) | |
+{ | |
+ return single_open(file, __ipipe_version_info_show, NULL); | |
+} | |
+ | |
+static const struct file_operations __ipipe_version_proc_ops = { | |
+ .open = __ipipe_version_info_open, | |
+ .read = seq_read, | |
+ .llseek = seq_lseek, | |
+ .release = single_release, | |
+}; | |
+ | |
+static int __ipipe_common_info_show(struct seq_file *p, void *data) | |
+{ | |
+ struct ipipe_domain *ipd = (struct ipipe_domain *)p->private; | |
+ char handling, lockbit, virtuality; | |
+ unsigned long ctlbits; | |
+ unsigned int irq; | |
+ | |
+ seq_printf(p, " +--- Handled\n"); | |
+ seq_printf(p, " |+-- Locked\n"); | |
+ seq_printf(p, " ||+- Virtual\n"); | |
+ seq_printf(p, " [IRQ] ||| Handler\n"); | |
+ | |
+ mutex_lock(&ipd->mutex); | |
+ | |
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++) { | |
+ ctlbits = ipd->irqs[irq].control; | |
+ /* | |
+ * There might be a hole between the last external IRQ | |
+ * and the first virtual one; skip it. | |
+ */ | |
+ if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)) | |
+ continue; | |
+ | |
+ if (ipipe_virtual_irq_p(irq) | |
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)) | |
+ /* Non-allocated virtual IRQ; skip it. */ | |
+ continue; | |
+ | |
+ if (ctlbits & IPIPE_HANDLE_MASK) | |
+ handling = 'H'; | |
+ else | |
+ handling = '.'; | |
+ | |
+ if (ctlbits & IPIPE_LOCK_MASK) | |
+ lockbit = 'L'; | |
+ else | |
+ lockbit = '.'; | |
+ | |
+ if (ipipe_virtual_irq_p(irq)) | |
+ virtuality = 'V'; | |
+ else | |
+ virtuality = '.'; | |
+ | |
+ if (ctlbits & IPIPE_HANDLE_MASK) | |
+ seq_printf(p, " %4u: %c%c%c %pf\n", | |
+ irq, handling, lockbit, virtuality, | |
+ ipd->irqs[irq].handler); | |
+ else | |
+ seq_printf(p, " %4u: %c%c%c\n", | |
+ irq, handling, lockbit, virtuality); | |
+ } | |
+ | |
+ mutex_unlock(&ipd->mutex); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static int __ipipe_common_info_open(struct inode *inode, struct file *file) | |
+{ | |
+ return single_open(file, __ipipe_common_info_show, PDE_DATA(inode)); | |
+} | |
+ | |
+static const struct file_operations __ipipe_info_proc_ops = { | |
+ .owner = THIS_MODULE, | |
+ .open = __ipipe_common_info_open, | |
+ .read = seq_read, | |
+ .llseek = seq_lseek, | |
+ .release = single_release, | |
+}; | |
+ | |
+void add_domain_proc(struct ipipe_domain *ipd) | |
+{ | |
+ proc_create_data(ipd->name, 0444, ipipe_proc_root, | |
+ &__ipipe_info_proc_ops, ipd); | |
+} | |
+ | |
+void remove_domain_proc(struct ipipe_domain *ipd) | |
+{ | |
+ remove_proc_entry(ipd->name, ipipe_proc_root); | |
+} | |
+ | |
+void __init __ipipe_init_proc(void) | |
+{ | |
+ ipipe_proc_root = proc_mkdir("ipipe", NULL); | |
+ proc_create("version", 0444, ipipe_proc_root, | |
+ &__ipipe_version_proc_ops); | |
+ add_domain_proc(ipipe_root_domain); | |
+ | |
+ __ipipe_init_tracer(); | |
+} | |
+ | |
+#else | |
+ | |
+static inline void add_domain_proc(struct ipipe_domain *ipd) | |
+{ | |
+} | |
+ | |
+static inline void remove_domain_proc(struct ipipe_domain *ipd) | |
+{ | |
+} | |
+ | |
+#endif /* CONFIG_PROC_FS */ | |
+ | |
+static void init_stage(struct ipipe_domain *ipd) | |
+{ | |
+ memset(&ipd->irqs, 0, sizeof(ipd->irqs)); | |
+ mutex_init(&ipd->mutex); | |
+ __ipipe_legacy_init_stage(ipd); | |
+ __ipipe_hook_critical_ipi(ipd); | |
+} | |
+ | |
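+/* | |
+ * A build-time assertion in disguise: if ipipe_percpu.root ever | |
+ * moves away from offset #0, the call to the undefined function | |
+ * below survives constant folding and breaks the final link. | |
+ */ | |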
+static inline int root_context_offset(void) | |
+{ | |
+ void root_context_not_at_start_of_ipipe_percpu(void); | |
+ | |
+ /* ipipe_percpu.root must be found at offset #0. */ | |
+ | |
+ if (offsetof(struct ipipe_percpu_data, root)) | |
+ root_context_not_at_start_of_ipipe_percpu(); | |
+ | |
+ return 0; | |
+} | |
+ | |
+#ifdef CONFIG_SMP | |
+ | |
+static inline void fixup_percpu_data(void) | |
+{ | |
+ struct ipipe_percpu_data *p; | |
+ int cpu; | |
+ | |
+ /* | |
+ * ipipe_percpu.curr cannot be assigned statically to | |
+ * &ipipe_percpu.root, due to the dynamic nature of percpu | |
+ * data. So we make ipipe_percpu.curr refer to a temporary | |
+ * boot up context in static memory, until we can fixup all | |
+ * context pointers in this routine, after per-cpu areas have | |
+ * been eventually set up. The temporary context data is | |
+ * copied to per_cpu(ipipe_percpu, 0).root in the same move. | |
+ * | |
+ * Obviously, this code must run over the boot CPU, before SMP | |
+ * operations start. | |
+ */ | |
+ BUG_ON(smp_processor_id() || !irqs_disabled()); | |
+ | |
+ per_cpu(ipipe_percpu, 0).root = bootup_context; | |
+ | |
+ for_each_possible_cpu(cpu) { | |
+ p = &per_cpu(ipipe_percpu, cpu); | |
+ p->curr = &p->root; | |
+ } | |
+} | |
+ | |
+#else /* !CONFIG_SMP */ | |
+ | |
+static inline void fixup_percpu_data(void) { } | |
+ | |
+#endif /* CONFIG_SMP */ | |
+ | |
+void __init __ipipe_init_early(void) | |
+{ | |
+ struct ipipe_domain *ipd = &ipipe_root; | |
+ int cpu; | |
+ | |
+ fixup_percpu_data(); | |
+ | |
+ /* | |
+ * A lightweight registration code for the root domain. We are | |
+ * running on the boot CPU, hw interrupts are off, and | |
+ * secondary CPUs are still lost in space. | |
+ */ | |
+ ipd->name = "Linux"; | |
+ ipd->context_offset = root_context_offset(); | |
+ init_stage(ipd); | |
+ | |
+ /* | |
+ * Do the early init stuff. First we do the per-arch pipeline | |
+ * core setup, then we run the per-client setup code. At this | |
+ * point, the kernel does not provide much services yet: be | |
+ * careful. | |
+ */ | |
+ __ipipe_early_core_setup(); | |
+ __ipipe_early_client_setup(); | |
+ | |
+#ifdef CONFIG_PRINTK | |
+ __ipipe_printk_virq = ipipe_alloc_virq(); | |
+ ipd->irqs[__ipipe_printk_virq].handler = __ipipe_flush_printk; | |
+ ipd->irqs[__ipipe_printk_virq].cookie = NULL; | |
+ ipd->irqs[__ipipe_printk_virq].ackfn = NULL; | |
+ ipd->irqs[__ipipe_printk_virq].control = IPIPE_HANDLE_MASK; | |
+#endif /* CONFIG_PRINTK */ | |
+ | |
+ __ipipe_work_virq = ipipe_alloc_virq(); | |
+ ipd->irqs[__ipipe_work_virq].handler = __ipipe_do_work; | |
+ ipd->irqs[__ipipe_work_virq].cookie = NULL; | |
+ ipd->irqs[__ipipe_work_virq].ackfn = NULL; | |
+ ipd->irqs[__ipipe_work_virq].control = IPIPE_HANDLE_MASK; | |
+ | |
+ for_each_possible_cpu(cpu) | |
+ per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu); | |
+} | |
+ | |
+void __init __ipipe_init(void) | |
+{ | |
+ /* Now we may engage the pipeline. */ | |
+ __ipipe_enable_pipeline(); | |
+ | |
+ pr_info("Interrupt pipeline (release #%d)\n", IPIPE_CORE_RELEASE); | |
+} | |
+ | |
+static inline void init_head_stage(struct ipipe_domain *ipd) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ int cpu; | |
+ | |
+ /* Must be set first, used in ipipe_percpu_context(). */ | |
+ ipd->context_offset = offsetof(struct ipipe_percpu_data, head); | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ p = ipipe_percpu_context(ipd, cpu); | |
+ memset(p, 0, sizeof(*p)); | |
+ p->domain = ipd; | |
+ } | |
+ | |
+ init_stage(ipd); | |
+} | |
+ | |
+void ipipe_register_head(struct ipipe_domain *ipd, const char *name) | |
+{ | |
+ BUG_ON(!ipipe_root_p || ipd == &ipipe_root); | |
+ | |
+ ipd->name = name; | |
+ init_head_stage(ipd); | |
+ barrier(); | |
+ ipipe_head_domain = ipd; | |
+ add_domain_proc(ipd); | |
+ | |
+ pr_info("I-pipe: head domain %s registered.\n", name); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_register_head); | |
+ | |
+void ipipe_unregister_head(struct ipipe_domain *ipd) | |
+{ | |
+ BUG_ON(!ipipe_root_p || ipd != ipipe_head_domain); | |
+ | |
+ ipipe_head_domain = &ipipe_root; | |
+ smp_mb(); | |
+ mutex_lock(&ipd->mutex); | |
+ remove_domain_proc(ipd); | |
+ mutex_unlock(&ipd->mutex); | |
+ | |
+ pr_info("I-pipe: head domain %s unregistered.\n", ipd->name); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_unregister_head); | |
+ | |
+void ipipe_unstall_root(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ | |
+ hard_local_irq_disable(); | |
+ | |
+ /* This helps catching bad usage from assembly call sites. */ | |
+ ipipe_root_only(); | |
+ | |
+ p = ipipe_this_cpu_root_context(); | |
+ | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ | |
+ if (unlikely(__ipipe_ipending_p(p))) | |
+ __ipipe_sync_stage(); | |
+ | |
+ hard_local_irq_enable(); | |
+} | |
+EXPORT_SYMBOL(ipipe_unstall_root); | |
+ | |
+void ipipe_restore_root(unsigned long x) | |
+{ | |
+ ipipe_root_only(); | |
+ | |
+ if (x) | |
+ ipipe_stall_root(); | |
+ else | |
+ ipipe_unstall_root(); | |
+} | |
+EXPORT_SYMBOL(ipipe_restore_root); | |
+ | |
+void __ipipe_restore_root_nosync(unsigned long x) | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_root_context(); | |
+ | |
+ if (raw_irqs_disabled_flags(x)) { | |
+ __set_bit(IPIPE_STALL_FLAG, &p->status); | |
+ trace_hardirqs_off(); | |
+ } else { | |
+ trace_hardirqs_on(); | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_restore_root_nosync); | |
+ | |
+void ipipe_unstall_head(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context(); | |
+ | |
+ hard_local_irq_disable(); | |
+ | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ | |
+ if (unlikely(__ipipe_ipending_p(p))) | |
+ __ipipe_sync_pipeline(ipipe_head_domain); | |
+ | |
+ hard_local_irq_enable(); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_unstall_head); | |
+ | |
+void __ipipe_restore_head(unsigned long x) /* hw interrupt off */ | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context(); | |
+ | |
+ if (x) { | |
+#ifdef CONFIG_DEBUG_KERNEL | |
+ static int warned; | |
+ if (!warned && | |
+ __test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) { | |
+ /* | |
+ * Already stalled, although ipipe_restore_head() | |
+ * should have detected it? Send a warning once. | |
+ */ | |
+ hard_local_irq_enable(); | |
+ warned = 1; | |
+ pr_warning("I-pipe: ipipe_restore_head() " | |
+ "optimization failed.\n"); | |
+ dump_stack(); | |
+ hard_local_irq_disable(); | |
+ } | |
+#else /* !CONFIG_DEBUG_KERNEL */ | |
+ __set_bit(IPIPE_STALL_FLAG, &p->status); | |
+#endif /* CONFIG_DEBUG_KERNEL */ | |
+ } else { | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ if (unlikely(__ipipe_ipending_p(p))) | |
+ __ipipe_sync_pipeline(ipipe_head_domain); | |
+ hard_local_irq_enable(); | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_restore_head); | |
+ | |
+void __ipipe_spin_lock_irq(ipipe_spinlock_t *lock) | |
+{ | |
+ hard_local_irq_disable(); | |
+ arch_spin_lock(&lock->arch_lock); | |
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_lock_irq); | |
+ | |
+void __ipipe_spin_unlock_irq(ipipe_spinlock_t *lock) | |
+{ | |
+ arch_spin_unlock(&lock->arch_lock); | |
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ hard_local_irq_enable(); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_unlock_irq); | |
+ | |
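+/* | |
+ * The flags value returned below folds the domain's previous stall | |
+ * bit into the hard interrupt state (arch_mangle_irq_bits), so that | |
+ * __ipipe_spin_unlock_irqrestore() can demangle it and restore the | |
+ * virtual and the real interrupt masks in one call. | |
+ */ | |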
+unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock) | |
+{ | |
+ unsigned long flags; | |
+ int s; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ arch_spin_lock(&lock->arch_lock); | |
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ | |
+ return arch_mangle_irq_bits(s, flags); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_lock_irqsave); | |
+ | |
+int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock, | |
+ unsigned long *x) | |
+{ | |
+ unsigned long flags; | |
+ int s; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ if (!arch_spin_trylock(&lock->arch_lock)) { | |
+ hard_local_irq_restore(flags); | |
+ return 0; | |
+ } | |
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ *x = arch_mangle_irq_bits(s, flags); | |
+ | |
+ return 1; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_trylock_irqsave); | |
+ | |
+void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock, | |
+ unsigned long x) | |
+{ | |
+ arch_spin_unlock(&lock->arch_lock); | |
+ if (!arch_demangle_irq_bits(&x)) | |
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ hard_local_irq_restore(x); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_unlock_irqrestore); | |
+ | |
+int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ if (!arch_spin_trylock(&lock->arch_lock)) { | |
+ hard_local_irq_restore(flags); | |
+ return 0; | |
+ } | |
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ | |
+ return 1; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_spin_trylock_irq); | |
+ | |
+void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock) | |
+{ | |
+ arch_spin_unlock(&lock->arch_lock); | |
+} | |
+ | |
+void __ipipe_spin_unlock_irqcomplete(unsigned long x) | |
+{ | |
+ if (!arch_demangle_irq_bits(&x)) | |
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status); | |
+ hard_local_irq_restore(x); | |
+} | |
+ | |
+#ifdef __IPIPE_3LEVEL_IRQMAP | |
+ | |
+/* Must be called hw IRQs off. */ | |
+static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p, | |
+ unsigned int irq) | |
+{ | |
+ __set_bit(irq, p->irqheld_map); | |
+ p->irqall[irq]++; | |
+} | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq) | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_context(ipd); | |
+ int l0b, l1b; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG); | |
+ l1b = irq / BITS_PER_LONG; | |
+ | |
+ if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) { | |
+ __set_bit(irq, p->irqpend_lomap); | |
+ __set_bit(l1b, p->irqpend_mdmap); | |
+ __set_bit(l0b, &p->irqpend_himap); | |
+ } else | |
+ __set_bit(irq, p->irqheld_map); | |
+ | |
+ p->irqall[irq]++; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_set_irq_pending); | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_lock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_domain *ipd = ipipe_root_domain; | |
+ struct ipipe_percpu_domain_data *p; | |
+ int l0b, l1b; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ /* | |
+ * Interrupts requested by a registered head domain cannot be | |
+ * locked, since this would make no sense: interrupts are | |
+ * globally masked at CPU level when the head domain is | |
+ * stalled, so there is no way we could encounter the | |
+ * situation IRQ locks are handling. | |
+ */ | |
+ if (test_and_set_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) | |
+ return; | |
+ | |
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG); | |
+ l1b = irq / BITS_PER_LONG; | |
+ | |
+ p = ipipe_this_cpu_context(ipd); | |
+ if (__test_and_clear_bit(irq, p->irqpend_lomap)) { | |
+ __set_bit(irq, p->irqheld_map); | |
+ if (p->irqpend_lomap[l1b] == 0) { | |
+ __clear_bit(l1b, p->irqpend_mdmap); | |
+ if (p->irqpend_mdmap[l0b] == 0) | |
+ __clear_bit(l0b, &p->irqpend_himap); | |
+ } | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_lock_irq); | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_unlock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_domain *ipd = ipipe_root_domain; | |
+ struct ipipe_percpu_domain_data *p; | |
+ int l0b, l1b, cpu; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ if (!test_and_clear_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) | |
+ return; | |
+ | |
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG); | |
+ l1b = irq / BITS_PER_LONG; | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ p = ipipe_percpu_context(ipd, cpu); | |
+ if (test_and_clear_bit(irq, p->irqheld_map)) { | |
+ /* We need atomic ops here: */ | |
+ set_bit(irq, p->irqpend_lomap); | |
+ set_bit(l1b, p->irqpend_mdmap); | |
+ set_bit(l0b, &p->irqpend_himap); | |
+ } | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_unlock_irq); | |
+ | |
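+/* | |
+ * Scan for the next pending IRQ across the 3-level map: a bit set | |
+ * in irqpend_himap selects a word of irqpend_mdmap, whose bits in | |
+ * turn select the irqpend_lomap words holding the actual IRQ bits. | |
+ * The returned IRQ is consumed from the map, and upper-level bits | |
+ * are cleared whenever the level below becomes empty. | |
+ */ | |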
+static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p) | |
+{ | |
+ int l0b, l1b, l2b; | |
+ unsigned long l0m, l1m, l2m; | |
+ unsigned int irq; | |
+ | |
+ l0m = p->irqpend_himap; | |
+ if (unlikely(l0m == 0)) | |
+ return -1; | |
+ | |
+ l0b = __ipipe_ffnz(l0m); | |
+ l1m = p->irqpend_mdmap[l0b]; | |
+ if (unlikely(l1m == 0)) | |
+ return -1; | |
+ | |
+ l1b = __ipipe_ffnz(l1m) + l0b * BITS_PER_LONG; | |
+ l2m = p->irqpend_lomap[l1b]; | |
+ if (unlikely(l2m == 0)) | |
+ return -1; | |
+ | |
+ l2b = __ipipe_ffnz(l2m); | |
+ irq = l1b * BITS_PER_LONG + l2b; | |
+ | |
+ __clear_bit(irq, p->irqpend_lomap); | |
+ if (p->irqpend_lomap[l1b] == 0) { | |
+ __clear_bit(l1b, p->irqpend_mdmap); | |
+ if (p->irqpend_mdmap[l0b] == 0) | |
+ __clear_bit(l0b, &p->irqpend_himap); | |
+ } | |
+ | |
+ return irq; | |
+} | |
+ | |
+#else /* __IPIPE_2LEVEL_IRQMAP */ | |
+ | |
+/* Must be called hw IRQs off. */ | |
+static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p, | |
+ unsigned int irq) | |
+{ | |
+ __set_bit(irq, p->irqheld_map); | |
+ p->irqall[irq]++; | |
+} | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq) | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_context(ipd); | |
+ int l0b = irq / BITS_PER_LONG; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) { | |
+ __set_bit(irq, p->irqpend_lomap); | |
+ __set_bit(l0b, &p->irqpend_himap); | |
+ } else | |
+ __set_bit(irq, p->irqheld_map); | |
+ | |
+ p->irqall[irq]++; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_set_irq_pending); | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_lock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ int l0b = irq / BITS_PER_LONG; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ if (test_and_set_bit(IPIPE_LOCK_FLAG, | |
+ &ipipe_root_domain->irqs[irq].control)) | |
+ return; | |
+ | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (__test_and_clear_bit(irq, p->irqpend_lomap)) { | |
+ __set_bit(irq, p->irqheld_map); | |
+ if (p->irqpend_lomap[l0b] == 0) | |
+ __clear_bit(l0b, &p->irqpend_himap); | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_lock_irq); | |
+ | |
+/* Must be called hw IRQs off. */ | |
+void __ipipe_unlock_irq(unsigned int irq) | |
+{ | |
+ struct ipipe_domain *ipd = ipipe_root_domain; | |
+ struct ipipe_percpu_domain_data *p; | |
+ int l0b = irq / BITS_PER_LONG, cpu; | |
+ | |
+ IPIPE_WARN_ONCE(!hard_irqs_disabled()); | |
+ | |
+ if (!test_and_clear_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) | |
+ return; | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ p = ipipe_percpu_context(ipd, cpu); | |
+ if (test_and_clear_bit(irq, p->irqheld_map)) { | |
+ /* We need atomic ops here: */ | |
+ set_bit(irq, p->irqpend_lomap); | |
+ set_bit(l0b, &p->irqpend_himap); | |
+ } | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_unlock_irq); | |
+ | |
+static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p) | |
+{ | |
+ unsigned long l0m, l1m; | |
+ int l0b, l1b; | |
+ | |
+ l0m = p->irqpend_himap; | |
+ if (unlikely(l0m == 0)) | |
+ return -1; | |
+ | |
+ l0b = __ipipe_ffnz(l0m); | |
+ l1m = p->irqpend_lomap[l0b]; | |
+ if (unlikely(l1m == 0)) | |
+ return -1; | |
+ | |
+ l1b = __ipipe_ffnz(l1m); | |
+ __clear_bit(l1b, &p->irqpend_lomap[l0b]); | |
+ if (p->irqpend_lomap[l0b] == 0) | |
+ __clear_bit(l0b, &p->irqpend_himap); | |
+ | |
+ return l0b * BITS_PER_LONG + l1b; | |
+} | |
+ | |
+#endif /* __IPIPE_2LEVEL_IRQMAP */ | |
+ | |
+void __ipipe_do_sync_pipeline(struct ipipe_domain *top) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ struct ipipe_domain *ipd; | |
+ | |
+ /* We must enter over the root domain. */ | |
+ IPIPE_WARN_ONCE(__ipipe_current_domain != ipipe_root_domain); | |
+ ipd = top; | |
+next: | |
+ p = ipipe_this_cpu_context(ipd); | |
+ if (test_bit(IPIPE_STALL_FLAG, &p->status)) | |
+ return; | |
+ | |
+ if (__ipipe_ipending_p(p)) { | |
+ if (ipd == ipipe_root_domain) | |
+ __ipipe_sync_stage(); | |
+ else { | |
+ /* Switching to head. */ | |
+ p->coflags &= ~__IPIPE_ALL_R; | |
+ __ipipe_set_current_context(p); | |
+ __ipipe_sync_stage(); | |
+ __ipipe_set_current_domain(ipipe_root_domain); | |
+ } | |
+ } | |
+ | |
+ if (ipd != ipipe_root_domain) { | |
+ ipd = ipipe_root_domain; | |
+ goto next; | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_do_sync_pipeline); | |
+ | |
+unsigned int ipipe_alloc_virq(void) | |
+{ | |
+ unsigned long flags, irq = 0; | |
+ int ipos; | |
+ | |
+ spin_lock_irqsave(&__ipipe_lock, flags); | |
+ | |
+ if (__ipipe_virtual_irq_map != ~0) { | |
+ ipos = ffz(__ipipe_virtual_irq_map); | |
+ set_bit(ipos, &__ipipe_virtual_irq_map); | |
+ irq = ipos + IPIPE_VIRQ_BASE; | |
+ } | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_lock, flags); | |
+ | |
+ return irq; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_alloc_virq); | |
+ | |
+void ipipe_free_virq(unsigned int virq) | |
+{ | |
+ clear_bit(virq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map); | |
+ smp_mb__after_clear_bit(); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_free_virq); | |
+ | |
+int ipipe_request_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq, | |
+ ipipe_irq_handler_t handler, | |
+ void *cookie, | |
+ ipipe_irq_ackfn_t ackfn) | |
+{ | |
+ unsigned long flags; | |
+ int ret = 0; | |
+ | |
+#ifndef CONFIG_IPIPE_LEGACY | |
+ ipipe_root_only(); | |
+#endif /* CONFIG_IPIPE_LEGACY */ | |
+ | |
+ if (handler == NULL || | |
+ (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq))) | |
+ return -EINVAL; | |
+ | |
+ spin_lock_irqsave(&__ipipe_lock, flags); | |
+ | |
+ if (ipd->irqs[irq].handler) { | |
+ ret = -EBUSY; | |
+ goto out; | |
+ } | |
+ | |
+ if (ackfn == NULL) | |
+ ackfn = ipipe_root_domain->irqs[irq].ackfn; | |
+ | |
+ ipd->irqs[irq].handler = handler; | |
+ ipd->irqs[irq].cookie = cookie; | |
+ ipd->irqs[irq].ackfn = ackfn; | |
+ ipd->irqs[irq].control = IPIPE_HANDLE_MASK; | |
+ | |
+ if (irq < IPIPE_NR_ROOT_IRQS) | |
+ __ipipe_enable_irqdesc(ipd, irq); | |
+out: | |
+ spin_unlock_irqrestore(&__ipipe_lock, flags); | |
+ | |
+ return ret; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_request_irq); | |
+ | |
+void ipipe_free_irq(struct ipipe_domain *ipd, | |
+ unsigned int irq) | |
+{ | |
+ unsigned long flags; | |
+ | |
+#ifndef CONFIG_IPIPE_LEGACY | |
+ ipipe_root_only(); | |
+#endif /* CONFIG_IPIPE_LEGACY */ | |
+ | |
+ spin_lock_irqsave(&__ipipe_lock, flags); | |
+ | |
+ if (ipd->irqs[irq].handler == NULL) | |
+ goto out; | |
+ | |
+ ipd->irqs[irq].handler = NULL; | |
+ ipd->irqs[irq].cookie = NULL; | |
+ ipd->irqs[irq].ackfn = NULL; | |
+ ipd->irqs[irq].control = 0; | |
+ | |
+ if (irq < IPIPE_NR_ROOT_IRQS) | |
+ __ipipe_disable_irqdesc(ipd, irq); | |
+out: | |
+ spin_unlock_irqrestore(&__ipipe_lock, flags); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_free_irq); | |
+ | |
+void ipipe_set_hooks(struct ipipe_domain *ipd, int enables) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long flags; | |
+ int cpu, wait; | |
+ | |
+ if (ipd == ipipe_root_domain) { | |
+ IPIPE_WARN(enables & __IPIPE_TRAP_E); | |
+ enables &= ~__IPIPE_TRAP_E; | |
+ } else { | |
+ IPIPE_WARN(enables & __IPIPE_KEVENT_E); | |
+ enables &= ~__IPIPE_KEVENT_E; | |
+ } | |
+ | |
+ flags = ipipe_critical_enter(NULL); | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ p = ipipe_percpu_context(ipd, cpu); | |
+ p->coflags &= ~__IPIPE_ALL_E; | |
+ p->coflags |= enables; | |
+ } | |
+ | |
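+	/* | |
+	 * Build the mask of running (_R) bits matching the hooks we | |
+	 * just disabled, so we can wait below until no CPU executes | |
+	 * the corresponding handlers anymore. | |
+	 */ | |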
+ wait = (enables ^ __IPIPE_ALL_E) << __IPIPE_SHIFT_R; | |
+ if (wait == 0 || !__ipipe_root_p) { | |
+ ipipe_critical_exit(flags); | |
+ return; | |
+ } | |
+ | |
+ ipipe_this_cpu_context(ipd)->coflags &= ~wait; | |
+ | |
+ ipipe_critical_exit(flags); | |
+ | |
+ /* | |
+ * In case we cleared some hooks over the root domain, we have | |
+ * to wait for any ongoing execution to finish, since our | |
+ * caller might subsequently unmap the target domain code. | |
+ * | |
+ * We synchronize with the relevant __ipipe_notify_*() | |
+ * helpers, disabling all hooks before we start waiting for | |
+ * completion on all CPUs. | |
+ */ | |
+ for_each_online_cpu(cpu) { | |
+ while (ipipe_percpu_context(ipd, cpu)->coflags & wait) | |
+ schedule_timeout_interruptible(HZ / 50); | |
+ } | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_set_hooks); | |
+ | |
+int __weak ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+int __ipipe_notify_syscall(struct pt_regs *regs) | |
+{ | |
+ struct ipipe_domain *caller_domain, *this_domain, *ipd; | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long flags; | |
+ int ret = 0; | |
+ | |
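+	/* | |
+	 * Propagate the syscall event to the head domain first; if | |
+	 * its hook leaves the event unprocessed (ret == 0) and we are | |
+	 * still running over the root domain, give the root hook a | |
+	 * chance next. | |
+	 */ | |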
+ flags = hard_local_irq_save(); | |
+ caller_domain = this_domain = __ipipe_current_domain; | |
+ ipd = ipipe_head_domain; | |
+next: | |
+ p = ipipe_this_cpu_context(ipd); | |
+ if (likely(p->coflags & __IPIPE_SYSCALL_E)) { | |
+ __ipipe_set_current_context(p); | |
+ p->coflags |= __IPIPE_SYSCALL_R; | |
+ hard_local_irq_restore(flags); | |
+ ret = ipipe_syscall_hook(caller_domain, regs); | |
+ flags = hard_local_irq_save(); | |
+ p->coflags &= ~__IPIPE_SYSCALL_R; | |
+ if (__ipipe_current_domain != ipd) | |
+ /* Account for domain migration. */ | |
+ this_domain = __ipipe_current_domain; | |
+ else | |
+ __ipipe_set_current_domain(this_domain); | |
+ } | |
+ | |
+ if (this_domain == ipipe_root_domain && | |
+ ipd != ipipe_root_domain && ret == 0) { | |
+ ipd = ipipe_root_domain; | |
+ goto next; | |
+ } | |
+ | |
+ hard_local_irq_restore(flags); | |
+ | |
+ return ret; | |
+} | |
+ | |
+int __weak ipipe_trap_hook(struct ipipe_trap_data *data) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+int __ipipe_notify_trap(int exception, struct pt_regs *regs) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ struct ipipe_trap_data data; | |
+ unsigned long flags; | |
+ int ret = 0; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ | |
+ /* | |
+ * We send a notification about all traps raised over a | |
+ * registered head domain only. | |
+ */ | |
+ if (__ipipe_root_p) | |
+ goto out; | |
+ | |
+ p = ipipe_this_cpu_head_context(); | |
+ if (likely(p->coflags & __IPIPE_TRAP_E)) { | |
+ p->coflags |= __IPIPE_TRAP_R; | |
+ hard_local_irq_restore(flags); | |
+ data.exception = exception; | |
+ data.regs = regs; | |
+ ret = ipipe_trap_hook(&data); | |
+ flags = hard_local_irq_save(); | |
+ p->coflags &= ~__IPIPE_TRAP_R; | |
+ } | |
+out: | |
+ hard_local_irq_restore(flags); | |
+ | |
+ return ret; | |
+} | |
+ | |
+int __weak ipipe_kevent_hook(int kevent, void *data) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+int __ipipe_notify_kevent(int kevent, void *data) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long flags; | |
+ int ret = 0; | |
+ | |
+ ipipe_root_only(); | |
+ | |
+ flags = hard_local_irq_save(); | |
+ | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (likely(p->coflags & __IPIPE_KEVENT_E)) { | |
+ p->coflags |= __IPIPE_KEVENT_R; | |
+ hard_local_irq_restore(flags); | |
+ ret = ipipe_kevent_hook(kevent, data); | |
+ flags = hard_local_irq_save(); | |
+ p->coflags &= ~__IPIPE_KEVENT_R; | |
+ } | |
+ | |
+ hard_local_irq_restore(flags); | |
+ | |
+ return ret; | |
+} | |
+ | |
+void __weak ipipe_migration_hook(struct task_struct *p) | |
+{ | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_LEGACY | |
+ | |
+static inline void complete_domain_migration(void) /* hw IRQs off */ | |
+{ | |
+ current->state &= ~TASK_HARDENING; | |
+} | |
+ | |
+#else /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+static void complete_domain_migration(void) /* hw IRQs off */ | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ struct ipipe_percpu_data *pd; | |
+ struct task_struct *t; | |
+ | |
+ ipipe_root_only(); | |
+ pd = __this_cpu_ptr(&ipipe_percpu); | |
+ t = pd->task_hijacked; | |
+ if (t == NULL) | |
+ return; | |
+ | |
+ pd->task_hijacked = NULL; | |
+ t->state &= ~TASK_HARDENING; | |
+ if (t->state != TASK_INTERRUPTIBLE) | |
+ /* Migration aborted (by signal). */ | |
+ return; | |
+ | |
+ p = ipipe_this_cpu_head_context(); | |
+ IPIPE_WARN_ONCE(test_bit(IPIPE_STALL_FLAG, &p->status)); | |
+ /* | |
+ * hw IRQs are disabled, but the completion hook assumes the | |
+ * head domain is logically stalled: fix it up. | |
+ */ | |
+ __set_bit(IPIPE_STALL_FLAG, &p->status); | |
+ ipipe_migration_hook(t); | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ if (__ipipe_ipending_p(p)) | |
+ __ipipe_sync_pipeline(p->domain); | |
+} | |
+ | |
+#endif /* !CONFIG_IPIPE_LEGACY */ | |
+ | |
+void __ipipe_complete_domain_migration(void) | |
+{ | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ complete_domain_migration(); | |
+ hard_local_irq_restore(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_complete_domain_migration); | |
+ | |
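+/* | |
+ * Tail code of an incoming context switch: complete any pending | |
+ * domain migration, then re-enable hard interrupts as the | |
+ * configuration requires. Returns non-zero when resuming over the | |
+ * head domain. | |
+ */ | |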
+int __ipipe_switch_tail(void) | |
+{ | |
+ int x; | |
+ | |
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ hard_local_irq_disable(); | |
+#endif | |
+ x = __ipipe_root_p; | |
+#ifndef CONFIG_IPIPE_LEGACY | |
+ if (x) | |
+#endif | |
+ complete_domain_migration(); | |
+ | |
+#ifndef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH | |
+ if (x) | |
+#endif | |
+ hard_local_irq_enable(); | |
+ | |
+ return !x; | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_HAVE_VM_NOTIFIER | |
+void __ipipe_notify_vm_preemption(void) | |
+{ | |
+ struct ipipe_vm_notifier *vmf; | |
+ struct ipipe_percpu_data *p; | |
+ | |
+ ipipe_check_irqoff(); | |
+ p = __ipipe_this_cpu_ptr(&ipipe_percpu); | |
+ vmf = p->vm_notifier; | |
+ if (unlikely(vmf)) | |
+ vmf->handler(vmf); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_notify_vm_preemption); | |
+#endif /* CONFIG_IPIPE_HAVE_VM_NOTIFIER */ | |
+ | |
+static void dispatch_irq_head(unsigned int irq) /* hw interrupts off */ | |
+{ | |
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context(), *old; | |
+ struct ipipe_domain *head = p->domain; | |
+ | |
+ if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) { | |
+ __ipipe_set_irq_pending(head, irq); | |
+ return; | |
+ } | |
+ | |
+ /* Switch to the head domain if not current. */ | |
+ old = __ipipe_current_context; | |
+ if (old != p) | |
+ __ipipe_set_current_context(p); | |
+ | |
+ p->irqall[irq]++; | |
+ __set_bit(IPIPE_STALL_FLAG, &p->status); | |
+ barrier(); | |
+ head->irqs[irq].handler(irq, head->irqs[irq].cookie); | |
+ __ipipe_run_irqtail(irq); | |
+ hard_local_irq_disable(); | |
+ p = ipipe_this_cpu_head_context(); | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ | |
+ /* Are we still running in the head domain? */ | |
+ if (likely(__ipipe_current_context == p)) { | |
+ /* Did we enter this code over the head domain? */ | |
+ if (old->domain == head) { | |
+ /* Yes, do immediate synchronization. */ | |
+ if (__ipipe_ipending_p(p)) | |
+ __ipipe_sync_stage(); | |
+ return; | |
+ } | |
+ __ipipe_set_current_context(ipipe_this_cpu_root_context()); | |
+ } | |
+ | |
+ /* | |
+ * We must be running over the root domain, synchronize | |
+ * the pipeline for high priority IRQs (slow path). | |
+ */ | |
+ __ipipe_do_sync_pipeline(head); | |
+} | |
+ | |
+void __ipipe_dispatch_irq(unsigned int irq, int flags) /* hw interrupts off */ | |
+{ | |
+ struct ipipe_domain *ipd; | |
+ struct irq_desc *desc; | |
+ unsigned long control; | |
+ int chained_irq; | |
+ | |
+ /* | |
+ * Survival kit when reading this code: | |
+ * | |
+ * - we have two main situations, leading to three cases for | |
+ * handling interrupts: | |
+ * | |
+ * a) the root domain is alone, no registered head domain | |
+ * => all interrupts are delivered via the fast dispatcher. | |
+ * b) a head domain is registered | |
+ * => head domain IRQs go through the fast dispatcher | |
+ * => root domain IRQs go through the interrupt log | |
+ * | |
+ * - when no head domain is registered, ipipe_head_domain == | |
+ * ipipe_root_domain == &ipipe_root. | |
+ * | |
+ * - the caller tells us whether we should acknowledge this | |
+	 *   IRQ. Even virtual IRQs may require an acknowledge on some | |
+ * platforms (e.g. arm/SMP). | |
+ * | |
+ * - the caller tells us whether we may try to run the IRQ log | |
+ * syncer. Typically, demuxed IRQs won't be synced | |
+ * immediately. | |
+ * | |
+ * - multiplex IRQs most likely have a valid acknowledge | |
+ * handler and we may not be called with IPIPE_IRQF_NOACK | |
+ * for them. The ack handler for the multiplex IRQ actually | |
+ * decodes the demuxed interrupts. | |
+ */ | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG | |
+ if (unlikely(irq >= IPIPE_NR_IRQS) || | |
+ (irq < IPIPE_NR_ROOT_IRQS && irq_to_desc(irq) == NULL)) { | |
+ pr_err("I-pipe: spurious interrupt %u\n", irq); | |
+ return; | |
+ } | |
+#endif | |
+ /* | |
+ * CAUTION: on some archs, virtual IRQs may have acknowledge | |
+ * handlers. Multiplex IRQs should have one too. | |
+ */ | |
+ if (unlikely(irq >= IPIPE_NR_ROOT_IRQS)) { | |
+ desc = NULL; | |
+ chained_irq = 0; | |
+ } else { | |
+ desc = irq_to_desc(irq); | |
+ chained_irq = desc ? ipipe_chained_irq_p(desc) : 0; | |
+ } | |
+ if (flags & IPIPE_IRQF_NOACK) | |
+ IPIPE_WARN_ONCE(chained_irq); | |
+ else { | |
+ ipd = ipipe_head_domain; | |
+ control = ipd->irqs[irq].control; | |
+ if ((control & IPIPE_HANDLE_MASK) == 0) | |
+ ipd = ipipe_root_domain; | |
+ if (ipd->irqs[irq].ackfn) | |
+ ipd->irqs[irq].ackfn(irq, desc); | |
+ if (chained_irq) { | |
+ if ((flags & IPIPE_IRQF_NOSYNC) == 0) | |
+ /* Run demuxed IRQ handlers. */ | |
+ goto sync; | |
+ return; | |
+ } | |
+ } | |
+ | |
+ /* | |
+ * Sticky interrupts must be handled early and separately, so | |
+ * that we always process them on the current domain. | |
+ */ | |
+ ipd = __ipipe_current_domain; | |
+ control = ipd->irqs[irq].control; | |
+ if (control & IPIPE_STICKY_MASK) | |
+ goto log; | |
+ | |
+ /* | |
+ * In case we have no registered head domain | |
+ * (i.e. ipipe_head_domain == &ipipe_root), we always go | |
+ * through the interrupt log, and leave the dispatching work | |
+ * ultimately to __ipipe_sync_pipeline(). | |
+ */ | |
+ ipd = ipipe_head_domain; | |
+ control = ipd->irqs[irq].control; | |
+ if (ipd == ipipe_root_domain) | |
+ /* | |
+ * The root domain must handle all interrupts, so | |
+ * testing the HANDLE bit would be pointless. | |
+ */ | |
+ goto log; | |
+ | |
+ if (control & IPIPE_HANDLE_MASK) { | |
+ if (unlikely(flags & IPIPE_IRQF_NOSYNC)) | |
+ __ipipe_set_irq_pending(ipd, irq); | |
+ else | |
+ dispatch_irq_head(irq); | |
+ return; | |
+ } | |
+ | |
+ ipd = ipipe_root_domain; | |
+log: | |
+ __ipipe_set_irq_pending(ipd, irq); | |
+ | |
+ if (flags & IPIPE_IRQF_NOSYNC) | |
+ return; | |
+ | |
+ /* | |
+ * Optimize if we preempted a registered high priority head | |
+ * domain: we don't need to synchronize the pipeline unless | |
+ * there is a pending interrupt for it. | |
+ */ | |
+ if (!__ipipe_root_p && | |
+ !__ipipe_ipending_p(ipipe_this_cpu_head_context())) | |
+ return; | |
+sync: | |
+ __ipipe_sync_pipeline(ipipe_head_domain); | |
+} | |
+ | |
+#ifdef CONFIG_PREEMPT | |
+ | |
+asmlinkage void preempt_schedule_irq(void); | |
+ | |
+asmlinkage void __sched __ipipe_preempt_schedule_irq(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ unsigned long flags; | |
+ | |
+ BUG_ON(!hard_irqs_disabled()); | |
+ local_irq_save(flags); | |
+ hard_local_irq_enable(); | |
+ preempt_schedule_irq(); /* Ok, may reschedule now. */ | |
+ hard_local_irq_disable(); | |
+ | |
+ /* | |
+	 * Flush any pending interrupt that may have been logged in | |
+	 * the window between the time preempt_schedule_irq() stalled | |
+	 * the root stage and now. | |
+ */ | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (unlikely(__ipipe_ipending_p(p))) { | |
+ add_preempt_count(PREEMPT_ACTIVE); | |
+ trace_hardirqs_on(); | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+ __ipipe_sync_stage(); | |
+ sub_preempt_count(PREEMPT_ACTIVE); | |
+ } | |
+ | |
+ __ipipe_restore_root_nosync(flags); | |
+} | |
+ | |
+#else /* !CONFIG_PREEMPT */ | |
+ | |
+#define __ipipe_preempt_schedule_irq() do { } while (0) | |
+ | |
+#endif /* !CONFIG_PREEMPT */ | |
+ | |
+#ifdef CONFIG_TRACE_IRQFLAGS | |
+#define root_stall_after_handler() local_irq_disable() | |
+#else | |
+#define root_stall_after_handler() do { } while (0) | |
+#endif | |
+ | |
+/* | |
+ * __ipipe_do_sync_stage() -- Flush the pending IRQs for the current | |
+ * domain (and processor). This routine flushes the interrupt log (see | |
+ * "Optimistic interrupt protection" from D. Stodolsky et al. for more | |
+ * on the deferred interrupt scheme). Every interrupt that occurred | |
+ * while the pipeline was stalled gets played. | |
+ * | |
+ * WARNING: CPU migration may occur over this routine. | |
+ */ | |
+void __ipipe_do_sync_stage(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ struct ipipe_domain *ipd; | |
+ int irq; | |
+ | |
+ p = __ipipe_current_context; | |
+ ipd = p->domain; | |
+ | |
+ __set_bit(IPIPE_STALL_FLAG, &p->status); | |
+ smp_wmb(); | |
+ | |
+ if (ipd == ipipe_root_domain) | |
+ trace_hardirqs_off(); | |
+ | |
+ for (;;) { | |
+ irq = __ipipe_next_irq(p); | |
+ if (irq < 0) | |
+ break; | |
+ /* | |
+ * Make sure the compiler does not reorder wrongly, so | |
+ * that all updates to maps are done before the | |
+ * handler gets called. | |
+ */ | |
+ barrier(); | |
+ | |
+ if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) | |
+ continue; | |
+ | |
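+		/* | |
+		 * Only head domain handlers run with hard interrupts | |
+		 * off; handlers of lower domains may still be | |
+		 * preempted by the head stage. | |
+		 */ | |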
+ if (ipd != ipipe_head_domain) | |
+ hard_local_irq_enable(); | |
+ | |
+ if (likely(ipd != ipipe_root_domain)) { | |
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); | |
+ __ipipe_run_irqtail(irq); | |
+ hard_local_irq_disable(); | |
+ } else if (ipipe_virtual_irq_p(irq)) { | |
+ irq_enter(); | |
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); | |
+ irq_exit(); | |
+ root_stall_after_handler(); | |
+ hard_local_irq_disable(); | |
+ while (__ipipe_check_root_resched()) | |
+ __ipipe_preempt_schedule_irq(); | |
+ } else { | |
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); | |
+ root_stall_after_handler(); | |
+ hard_local_irq_disable(); | |
+ } | |
+ | |
+ p = __ipipe_current_context; | |
+ } | |
+ | |
+ if (ipd == ipipe_root_domain) | |
+ trace_hardirqs_on(); | |
+ | |
+ __clear_bit(IPIPE_STALL_FLAG, &p->status); | |
+} | |
+ | |
+#ifdef CONFIG_SMP | |
+ | |
+/* Always called with hw interrupts off. */ | |
+void __ipipe_do_critical_sync(unsigned int irq, void *cookie) | |
+{ | |
+ int cpu = ipipe_processor_id(); | |
+ | |
+ cpu_set(cpu, __ipipe_cpu_sync_map); | |
+ | |
+ /* | |
+ * Now we are in sync with the lock requestor running on | |
+	 * another CPU. Enter a spinning wait until it releases the | |
+ * global lock. | |
+ */ | |
+ spin_lock(&__ipipe_cpu_barrier); | |
+ | |
+ /* Got it. Now get out. */ | |
+ | |
+ /* Call the sync routine if any. */ | |
+ if (__ipipe_cpu_sync) | |
+ __ipipe_cpu_sync(); | |
+ | |
+ cpu_set(cpu, __ipipe_cpu_pass_map); | |
+ | |
+ spin_unlock(&__ipipe_cpu_barrier); | |
+ | |
+ cpu_clear(cpu, __ipipe_cpu_sync_map); | |
+} | |
+ | |
+#endif /* CONFIG_SMP */ | |
+ | |
+unsigned long ipipe_critical_enter(void (*syncfn)(void)) | |
+{ | |
+ cpumask_t allbutself __maybe_unused, online __maybe_unused; | |
+ int cpu __maybe_unused, n __maybe_unused; | |
+ unsigned long flags, loops __maybe_unused; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ | |
+ if (num_online_cpus() == 1) | |
+ return flags; | |
+ | |
+#ifdef CONFIG_SMP | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) { | |
+ while (test_and_set_bit(0, &__ipipe_critical_lock)) { | |
+ n = 0; | |
+ hard_local_irq_enable(); | |
+ | |
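+			/* | |
+			 * Simple backoff, roughly proportional to our | |
+			 * CPU number, so that contenders do not retry | |
+			 * the critical lock in lockstep. | |
+			 */ | |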
+ do | |
+ cpu_relax(); | |
+ while (++n < cpu); | |
+ | |
+ hard_local_irq_disable(); | |
+ } | |
+restart: | |
+ online = *cpu_online_mask; | |
+ spin_lock(&__ipipe_cpu_barrier); | |
+ | |
+ __ipipe_cpu_sync = syncfn; | |
+ | |
+ cpus_clear(__ipipe_cpu_pass_map); | |
+ cpu_set(cpu, __ipipe_cpu_pass_map); | |
+ | |
+ /* | |
+ * Send the sync IPI to all processors but the current | |
+ * one. | |
+ */ | |
+ cpus_andnot(allbutself, online, __ipipe_cpu_pass_map); | |
+ ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself); | |
+ loops = IPIPE_CRITICAL_TIMEOUT; | |
+ | |
+ while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) { | |
+ if (--loops > 0) { | |
+ cpu_relax(); | |
+ continue; | |
+ } | |
+ /* | |
+ * We ran into a deadlock due to a contended | |
+ * rwlock. Cancel this round and retry. | |
+ */ | |
+ __ipipe_cpu_sync = NULL; | |
+ | |
+ spin_unlock(&__ipipe_cpu_barrier); | |
+ /* | |
+ * Ensure all CPUs consumed the IPI to avoid | |
+ * running __ipipe_cpu_sync prematurely. This | |
+			 * usually clears the cause of the deadlock too. | |
+ */ | |
+ while (!cpus_equal(online, __ipipe_cpu_pass_map)) | |
+ cpu_relax(); | |
+ | |
+ goto restart; | |
+ } | |
+ } | |
+ | |
+ atomic_inc(&__ipipe_critical_count); | |
+ | |
+#endif /* CONFIG_SMP */ | |
+ | |
+ return flags; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_critical_enter); | |
+ | |
+void ipipe_critical_exit(unsigned long flags) | |
+{ | |
+ if (num_online_cpus() == 1) { | |
+ hard_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ | |
+#ifdef CONFIG_SMP | |
+ if (atomic_dec_and_test(&__ipipe_critical_count)) { | |
+ spin_unlock(&__ipipe_cpu_barrier); | |
+ while (!cpus_empty(__ipipe_cpu_sync_map)) | |
+ cpu_relax(); | |
+ cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map); | |
+ clear_bit(0, &__ipipe_critical_lock); | |
+ smp_mb__after_clear_bit(); | |
+ } | |
+#endif /* CONFIG_SMP */ | |
+ | |
+ hard_local_irq_restore(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_critical_exit); | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+ | |
+void ipipe_root_only(void) | |
+{ | |
+ struct ipipe_domain *this_domain; | |
+ unsigned long flags; | |
+ | |
+ flags = hard_smp_local_irq_save(); | |
+ | |
+ this_domain = __ipipe_current_domain; | |
+ if (likely(this_domain == ipipe_root_domain && | |
+ !test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status))) { | |
+ hard_smp_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ | |
+ if (!__this_cpu_read(ipipe_percpu.context_check)) { | |
+ hard_smp_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ | |
+ hard_smp_local_irq_restore(flags); | |
+ | |
+ ipipe_prepare_panic(); | |
+ ipipe_trace_panic_freeze(); | |
+ | |
+ if (this_domain != ipipe_root_domain) | |
+ pr_err("I-pipe: Detected illicit call from head domain '%s'\n" | |
+ " into a regular Linux service\n", | |
+ this_domain->name); | |
+ else | |
+ pr_err("I-pipe: Detected stalled head domain, " | |
+ "probably caused by a bug.\n" | |
+ " A critical section may have been " | |
+ "left unterminated.\n"); | |
+ dump_stack(); | |
+ ipipe_trace_panic_dump(); | |
+} | |
+EXPORT_SYMBOL(ipipe_root_only); | |
+ | |
+#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ | |
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP) | |
+ | |
+int notrace __ipipe_check_percpu_access(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *p; | |
+ struct ipipe_domain *this_domain; | |
+ unsigned long flags; | |
+ int ret = 0; | |
+ | |
+ flags = hard_local_irq_save_notrace(); | |
+ | |
+ /* | |
+	 * Don't use __ipipe_current_domain here; this would recurse | |
+ * indefinitely. | |
+ */ | |
+ this_domain = __this_cpu_read(ipipe_percpu.curr)->domain; | |
+ | |
+ /* | |
+ * Only the root domain may implement preemptive CPU migration | |
+ * of tasks, so anything above in the pipeline should be fine. | |
+ */ | |
+ if (this_domain != ipipe_root_domain) | |
+ goto out; | |
+ | |
+ if (raw_irqs_disabled_flags(flags)) | |
+ goto out; | |
+ | |
+ /* | |
+ * Last chance: hw interrupts were enabled on entry while | |
+ * running over the root domain, but the root stage might be | |
+ * currently stalled, in which case preemption would be | |
+ * disabled, and no migration could occur. | |
+ */ | |
+ if (this_domain == ipipe_root_domain) { | |
+ p = ipipe_this_cpu_root_context(); | |
+ if (test_bit(IPIPE_STALL_FLAG, &p->status)) | |
+ goto out; | |
+ } | |
+ /* | |
+ * Our caller may end up accessing the wrong per-cpu variable | |
+ * instance due to CPU migration; tell it to complain about | |
+ * this. | |
+ */ | |
+ ret = 1; | |
+out: | |
+ hard_local_irq_restore_notrace(flags); | |
+ | |
+ return ret; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_check_percpu_access); | |
+ | |
+void __ipipe_spin_unlock_debug(unsigned long flags) | |
+{ | |
+ /* | |
+ * We catch a nasty issue where spin_unlock_irqrestore() on a | |
+ * regular kernel spinlock is about to re-enable hw interrupts | |
+ * in a section entered with hw irqs off. This is clearly the | |
+	 * sign of a massive breakage coming. The usual suspect is a | |
+	 * regular spinlock which was overlooked and used within a | |
+ * section which must run with hw irqs disabled. | |
+ */ | |
+ IPIPE_WARN_ONCE(!raw_irqs_disabled_flags(flags) && hard_irqs_disabled()); | |
+} | |
+EXPORT_SYMBOL(__ipipe_spin_unlock_debug); | |
+ | |
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL && CONFIG_SMP */ | |
+ | |
+void ipipe_prepare_panic(void) | |
+{ | |
+#ifdef CONFIG_PRINTK | |
+ __ipipe_printk_bypass = 1; | |
+#endif | |
+ ipipe_context_check_off(); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_prepare_panic); | |
+ | |
+static void __ipipe_do_work(unsigned int virq, void *cookie) | |
+{ | |
+ struct ipipe_work_header *work; | |
+ unsigned long flags; | |
+ void *curr, *tail; | |
+ int cpu; | |
+ | |
+ /* | |
+ * Work is dispatched in enqueuing order. This interrupt | |
+ * context can't migrate to another CPU. | |
+ */ | |
+ cpu = smp_processor_id(); | |
+ curr = per_cpu(work_buf, cpu); | |
+ | |
+ for (;;) { | |
+ flags = hard_local_irq_save(); | |
+ tail = per_cpu(work_tail, cpu); | |
+ if (curr == tail) { | |
+ per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu); | |
+ hard_local_irq_restore(flags); | |
+ return; | |
+ } | |
+ work = curr; | |
+ curr += work->size; | |
+ hard_local_irq_restore(flags); | |
+ work->handler(work); | |
+ } | |
+} | |
+ | |
+void __ipipe_post_work_root(struct ipipe_work_header *work) | |
+{ | |
+ unsigned long flags; | |
+ void *tail; | |
+ int cpu; | |
+ | |
+ /* | |
+ * Subtle: we want to use the head stall/unstall operators, | |
+ * not the hard_* routines to protect against races. This way, | |
+ * we ensure that a root-based caller will trigger the virq | |
+ * handling immediately when unstalling the head stage, as a | |
+ * result of calling __ipipe_sync_pipeline() under the hood. | |
+ */ | |
+ flags = ipipe_test_and_stall_head(); | |
+ cpu = ipipe_processor_id(); | |
+ tail = per_cpu(work_tail, cpu); | |
+ | |
+ if (WARN_ON_ONCE((unsigned char *)tail + work->size >= | |
+ per_cpu(work_buf, cpu) + WORKBUF_SIZE)) | |
+ goto out; | |
+ | |
+ /* Work handling is deferred, so data has to be copied. */ | |
+ memcpy(tail, work, work->size); | |
+ per_cpu(work_tail, cpu) = tail + work->size; | |
+ ipipe_post_irq_root(__ipipe_work_virq); | |
+out: | |
+ ipipe_restore_head(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_post_work_root); | |
+ | |
+#ifdef CONFIG_KGDB | |
+bool __ipipe_probe_access; | |
+ | |
+long ipipe_probe_kernel_read(void *dst, void *src, size_t size) | |
+{ | |
+ long ret; | |
+ mm_segment_t old_fs = get_fs(); | |
+ | |
+ set_fs(KERNEL_DS); | |
+ __ipipe_probe_access = true; | |
+ barrier(); | |
+ ret = __copy_from_user_inatomic(dst, | |
+ (__force const void __user *)src, size); | |
+ barrier(); | |
+ __ipipe_probe_access = false; | |
+ set_fs(old_fs); | |
+ | |
+ return ret ? -EFAULT : 0; | |
+} | |
+ | |
+long ipipe_probe_kernel_write(void *dst, void *src, size_t size) | |
+{ | |
+ long ret; | |
+ mm_segment_t old_fs = get_fs(); | |
+ | |
+ set_fs(KERNEL_DS); | |
+ __ipipe_probe_access = true; | |
+ barrier(); | |
+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); | |
+ barrier(); | |
+ __ipipe_probe_access = false; | |
+ set_fs(old_fs); | |
+ | |
+ return ret ? -EFAULT : 0; | |
+} | |
+#endif /* CONFIG_KGDB */ | |
+ | |
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || defined(CONFIG_PROVE_LOCKING) || \ | |
+ defined(CONFIG_PREEMPT_VOLUNTARY) || defined(CONFIG_IPIPE_DEBUG_CONTEXT) | |
+void __ipipe_uaccess_might_fault(void) | |
+{ | |
+ struct ipipe_percpu_domain_data *pdd; | |
+ struct ipipe_domain *ipd; | |
+ unsigned long flags; | |
+ | |
+ flags = hard_local_irq_save(); | |
+ ipd = __ipipe_current_domain; | |
+ if (ipd == ipipe_root_domain) { | |
+ hard_local_irq_restore(flags); | |
+ might_fault(); | |
+ return; | |
+ } | |
+ | |
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT | |
+ pdd = ipipe_this_cpu_context(ipd); | |
+ WARN_ON_ONCE(hard_irqs_disabled_flags(flags) | |
+ || test_bit(IPIPE_STALL_FLAG, &pdd->status)); | |
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ (void)pdd; | |
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */ | |
+ hard_local_irq_restore(flags); | |
+ | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_uaccess_might_fault); | |
+#endif | |
diff --git a/kernel/ipipe/timer.c b/kernel/ipipe/timer.c | |
new file mode 100644 | |
index 0000000..578101e | |
--- /dev/null | |
+++ b/kernel/ipipe/timer.c | |
@@ -0,0 +1,493 @@ | |
+/* -*- linux-c -*- | |
+ * linux/kernel/ipipe/timer.c | |
+ * | |
+ * Copyright (C) 2012 Gilles Chanteperdrix | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ * | |
+ * I-pipe timer request interface. | |
+ */ | |
+#include <linux/ipipe.h> | |
+#include <linux/percpu.h> | |
+#include <linux/irqdesc.h> | |
+#include <linux/cpumask.h> | |
+#include <linux/spinlock.h> | |
+#include <linux/ipipe_tickdev.h> | |
+#include <linux/interrupt.h> | |
+#include <linux/export.h> | |
+ | |
+unsigned long __ipipe_hrtimer_freq; | |
+ | |
+static LIST_HEAD(timers); | |
+static IPIPE_DEFINE_SPINLOCK(lock); | |
+ | |
+static DEFINE_PER_CPU(struct ipipe_timer *, percpu_timer); | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+/* | |
+ * Default request method: switch to oneshot mode if supported. | |
+ */ | |
+static void ipipe_timer_default_request(struct ipipe_timer *timer, int steal) | |
+{ | |
+ struct clock_event_device *evtdev = timer->host_timer; | |
+ | |
+ if (!(evtdev->features & CLOCK_EVT_FEAT_ONESHOT)) | |
+ return; | |
+ | |
+ if (evtdev->mode != CLOCK_EVT_MODE_ONESHOT) { | |
+ evtdev->set_mode(CLOCK_EVT_MODE_ONESHOT, evtdev); | |
+ evtdev->set_next_event(timer->freq / HZ, evtdev); | |
+ } | |
+} | |
+ | |
+/* | |
+ * Default release method: return the timer to the mode it had when | |
+ * starting. | |
+ */ | |
+static void ipipe_timer_default_release(struct ipipe_timer *timer) | |
+{ | |
+ struct clock_event_device *evtdev = timer->host_timer; | |
+ | |
+ evtdev->set_mode(evtdev->mode, evtdev); | |
+ if (evtdev->mode == CLOCK_EVT_MODE_ONESHOT) | |
+ evtdev->set_next_event(timer->freq / HZ, evtdev); | |
+} | |
+ | |
+void ipipe_host_timer_register(struct clock_event_device *evtdev) | |
+{ | |
+ struct ipipe_timer *timer = evtdev->ipipe_timer; | |
+ | |
+ if (timer == NULL) | |
+ return; | |
+ | |
+ if (timer->request == NULL) | |
+ timer->request = ipipe_timer_default_request; | |
+ | |
+ /* | |
+	 * By default, reuse the regular Linux clockevent method; on | |
+	 * ARM at least, most set_next_event handlers are safe to be | |
+	 * called from the Xenomai domain anyway. | |
+ */ | |
+ if (timer->set == NULL) { | |
+ timer->timer_set = evtdev; | |
+ timer->set = (typeof(timer->set))evtdev->set_next_event; | |
+ } | |
+ | |
+ if (timer->release == NULL) | |
+ timer->release = ipipe_timer_default_release; | |
+ | |
+ if (timer->name == NULL) | |
+ timer->name = evtdev->name; | |
+ | |
+ if (timer->rating == 0) | |
+ timer->rating = evtdev->rating; | |
+ | |
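+	/* | |
+	 * Recover the timer frequency (Hz) from the clockevent | |
+	 * mult/shift pair, i.e. the cycle count matching 10^9 ns: | |
+	 * freq = (10^9 * mult) >> shift. | |
+	 */ | |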
+ timer->freq = (1000000000ULL * evtdev->mult) >> evtdev->shift; | |
+ | |
+ if (timer->min_delay_ticks == 0) | |
+ timer->min_delay_ticks = | |
+ (evtdev->min_delta_ns * evtdev->mult) >> evtdev->shift; | |
+ | |
+ if (timer->cpumask == NULL) | |
+ timer->cpumask = evtdev->cpumask; | |
+ | |
+ timer->host_timer = evtdev; | |
+ | |
+ ipipe_timer_register(timer); | |
+} | |
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ | |
+/* | |
+ * Register a timer: timers are kept in a list sorted by decreasing | |
+ * rating. | |
+ */ | |
+void ipipe_timer_register(struct ipipe_timer *timer) | |
+{ | |
+ struct ipipe_timer *t; | |
+ unsigned long flags; | |
+ | |
+ if (timer->timer_set == NULL) | |
+ timer->timer_set = timer; | |
+ | |
+ if (timer->cpumask == NULL) | |
+ timer->cpumask = cpumask_of(smp_processor_id()); | |
+ | |
+ spin_lock_irqsave(&lock, flags); | |
+ | |
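+	/* | |
+	 * Insert before the first timer whose rating does not exceed | |
+	 * ours; a new timer is thus placed ahead of existing timers | |
+	 * of equal rating. | |
+	 */ | |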
+ list_for_each_entry(t, &timers, link) { | |
+ if (t->rating <= timer->rating) { | |
+ __list_add(&timer->link, t->link.prev, &t->link); | |
+ goto done; | |
+ } | |
+ } | |
+ list_add_tail(&timer->link, &timers); | |
+ done: | |
+ spin_unlock_irqrestore(&lock, flags); | |
+} | |
+ | |
+static void ipipe_timer_request_sync(void) | |
+{ | |
+ struct ipipe_timer *timer = __ipipe_this_cpu_read(percpu_timer); | |
+ struct clock_event_device *evtdev; | |
+ int steal; | |
+ | |
+ if (!timer) | |
+ return; | |
+ | |
+ evtdev = timer->host_timer; | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+ steal = evtdev != NULL && evtdev->mode != CLOCK_EVT_MODE_UNUSED; | |
+#else /* !CONFIG_GENERIC_CLOCKEVENTS */ | |
+ steal = 1; | |
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ | |
+ | |
+ timer->request(timer, steal); | |
+} | |
+ | |
+/* Set up a timer as per-cpu timer for ipipe */ | |
+static void install_pcpu_timer(unsigned cpu, unsigned hrclock_freq, | |
+			       struct ipipe_timer *t) | |
+{ | |
+ unsigned hrtimer_freq; | |
+ unsigned long long tmp; | |
+ | |
+ if (__ipipe_hrtimer_freq == 0) | |
+ __ipipe_hrtimer_freq = t->freq; | |
+ | |
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = t->irq; | |
+ per_cpu(percpu_timer, cpu) = t; | |
+ | |
+ hrtimer_freq = t->freq; | |
+ if (__ipipe_hrclock_freq > UINT_MAX) | |
+ hrtimer_freq /= 1000; | |
+ | |
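+	/* | |
+	 * c2t_integ/c2t_frac form a 32.32 fixed-point ratio used to | |
+	 * convert clocksource ticks into timer ticks. Illustrative | |
+	 * figures (not from any particular SoC): a 10 MHz clocksource | |
+	 * driving a 25 MHz timer yields c2t_integ = 2 and c2t_frac = | |
+	 * 0x80000000, i.e. a 2.5 conversion ratio. | |
+	 */ | |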
+ t->c2t_integ = hrtimer_freq / hrclock_freq; | |
+ tmp = (((unsigned long long) | |
+ (hrtimer_freq % hrclock_freq)) << 32) | |
+ + hrclock_freq - 1; | |
+ do_div(tmp, hrclock_freq); | |
+ t->c2t_frac = tmp; | |
+} | |
+ | |
+static void select_root_only_timer(unsigned cpu, unsigned hrclock_khz, | |
+ const struct cpumask *mask, | |
+				   struct ipipe_timer *t) | |
+{ | |
+ unsigned icpu; | |
+ struct clock_event_device *evtdev; | |
+ | |
+ /* | |
+ * If no ipipe-supported CPU shares an interrupt with the | |
+ * timer, we do not need to care about it. | |
+ */ | |
+ for_each_cpu(icpu, mask) { | |
+ if (t->irq == per_cpu(ipipe_percpu.hrtimer_irq, icpu)) { | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+ evtdev = t->host_timer; | |
+ if (evtdev && evtdev->mode == CLOCK_EVT_MODE_SHUTDOWN) | |
+ continue; | |
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ goto found; | |
+ } | |
+ } | |
+ | |
+ return; | |
+ | |
+found: | |
+ install_pcpu_timer(cpu, hrclock_khz, t); | |
+} | |
+ | |
+/* | |
+ * Choose per-cpu timers with the highest rating by traversing the | |
+ * rating-sorted list for each CPU. | |
+ */ | |
+int ipipe_select_timers(const struct cpumask *mask) | |
+{ | |
+ unsigned hrclock_freq; | |
+ unsigned long long tmp; | |
+ struct ipipe_timer *t; | |
+ struct clock_event_device *evtdev; | |
+ unsigned long flags; | |
+ unsigned cpu; | |
+ cpumask_t fixup; | |
+ | |
+ if (!__ipipe_hrclock_ok()) { | |
+ printk("I-pipe: high-resolution clock not working\n"); | |
+ return -ENODEV; | |
+ } | |
+ | |
+ if (__ipipe_hrclock_freq > UINT_MAX) { | |
+ tmp = __ipipe_hrclock_freq; | |
+ do_div(tmp, 1000); | |
+ hrclock_freq = tmp; | |
+ } else | |
+ hrclock_freq = __ipipe_hrclock_freq; | |
+ | |
+ spin_lock_irqsave(&lock, flags); | |
+ | |
+ /* First, choose timers for the CPUs handled by ipipe */ | |
+ for_each_cpu(cpu, mask) { | |
+ list_for_each_entry(t, &timers, link) { | |
+ if (!cpumask_test_cpu(cpu, t->cpumask)) | |
+ continue; | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+ evtdev = t->host_timer; | |
+ if (evtdev && evtdev->mode == CLOCK_EVT_MODE_SHUTDOWN) | |
+ continue; | |
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ goto found; | |
+ } | |
+ | |
+ printk("I-pipe: could not find timer for cpu #%d\n", | |
+ cpu); | |
+ goto err_remove_all; | |
+found: | |
+ install_pcpu_timer(cpu, hrclock_freq, t); | |
+ } | |
+ | |
+ /* | |
+	 * Second, check whether we need to fix up any CPUs handled by | |
+	 * Linux but not by ipipe: their timer interrupt may need to be | |
+	 * forwarded because it is the same IRQ as that of an | |
+	 * ipipe-enabled timer. | |
+ */ | |
+ cpumask_andnot(&fixup, cpu_online_mask, mask); | |
+ | |
+ for_each_cpu(cpu, &fixup) { | |
+ list_for_each_entry(t, &timers, link) { | |
+ if (!cpumask_test_cpu(cpu, t->cpumask)) | |
+ continue; | |
+ | |
+ select_root_only_timer(cpu, hrclock_freq, mask, t); | |
+ } | |
+ } | |
+ | |
+ spin_unlock_irqrestore(&lock, flags); | |
+ | |
+ flags = ipipe_critical_enter(ipipe_timer_request_sync); | |
+ ipipe_timer_request_sync(); | |
+ ipipe_critical_exit(flags); | |
+ | |
+ return 0; | |
+ | |
+err_remove_all: | |
+ spin_unlock_irqrestore(&lock, flags); | |
+ | |
+ for_each_cpu(cpu, mask) { | |
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1; | |
+ per_cpu(percpu_timer, cpu) = NULL; | |
+ } | |
+ __ipipe_hrtimer_freq = 0; | |
+ | |
+ return -ENODEV; | |
+} | |
+ | |
+static void ipipe_timer_release_sync(void) | |
+{ | |
+ struct ipipe_timer *timer = __ipipe_this_cpu_read(percpu_timer); | |
+ | |
+ if (timer) | |
+ timer->release(timer); | |
+} | |
+ | |
+void ipipe_timers_release(void) | |
+{ | |
+ unsigned long flags; | |
+ unsigned cpu; | |
+ | |
+ flags = ipipe_critical_enter(ipipe_timer_release_sync); | |
+ ipipe_timer_release_sync(); | |
+ ipipe_critical_exit(flags); | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1; | |
+ per_cpu(percpu_timer, cpu) = NULL; | |
+ __ipipe_hrtimer_freq = 0; | |
+ } | |
+} | |
+ | |
+static void __ipipe_ack_hrtimer_irq(unsigned int irq, struct irq_desc *desc) | |
+{ | |
+ struct ipipe_timer *timer = __ipipe_this_cpu_read(percpu_timer); | |
+ | |
+ if (desc) | |
+ desc->ipipe_ack(irq, desc); | |
+ if (timer->ack) | |
+ timer->ack(); | |
+ if (desc) | |
+ desc->ipipe_end(irq, desc); | |
+} | |
+ | |
+int ipipe_timer_start(void (*tick_handler)(void), | |
+ void (*emumode)(enum clock_event_mode mode, | |
+ struct clock_event_device *cdev), | |
+ int (*emutick)(unsigned long evt, | |
+ struct clock_event_device *cdev), | |
+ unsigned cpu) | |
+{ | |
+ struct clock_event_device *evtdev; | |
+ struct ipipe_timer *timer; | |
+ struct irq_desc *desc; | |
+ unsigned long flags; | |
+ int steal, ret; | |
+ | |
+ timer = per_cpu(percpu_timer, cpu); | |
+ evtdev = timer->host_timer; | |
+ | |
+ flags = ipipe_critical_enter(NULL); | |
+ | |
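+	/* | |
+	 * A timer IRQ shared among CPUs is requested from the head | |
+	 * domain only once, when CPU0 is set up; timers with distinct | |
+	 * per-cpu IRQs are requested for each CPU. | |
+	 */ | |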
+ if (cpu == 0 || timer->irq != per_cpu(ipipe_percpu.hrtimer_irq, 0)) { | |
+ ret = ipipe_request_irq(ipipe_head_domain, timer->irq, | |
+ (ipipe_irq_handler_t)tick_handler, | |
+ NULL, __ipipe_ack_hrtimer_irq); | |
+ if (ret < 0) | |
+ goto done; | |
+ } | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+ steal = evtdev != NULL && evtdev->mode != CLOCK_EVT_MODE_UNUSED; | |
+ if (steal && evtdev->ipipe_stolen == 0) { | |
+ timer->real_mult = evtdev->mult; | |
+ timer->real_shift = evtdev->shift; | |
+ timer->real_set_mode = evtdev->set_mode; | |
+ timer->real_set_next_event = evtdev->set_next_event; | |
+ evtdev->mult = 1; | |
+ evtdev->shift = 0; | |
+ evtdev->set_mode = emumode; | |
+ evtdev->set_next_event = emutick; | |
+ evtdev->ipipe_stolen = 1; | |
+ } | |
+ | |
+ ret = evtdev ? evtdev->mode : CLOCK_EVT_MODE_UNUSED; | |
+#else /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ steal = 1; | |
+ ret = 0; | |
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ | |
+ done: | |
+ ipipe_critical_exit(flags); | |
+ | |
+ desc = irq_to_desc(timer->irq); | |
+ if (desc && irqd_irq_disabled(&desc->irq_data)) | |
+ ipipe_enable_irq(timer->irq); | |
+ | |
+ return ret; | |
+} | |
+ | |
+void ipipe_timer_stop(unsigned cpu) | |
+{ | |
+ unsigned long __maybe_unused flags; | |
+ struct clock_event_device *evtdev; | |
+ struct ipipe_timer *timer; | |
+ struct irq_desc *desc; | |
+ | |
+ timer = per_cpu(percpu_timer, cpu); | |
+ evtdev = timer->host_timer; | |
+ | |
+ desc = irq_to_desc(timer->irq); | |
+ if (desc && irqd_irq_disabled(&desc->irq_data)) | |
+ ipipe_disable_irq(timer->irq); | |
+ | |
+#ifdef CONFIG_GENERIC_CLOCKEVENTS | |
+ if (evtdev) { | |
+ flags = ipipe_critical_enter(NULL); | |
+ | |
+ if (evtdev->ipipe_stolen) { | |
+ evtdev->mult = timer->real_mult; | |
+ evtdev->shift = timer->real_shift; | |
+ evtdev->set_mode = timer->real_set_mode; | |
+ evtdev->set_next_event = timer->real_set_next_event; | |
+ timer->real_mult = timer->real_shift = 0; | |
+ timer->real_set_mode = NULL; | |
+ timer->real_set_next_event = NULL; | |
+ evtdev->ipipe_stolen = 0; | |
+ } | |
+ | |
+ ipipe_critical_exit(flags); | |
+ } | |
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */ | |
+ | |
+ ipipe_free_irq(ipipe_head_domain, timer->irq); | |
+} | |
+ | |
+void ipipe_timer_set(unsigned long cdelay) | |
+{ | |
+ unsigned long tdelay; | |
+ struct ipipe_timer *t; | |
+ | |
+ t = __ipipe_this_cpu_read(percpu_timer); | |
+ | |
+ /* | |
+	 * Even though some architectures may use a 64-bit delay | |
+	 * here, we deliberately limit it to 32 bits: 4 billion ticks | |
+	 * should be enough for now. Should a timer need more, an | |
+	 * extra call to the tick handler would simply occur after 4 | |
+	 * billion ticks. | |
+ */ | |
+ if (cdelay > UINT_MAX) | |
+ cdelay = UINT_MAX; | |
+ | |
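+	/* | |
+	 * Convert clocksource ticks to timer ticks using the 32.32 | |
+	 * fixed-point ratio set up by install_pcpu_timer(). With the | |
+	 * illustrative 2.5 ratio mentioned there, a 1000 tick delay | |
+	 * converts to 2500 timer ticks. | |
+	 */ | |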
+ tdelay = cdelay; | |
+ if (t->c2t_integ != 1) | |
+ tdelay *= t->c2t_integ; | |
+ if (t->c2t_frac) | |
+ tdelay += ((unsigned long long)cdelay * t->c2t_frac) >> 32; | |
+ | |
+ if (tdelay < t->min_delay_ticks | |
+ || t->set(tdelay, t->timer_set) < 0) | |
+ ipipe_raise_irq(t->irq); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_timer_set); | |
+ | |
+const char *ipipe_timer_name(void) | |
+{ | |
+ return per_cpu(percpu_timer, 0)->name; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_timer_name); | |
+ | |
+unsigned ipipe_timer_ns2ticks(struct ipipe_timer *timer, unsigned ns) | |
+{ | |
+	unsigned long long tmp; | |
+ | |
+	BUG_ON(!timer->freq); | |
+ tmp = (unsigned long long)ns * timer->freq; | |
+ do_div(tmp, 1000000000); | |
+ return tmp; | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT | |
+/* | |
+ * NOTE: The architecture-specific code must only call this function | |
+ * when a clocksource suitable for CLOCK_HOST_REALTIME is enabled. | |
+ * The event receiver is responsible for providing proper locking. | |
+ */ | |
+void ipipe_update_hostrt(struct timekeeper *tk) | |
+{ | |
+ struct ipipe_hostrt_data data; | |
+ struct timespec xt; | |
+ | |
+ xt = tk_xtime(tk); | |
+ ipipe_root_only(); | |
+ data.live = 1; | |
+ data.cycle_last = tk->clock->cycle_last; | |
+ data.mask = tk->clock->mask; | |
+ data.mult = tk->mult; | |
+ data.shift = tk->shift; | |
+ data.wall_time_sec = xt.tv_sec; | |
+ data.wall_time_nsec = xt.tv_nsec; | |
+ data.wall_to_monotonic = tk->wall_to_monotonic; | |
+ __ipipe_notify_kevent(IPIPE_KEVT_HOSTRT, &data); | |
+} | |
+ | |
+#endif /* CONFIG_IPIPE_HAVE_HOSTRT */ | |
diff --git a/kernel/ipipe/tracer.c b/kernel/ipipe/tracer.c | |
new file mode 100644 | |
index 0000000..5cce0bc | |
--- /dev/null | |
+++ b/kernel/ipipe/tracer.c | |
@@ -0,0 +1,1447 @@ | |
+/* -*- linux-c -*- | |
+ * kernel/ipipe/tracer.c | |
+ * | |
+ * Copyright (C) 2005 Luotao Fu. | |
+ * 2005-2008 Jan Kiszka. | |
+ * | |
+ * This program is free software; you can redistribute it and/or modify | |
+ * it under the terms of the GNU General Public License as published by | |
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | |
+ * USA; either version 2 of the License, or (at your option) any later | |
+ * version. | |
+ * | |
+ * This program is distributed in the hope that it will be useful, | |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
+ * GNU General Public License for more details. | |
+ * | |
+ * You should have received a copy of the GNU General Public License | |
+ * along with this program; if not, write to the Free Software | |
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
+ */ | |
+ | |
+#include <linux/kernel.h> | |
+#include <linux/module.h> | |
+#include <linux/version.h> | |
+#include <linux/kallsyms.h> | |
+#include <linux/seq_file.h> | |
+#include <linux/proc_fs.h> | |
+#include <linux/ctype.h> | |
+#include <linux/vmalloc.h> | |
+#include <linux/pid.h> | |
+#include <linux/vermagic.h> | |
+#include <linux/sched.h> | |
+#include <linux/ipipe.h> | |
+#include <linux/ftrace.h> | |
+#include <asm/uaccess.h> | |
+ | |
+#define IPIPE_TRACE_PATHS 4 /* <!> Do not lower below 3 */ | |
+#define IPIPE_DEFAULT_ACTIVE 0 | |
+#define IPIPE_DEFAULT_MAX 1 | |
+#define IPIPE_DEFAULT_FROZEN 2 | |
+ | |
+#define IPIPE_TRACE_POINTS (1 << CONFIG_IPIPE_TRACE_SHIFT) | |
+#define WRAP_POINT_NO(point) ((point) & (IPIPE_TRACE_POINTS-1)) | |
+ | |
+#define IPIPE_DEFAULT_PRE_TRACE 10 | |
+#define IPIPE_DEFAULT_POST_TRACE 10 | |
+#define IPIPE_DEFAULT_BACK_TRACE 100 | |
+ | |
+#define IPIPE_DELAY_NOTE 1000 /* in nanoseconds */ | |
+#define IPIPE_DELAY_WARN 10000 /* in nanoseconds */ | |
+ | |
+#define IPIPE_TFLG_NMI_LOCK 0x0001 | |
+#define IPIPE_TFLG_NMI_HIT 0x0002 | |
+#define IPIPE_TFLG_NMI_FREEZE_REQ 0x0004 | |
+ | |
+#define IPIPE_TFLG_HWIRQ_OFF 0x0100 | |
+#define IPIPE_TFLG_FREEZING 0x0200 | |
+#define IPIPE_TFLG_CURRDOM_SHIFT 10 /* bits 10..11: current domain */ | |
+#define IPIPE_TFLG_CURRDOM_MASK 0x0C00 | |
+#define IPIPE_TFLG_DOMSTATE_SHIFT 12 /* bits 12..15: domain stalled? */ | |
+#define IPIPE_TFLG_DOMSTATE_BITS 3 | |
+ | |
+#define IPIPE_TFLG_DOMAIN_STALLED(point, n) \ | |
+ (point->flags & (1 << (n + IPIPE_TFLG_DOMSTATE_SHIFT))) | |
+#define IPIPE_TFLG_CURRENT_DOMAIN(point) \ | |
+ ((point->flags & IPIPE_TFLG_CURRDOM_MASK) >> IPIPE_TFLG_CURRDOM_SHIFT) | |
+ | |
+struct ipipe_trace_point { | |
+ short type; | |
+ short flags; | |
+ unsigned long eip; | |
+ unsigned long parent_eip; | |
+ unsigned long v; | |
+ unsigned long long timestamp; | |
+}; | |
+ | |
+struct ipipe_trace_path { | |
+ volatile int flags; | |
+ int dump_lock; /* separated from flags due to cross-cpu access */ | |
+ int trace_pos; /* next point to fill */ | |
+ int begin, end; /* finalised path begin and end */ | |
+ int post_trace; /* non-zero when in post-trace phase */ | |
+ unsigned long long length; /* max path length in cycles */ | |
+ unsigned long nmi_saved_eip; /* for deferred requests from NMIs */ | |
+ unsigned long nmi_saved_parent_eip; | |
+ unsigned long nmi_saved_v; | |
+ struct ipipe_trace_point point[IPIPE_TRACE_POINTS]; | |
+} ____cacheline_aligned_in_smp; | |
+ | |
+enum ipipe_trace_type | |
+{ | |
+ IPIPE_TRACE_FUNC = 0, | |
+ IPIPE_TRACE_BEGIN, | |
+ IPIPE_TRACE_END, | |
+ IPIPE_TRACE_FREEZE, | |
+ IPIPE_TRACE_SPECIAL, | |
+ IPIPE_TRACE_PID, | |
+ IPIPE_TRACE_EVENT, | |
+}; | |
+ | |
+#define IPIPE_TYPE_MASK 0x0007 | |
+#define IPIPE_TYPE_BITS 3 | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC | |
+static DEFINE_PER_CPU(struct ipipe_trace_path *, trace_path); | |
+#else /* !CONFIG_IPIPE_TRACE_VMALLOC */ | |
+static DEFINE_PER_CPU(struct ipipe_trace_path, trace_path[IPIPE_TRACE_PATHS]) = | |
+ { [0 ... IPIPE_TRACE_PATHS-1] = { .begin = -1, .end = -1 } }; | |
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */ | |
+ | |
+int ipipe_trace_enable = 0; | |
+ | |
+static DEFINE_PER_CPU(int, active_path) = { IPIPE_DEFAULT_ACTIVE }; | |
+static DEFINE_PER_CPU(int, max_path) = { IPIPE_DEFAULT_MAX }; | |
+static DEFINE_PER_CPU(int, frozen_path) = { IPIPE_DEFAULT_FROZEN }; | |
+static IPIPE_DEFINE_SPINLOCK(global_path_lock); | |
+static int pre_trace = IPIPE_DEFAULT_PRE_TRACE; | |
+static int post_trace = IPIPE_DEFAULT_POST_TRACE; | |
+static int back_trace = IPIPE_DEFAULT_BACK_TRACE; | |
+static int verbose_trace = 1; | |
+static unsigned long trace_overhead; | |
+ | |
+static unsigned long trigger_begin; | |
+static unsigned long trigger_end; | |
+ | |
+static DEFINE_MUTEX(out_mutex); | |
+static struct ipipe_trace_path *print_path; | |
+#ifdef CONFIG_IPIPE_TRACE_PANIC | |
+static struct ipipe_trace_path *panic_path; | |
+#endif /* CONFIG_IPIPE_TRACE_PANIC */ | |
+static int print_pre_trace; | |
+static int print_post_trace; | |
+ | |
+ | |
+static long __ipipe_signed_tsc2us(long long tsc); | |
+static void | |
+__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point); | |
+static void __ipipe_print_symname(struct seq_file *m, unsigned long eip); | |
+ | |
+static inline void store_states(struct ipipe_domain *ipd, | |
+ struct ipipe_trace_point *point, int pos) | |
+{ | |
+ if (test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(ipd)->status)) | |
+ point->flags |= 1 << (pos + IPIPE_TFLG_DOMSTATE_SHIFT); | |
+ | |
+ if (ipd == __ipipe_current_domain) | |
+ point->flags |= pos << IPIPE_TFLG_CURRDOM_SHIFT; | |
+} | |
+ | |
+static notrace void | |
+__ipipe_store_domain_states(struct ipipe_trace_point *point) | |
+{ | |
+ store_states(ipipe_root_domain, point, 0); | |
+ if (ipipe_head_domain != ipipe_root_domain) | |
+ store_states(ipipe_head_domain, point, 1); | |
+} | |
+ | |
+static notrace int __ipipe_get_free_trace_path(int old, int cpu) | |
+{ | |
+ int new_active = old; | |
+ struct ipipe_trace_path *tp; | |
+ | |
+ do { | |
+ if (++new_active == IPIPE_TRACE_PATHS) | |
+ new_active = 0; | |
+ tp = &per_cpu(trace_path, cpu)[new_active]; | |
+ } while (new_active == per_cpu(max_path, cpu) || | |
+ new_active == per_cpu(frozen_path, cpu) || | |
+ tp->dump_lock); | |
+ | |
+ return new_active; | |
+} | |
+ | |
+static notrace void | |
+__ipipe_migrate_pre_trace(struct ipipe_trace_path *new_tp, | |
+ struct ipipe_trace_path *old_tp, int old_pos) | |
+{ | |
+ int i; | |
+ | |
+ new_tp->trace_pos = pre_trace+1; | |
+ | |
+ for (i = new_tp->trace_pos; i > 0; i--) | |
+ memcpy(&new_tp->point[WRAP_POINT_NO(new_tp->trace_pos-i)], | |
+ &old_tp->point[WRAP_POINT_NO(old_pos-i)], | |
+ sizeof(struct ipipe_trace_point)); | |
+ | |
+ /* mark the end (i.e. the point before point[0]) invalid */ | |
+ new_tp->point[IPIPE_TRACE_POINTS-1].eip = 0; | |
+} | |
+ | |
+static notrace struct ipipe_trace_path * | |
+__ipipe_trace_end(int cpu, struct ipipe_trace_path *tp, int pos) | |
+{ | |
+ struct ipipe_trace_path *old_tp = tp; | |
+ long active = per_cpu(active_path, cpu); | |
+ unsigned long long length; | |
+ | |
+ /* do we have a new worst case? */ | |
+ length = tp->point[tp->end].timestamp - | |
+ tp->point[tp->begin].timestamp; | |
+ if (length > per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)].length) { | |
+ /* we need protection here against other cpus trying | |
+ to start a proc dump */ | |
+ spin_lock(&global_path_lock); | |
+ | |
+ /* active path holds new worst case */ | |
+ tp->length = length; | |
+ per_cpu(max_path, cpu) = active; | |
+ | |
+ /* find next unused trace path */ | |
+ active = __ipipe_get_free_trace_path(active, cpu); | |
+ | |
+ spin_unlock(&global_path_lock); | |
+ | |
+ tp = &per_cpu(trace_path, cpu)[active]; | |
+ | |
+ /* migrate last entries for pre-tracing */ | |
+ __ipipe_migrate_pre_trace(tp, old_tp, pos); | |
+ } | |
+ | |
+ return tp; | |
+} | |
+ | |
+static notrace struct ipipe_trace_path * | |
+__ipipe_trace_freeze(int cpu, struct ipipe_trace_path *tp, int pos) | |
+{ | |
+ struct ipipe_trace_path *old_tp = tp; | |
+ long active = per_cpu(active_path, cpu); | |
+ int n; | |
+ | |
+ /* frozen paths have no core (begin=end) */ | |
+ tp->begin = tp->end; | |
+ | |
+ /* we need protection here against other cpus trying | |
+ * to set their frozen path or to start a proc dump */ | |
+ spin_lock(&global_path_lock); | |
+ | |
+ per_cpu(frozen_path, cpu) = active; | |
+ | |
+ /* find next unused trace path */ | |
+ active = __ipipe_get_free_trace_path(active, cpu); | |
+ | |
+ /* check if this is the first frozen path */ | |
+ for_each_possible_cpu(n) { | |
+ if (n != cpu && | |
+ per_cpu(trace_path, n)[per_cpu(frozen_path, n)].end >= 0) | |
+ tp->end = -1; | |
+ } | |
+ | |
+ spin_unlock(&global_path_lock); | |
+ | |
+ tp = &per_cpu(trace_path, cpu)[active]; | |
+ | |
+ /* migrate last entries for pre-tracing */ | |
+ __ipipe_migrate_pre_trace(tp, old_tp, pos); | |
+ | |
+ return tp; | |
+} | |
+ | |
+void notrace | |
+__ipipe_trace(enum ipipe_trace_type type, unsigned long eip, | |
+ unsigned long parent_eip, unsigned long v) | |
+{ | |
+ struct ipipe_trace_path *tp, *old_tp; | |
+ int pos, next_pos, begin; | |
+ struct ipipe_trace_point *point; | |
+ unsigned long flags; | |
+ int cpu; | |
+ | |
+ flags = hard_local_irq_save_notrace(); | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ restart: | |
+ tp = old_tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]; | |
+ | |
+	/* here starts a race window with NMIs - caught below */ | |
+ | |
+ /* check for NMI recursion */ | |
+ if (unlikely(tp->flags & IPIPE_TFLG_NMI_LOCK)) { | |
+ tp->flags |= IPIPE_TFLG_NMI_HIT; | |
+ | |
+ /* first freeze request from NMI context? */ | |
+ if ((type == IPIPE_TRACE_FREEZE) && | |
+ !(tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)) { | |
+ /* save arguments and mark deferred freezing */ | |
+ tp->flags |= IPIPE_TFLG_NMI_FREEZE_REQ; | |
+ tp->nmi_saved_eip = eip; | |
+ tp->nmi_saved_parent_eip = parent_eip; | |
+ tp->nmi_saved_v = v; | |
+ } | |
+ return; /* no need for restoring flags inside IRQ */ | |
+ } | |
+ | |
+ /* clear NMI events and set lock (atomically per cpu) */ | |
+ tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT | | |
+ IPIPE_TFLG_NMI_FREEZE_REQ)) | |
+ | IPIPE_TFLG_NMI_LOCK; | |
+ | |
+ /* check active_path again - some nasty NMI may have switched | |
+ * it meanwhile */ | |
+ if (unlikely(tp != | |
+ &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)])) { | |
+ /* release lock on wrong path and restart */ | |
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK; | |
+ | |
+ /* there is no chance that the NMI got deferred | |
+ * => no need to check for pending freeze requests */ | |
+ goto restart; | |
+ } | |
+ | |
+ /* get the point buffer */ | |
+ pos = tp->trace_pos; | |
+ point = &tp->point[pos]; | |
+ | |
+ /* store all trace point data */ | |
+ point->type = type; | |
+ point->flags = hard_irqs_disabled_flags(flags) ? IPIPE_TFLG_HWIRQ_OFF : 0; | |
+ point->eip = eip; | |
+ point->parent_eip = parent_eip; | |
+ point->v = v; | |
+ ipipe_read_tsc(point->timestamp); | |
+ | |
+ __ipipe_store_domain_states(point); | |
+ | |
+ /* forward to next point buffer */ | |
+ next_pos = WRAP_POINT_NO(pos+1); | |
+ tp->trace_pos = next_pos; | |
+ | |
+ /* only mark beginning if we haven't started yet */ | |
+ begin = tp->begin; | |
+ if (unlikely(type == IPIPE_TRACE_BEGIN) && (begin < 0)) | |
+ tp->begin = pos; | |
+ | |
+ /* end of critical path, start post-trace if not already started */ | |
+ if (unlikely(type == IPIPE_TRACE_END) && | |
+ (begin >= 0) && !tp->post_trace) | |
+ tp->post_trace = post_trace + 1; | |
+ | |
+ /* freeze only if the slot is free and we are not already freezing */ | |
+ if ((unlikely(type == IPIPE_TRACE_FREEZE) || | |
+ (unlikely(eip >= trigger_begin && eip <= trigger_end) && | |
+ type == IPIPE_TRACE_FUNC)) && | |
+ per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)].begin < 0 && | |
+ !(tp->flags & IPIPE_TFLG_FREEZING)) { | |
+ tp->post_trace = post_trace + 1; | |
+ tp->flags |= IPIPE_TFLG_FREEZING; | |
+ } | |
+ | |
+ /* enforce end of trace in case of overflow */ | |
+ if (unlikely(WRAP_POINT_NO(next_pos + 1) == begin)) { | |
+ tp->end = pos; | |
+ goto enforce_end; | |
+ } | |
+ | |
+ /* stop tracing this path if we are in post-trace and | |
+ * a) that phase is over now or | |
+ * b) a new TRACE_BEGIN came in but we are not freezing this path */ | |
+ if (unlikely((tp->post_trace > 0) && ((--tp->post_trace == 0) || | |
+ ((type == IPIPE_TRACE_BEGIN) && | |
+ !(tp->flags & IPIPE_TFLG_FREEZING))))) { | |
+ /* store the path's end (i.e. excluding post-trace) */ | |
+ tp->end = WRAP_POINT_NO(pos - post_trace + tp->post_trace); | |
+ | |
+ enforce_end: | |
+ if (tp->flags & IPIPE_TFLG_FREEZING) | |
+ tp = __ipipe_trace_freeze(cpu, tp, pos); | |
+ else | |
+ tp = __ipipe_trace_end(cpu, tp, pos); | |
+ | |
+ /* reset the active path, maybe already start a new one */ | |
+ tp->begin = (type == IPIPE_TRACE_BEGIN) ? | |
+ WRAP_POINT_NO(tp->trace_pos - 1) : -1; | |
+ tp->end = -1; | |
+ tp->post_trace = 0; | |
+ tp->flags = 0; | |
+ | |
+ /* update active_path not earlier to avoid races with NMIs */ | |
+ per_cpu(active_path, cpu) = tp - per_cpu(trace_path, cpu); | |
+ } | |
+ | |
+ /* we still have old_tp and point, | |
+ * let's reset NMI lock and check for catches */ | |
+ old_tp->flags &= ~IPIPE_TFLG_NMI_LOCK; | |
+ if (unlikely(old_tp->flags & IPIPE_TFLG_NMI_HIT)) { | |
+		/* well, this late tagging may not immediately be visible to | |
+ * other cpus already dumping this path - a minor issue */ | |
+ point->flags |= IPIPE_TFLG_NMI_HIT; | |
+ | |
+ /* handle deferred freezing from NMI context */ | |
+ if (old_tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ) | |
+ __ipipe_trace(IPIPE_TRACE_FREEZE, old_tp->nmi_saved_eip, | |
+ old_tp->nmi_saved_parent_eip, | |
+ old_tp->nmi_saved_v); | |
+ } | |
+ | |
+ hard_local_irq_restore_notrace(flags); | |
+} | |
+ | |
+static unsigned long __ipipe_global_path_lock(void) | |
+{ | |
+ unsigned long flags; | |
+ int cpu; | |
+ struct ipipe_trace_path *tp; | |
+ | |
+ spin_lock_irqsave(&global_path_lock, flags); | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ restart: | |
+ tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]; | |
+ | |
+ /* here is a small race window with NMIs - caught below */ | 
+ | |
+ /* clear NMI events and set lock (atomically per cpu) */ | |
+ tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT | | |
+ IPIPE_TFLG_NMI_FREEZE_REQ)) | |
+ | IPIPE_TFLG_NMI_LOCK; | |
+ | |
+ /* check active_path again - some nasty NMI may have switched | |
+ * it meanwhile */ | |
+ if (tp != &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]) { | |
+ /* release lock on wrong path and restart */ | |
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK; | |
+ | |
+ /* there is no chance that the NMI got deferred | |
+ * => no need to check for pending freeze requests */ | |
+ goto restart; | |
+ } | |
+ | |
+ return flags; | |
+} | |
+ | |
+static void __ipipe_global_path_unlock(unsigned long flags) | |
+{ | |
+ int cpu; | |
+ struct ipipe_trace_path *tp; | |
+ | |
+ /* release spinlock first - it's not involved in the NMI issue */ | |
+ __ipipe_spin_unlock_irqbegin(&global_path_lock); | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]; | |
+ | |
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK; | |
+ | |
+ /* handle deferred freezing from NMI context */ | |
+ if (tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ) | |
+ __ipipe_trace(IPIPE_TRACE_FREEZE, tp->nmi_saved_eip, | |
+ tp->nmi_saved_parent_eip, tp->nmi_saved_v); | |
+ | |
+ /* See __ipipe_spin_lock_irqsave() and friends. */ | |
+ __ipipe_spin_unlock_irqcomplete(flags); | |
+} | |
+ | |
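Both __ipipe_trace() and __ipipe_global_path_lock() rely on the same NMI-safe idiom used above: tag whichever path currently looks active with a lock flag, then re-read the active index and retry if an NMI switched paths in between. A minimal single-threaded model of that claim-and-recheck loop (all names invented, not taken from the patch):

    #include <stdio.h>

    #define NMI_LOCK 1

    struct path { int flags; };

    static struct path paths[2];
    static volatile int active;      /* index an NMI would log into */

    static struct path *lock_active_path(void)
    {
        struct path *p;

        for (;;) {
            p = &paths[active];
            p->flags |= NMI_LOCK;        /* claim what looks active */
            if (p == &paths[active])     /* still the active one? */
                return p;
            p->flags &= ~NMI_LOCK;       /* an NMI switched paths: retry */
        }
    }

    int main(void)
    {
        struct path *p = lock_active_path();

        printf("locked path %ld\n", (long)(p - paths));
        p->flags &= ~NMI_LOCK;
        return 0;
    }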
+void notrace asmlinkage | |
+ipipe_trace_asm(enum ipipe_trace_type type, unsigned long eip, | |
+ unsigned long parent_eip, unsigned long v) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(type, eip, parent_eip, v); | |
+} | |
+ | |
+void notrace ipipe_trace_begin(unsigned long v) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_BEGIN, __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, v); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_begin); | |
+ | |
+void notrace ipipe_trace_end(unsigned long v) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_END, __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, v); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_end); | |
+ | |
+void notrace ipipe_trace_freeze(unsigned long v) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_FREEZE, __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, v); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_freeze); | |
+ | |
+void notrace ipipe_trace_special(unsigned char id, unsigned long v) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_SPECIAL | (id << IPIPE_TYPE_BITS), | |
+ __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, v); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_special); | |
+ | |
+void notrace ipipe_trace_pid(pid_t pid, short prio) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_PID | (prio << IPIPE_TYPE_BITS), | |
+ __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, pid); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_pid); | |
+ | |
+void notrace ipipe_trace_event(unsigned char id, unsigned long delay_tsc) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_EVENT | (id << IPIPE_TYPE_BITS), | |
+ __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, delay_tsc); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_event); | |
+ | |
+int ipipe_trace_max_reset(void) | |
+{ | |
+ int cpu; | |
+ unsigned long flags; | |
+ struct ipipe_trace_path *path; | |
+ int ret = 0; | |
+ | |
+ flags = __ipipe_global_path_lock(); | |
+ | |
+ for_each_possible_cpu(cpu) { | |
+ path = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)]; | |
+ | |
+ if (path->dump_lock) { | |
+ ret = -EBUSY; | |
+ break; | |
+ } | |
+ | |
+ path->begin = -1; | |
+ path->end = -1; | |
+ path->trace_pos = 0; | |
+ path->length = 0; | |
+ } | |
+ | |
+ __ipipe_global_path_unlock(flags); | |
+ | |
+ return ret; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_max_reset); | |
+ | |
+int ipipe_trace_frozen_reset(void) | |
+{ | |
+ int cpu; | |
+ unsigned long flags; | |
+ struct ipipe_trace_path *path; | |
+ int ret = 0; | |
+ | |
+ flags = __ipipe_global_path_lock(); | |
+ | |
+ for_each_online_cpu(cpu) { | |
+ path = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)]; | |
+ | |
+ if (path->dump_lock) { | |
+ ret = -EBUSY; | |
+ break; | |
+ } | |
+ | |
+ path->begin = -1; | |
+ path->end = -1; | |
+ path->trace_pos = 0; | |
+ path->length = 0; | |
+ } | |
+ | |
+ __ipipe_global_path_unlock(flags); | |
+ | |
+ return ret; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_frozen_reset); | |
+ | |
+static void | |
+__ipipe_get_task_info(char *task_info, struct ipipe_trace_point *point, | |
+ int trylock) | |
+{ | |
+ struct task_struct *task = NULL; | |
+ char buf[8]; | |
+ int i; | |
+ int locked = 1; | |
+ | |
+ if (trylock) { | |
+ if (!read_trylock(&tasklist_lock)) | |
+ locked = 0; | |
+ } else | |
+ read_lock(&tasklist_lock); | |
+ | |
+ if (locked) | |
+ task = find_task_by_pid_ns((pid_t)point->v, &init_pid_ns); | |
+ | |
+ if (task) | |
+ strncpy(task_info, task->comm, 11); | |
+ else | |
+ strcpy(task_info, "-<?>-"); | |
+ | |
+ if (locked) | |
+ read_unlock(&tasklist_lock); | |
+ | |
+ for (i = strlen(task_info); i < 11; i++) | |
+ task_info[i] = ' '; | |
+ | |
+ sprintf(buf, " %d ", point->type >> IPIPE_TYPE_BITS); | |
+ strcpy(task_info + (11 - strlen(buf)), buf); | |
+} | |
+ | |
+static void | |
+__ipipe_get_event_date(char *buf, struct ipipe_trace_path *path, | 
+ struct ipipe_trace_point *point) | |
+{ | |
+ long time; | |
+ int type; | |
+ | |
+ time = __ipipe_signed_tsc2us(point->timestamp - | |
+ path->point[path->begin].timestamp + point->v); | |
+ type = point->type >> IPIPE_TYPE_BITS; | |
+ | |
+ if (type == 0) | |
+ /* | |
+ * Event type #0 is predefined, stands for the next | |
+ * timer tick. | |
+ */ | |
+ sprintf(buf, "tick@%-6ld", time); | |
+ else | |
+ sprintf(buf, "%3d@%-7ld", type, time); | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_PANIC | |
+ | |
+void ipipe_trace_panic_freeze(void) | |
+{ | |
+ unsigned long flags; | |
+ int cpu; | |
+ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ | |
+ ipipe_trace_enable = 0; | |
+ flags = hard_local_irq_save_notrace(); | |
+ | |
+ cpu = ipipe_processor_id(); | |
+ | |
+ panic_path = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]; | |
+ | |
+ hard_local_irq_restore(flags); | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_panic_freeze); | |
+ | |
+void ipipe_trace_panic_dump(void) | |
+{ | |
+ int cnt = back_trace; | |
+ int start, pos; | |
+ char buf[16]; | |
+ | |
+ if (!panic_path) | |
+ return; | |
+ | |
+ ipipe_context_check_off(); | |
+ | |
+ printk("I-pipe tracer log (%d points):\n", cnt); | |
+ | |
+ start = pos = WRAP_POINT_NO(panic_path->trace_pos-1); | |
+ | |
+ while (cnt-- > 0) { | |
+ struct ipipe_trace_point *point = &panic_path->point[pos]; | |
+ long time; | |
+ char info[16]; | |
+ int i; | |
+ | |
+ printk(" %c", | |
+ (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' '); | |
+ | |
+ for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--) | |
+ printk("%c", | |
+ (IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ? | |
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ? | |
+ '#' : '+') : | |
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ? | |
+ '*' : ' ')); | |
+ | |
+ if (!point->eip) | |
+ printk("-<invalid>-\n"); | |
+ else { | |
+ __ipipe_trace_point_type(buf, point); | |
+ printk("%s", buf); | |
+ | |
+ switch (point->type & IPIPE_TYPE_MASK) { | |
+ case IPIPE_TRACE_FUNC: | |
+ printk(" "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_PID: | |
+ __ipipe_get_task_info(info, | |
+ point, 1); | |
+ printk("%s", info); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_EVENT: | |
+ __ipipe_get_event_date(info, | |
+ panic_path, point); | |
+ printk("%s", info); | |
+ break; | |
+ | |
+ default: | |
+ printk("0x%08lx ", point->v); | |
+ } | |
+ | |
+ time = __ipipe_signed_tsc2us(point->timestamp - | |
+ panic_path->point[start].timestamp); | |
+ printk(" %5ld ", time); | |
+ | |
+ __ipipe_print_symname(NULL, point->eip); | |
+ printk(" ("); | |
+ __ipipe_print_symname(NULL, point->parent_eip); | |
+ printk(")\n"); | |
+ } | |
+ pos = WRAP_POINT_NO(pos - 1); | |
+ } | |
+ | |
+ panic_path = NULL; | |
+} | |
+EXPORT_SYMBOL_GPL(ipipe_trace_panic_dump); | |
+ | |
+#endif /* CONFIG_IPIPE_TRACE_PANIC */ | |
+ | |
+ | |
+/* --- /proc output --- */ | |
+ | |
+static notrace int __ipipe_in_critical_trpath(long point_no) | |
+{ | |
+ return ((WRAP_POINT_NO(point_no-print_path->begin) < | |
+ WRAP_POINT_NO(print_path->end-print_path->begin)) || | |
+ ((print_path->end == print_path->begin) && | |
+ (WRAP_POINT_NO(point_no-print_path->end) > | |
+ print_post_trace))); | |
+} | |
+ | |
+static long __ipipe_signed_tsc2us(long long tsc) | |
+{ | |
+ unsigned long long abs_tsc; | |
+ long us; | |
+ | |
+ /* ipipe_tsc2us works on unsigned => handle sign separately */ | |
+ abs_tsc = (tsc >= 0) ? tsc : -tsc; | |
+ us = ipipe_tsc2us(abs_tsc); | |
+ if (tsc < 0) | |
+ return -us; | |
+ else | |
+ return us; | |
+} | |
+ | |
+static void | |
+__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point) | |
+{ | |
+ switch (point->type & IPIPE_TYPE_MASK) { | |
+ case IPIPE_TRACE_FUNC: | |
+ strcpy(buf, "func "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_BEGIN: | |
+ strcpy(buf, "begin "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_END: | |
+ strcpy(buf, "end "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_FREEZE: | |
+ strcpy(buf, "freeze "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_SPECIAL: | |
+ sprintf(buf, "(0x%02x) ", | |
+ point->type >> IPIPE_TYPE_BITS); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_PID: | |
+ sprintf(buf, "[%5d] ", (pid_t)point->v); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_EVENT: | |
+ sprintf(buf, "event "); | |
+ break; | |
+ } | |
+} | |
+ | |
+static void | |
+__ipipe_print_pathmark(struct seq_file *m, struct ipipe_trace_point *point) | |
+{ | |
+ char mark = ' '; | |
+ int point_no = point - print_path->point; | |
+ int i; | |
+ | |
+ if (print_path->end == point_no) | |
+ mark = '<'; | |
+ else if (print_path->begin == point_no) | |
+ mark = '>'; | |
+ else if (__ipipe_in_critical_trpath(point_no)) | |
+ mark = ':'; | |
+ seq_printf(m, "%c%c", mark, | |
+ (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' '); | |
+ | |
+ if (!verbose_trace) | |
+ return; | |
+ | |
+ for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--) | |
+ seq_printf(m, "%c", | |
+ (IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ? | |
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ? | |
+ '#' : '+') : | |
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ? '*' : ' ')); | |
+} | |
+ | |
+static void | |
+__ipipe_print_delay(struct seq_file *m, struct ipipe_trace_point *point) | |
+{ | |
+ unsigned long delay = 0; | |
+ int next; | |
+ char *mark = " "; | |
+ | |
+ next = WRAP_POINT_NO(point+1 - print_path->point); | |
+ | |
+ if (next != print_path->trace_pos) | |
+ delay = ipipe_tsc2ns(print_path->point[next].timestamp - | |
+ point->timestamp); | |
+ | |
+ if (__ipipe_in_critical_trpath(point - print_path->point)) { | |
+ if (delay > IPIPE_DELAY_WARN) | |
+ mark = "! "; | |
+ else if (delay > IPIPE_DELAY_NOTE) | |
+ mark = "+ "; | |
+ } | |
+ seq_puts(m, mark); | |
+ | |
+ if (verbose_trace) | |
+ seq_printf(m, "%3lu.%03lu%c ", delay/1000, delay%1000, | |
+ (point->flags & IPIPE_TFLG_NMI_HIT) ? 'N' : ' '); | |
+ else | |
+ seq_puts(m, " "); | |
+} | |
+ | |
+static void __ipipe_print_symname(struct seq_file *m, unsigned long eip) | |
+{ | |
+ char namebuf[KSYM_NAME_LEN+1]; | |
+ unsigned long size, offset; | |
+ const char *sym_name; | |
+ char *modname; | |
+ | |
+ sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf); | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_PANIC | |
+ if (!m) { | |
+ /* panic dump */ | |
+ if (sym_name) { | |
+ printk("%s+0x%lx", sym_name, offset); | |
+ if (modname) | |
+ printk(" [%s]", modname); | |
+ } else | |
+ printk("<%08lx>", eip); | |
+ } else | |
+#endif /* CONFIG_IPIPE_TRACE_PANIC */ | |
+ { | |
+ if (sym_name) { | |
+ if (verbose_trace) { | |
+ seq_printf(m, "%s+0x%lx", sym_name, offset); | |
+ if (modname) | |
+ seq_printf(m, " [%s]", modname); | |
+ } else | |
+ seq_puts(m, sym_name); | |
+ } else | |
+ seq_printf(m, "<%08lx>", eip); | |
+ } | |
+} | |
+ | |
+static void __ipipe_print_headline(struct seq_file *m) | |
+{ | |
+ const char *name[2]; | |
+ | |
+ seq_printf(m, "Calibrated minimum trace-point overhead: %lu.%03lu " | |
+ "us\n\n", trace_overhead/1000, trace_overhead%1000); | |
+ | |
+ if (verbose_trace) { | |
+ name[0] = ipipe_root_domain->name; | |
+ if (ipipe_head_domain != ipipe_root_domain) | |
+ name[1] = ipipe_head_domain->name; | |
+ else | |
+ name[1] = "<unused>"; | |
+ | |
+ seq_printf(m, | |
+ " +----- Hard IRQs ('|': locked)\n" | |
+ " |+-- %s\n" | |
+ " ||+- %s%s\n" | |
+ " ||| +---------- " | |
+ "Delay flag ('+': > %d us, '!': > %d us)\n" | |
+ " ||| | +- " | |
+ "NMI noise ('N')\n" | |
+ " ||| | |\n" | |
+ " Type User Val. Time Delay Function " | |
+ "(Parent)\n", | |
+ name[1], name[0], | |
+ " ('*': domain stalled, '+': current, " | |
+ "'#': current+stalled)", | |
+ IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000); | |
+ } else | |
+ seq_printf(m, | |
+ " +--------------- Hard IRQs ('|': locked)\n" | |
+ " | +- Delay flag " | |
+ "('+': > %d us, '!': > %d us)\n" | |
+ " | |\n" | |
+ " Type Time Function (Parent)\n", | |
+ IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000); | |
+} | |
+ | |
+static void *__ipipe_max_prtrace_start(struct seq_file *m, loff_t *pos) | |
+{ | |
+ loff_t n = *pos; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ | |
+ if (!n) { | |
+ struct ipipe_trace_path *tp; | |
+ unsigned long length_usecs; | |
+ int points, cpu; | |
+ unsigned long flags; | |
+ | |
+ /* protect against max_path/frozen_path updates while we | |
+ * haven't locked our target path, also avoid recursively | |
+ * taking global_path_lock from NMI context */ | |
+ flags = __ipipe_global_path_lock(); | |
+ | |
+ /* find the longest of all per-cpu paths */ | |
+ print_path = NULL; | |
+ for_each_online_cpu(cpu) { | |
+ tp = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)]; | |
+ if ((print_path == NULL) || | |
+ (tp->length > print_path->length)) { | |
+ print_path = tp; | |
+ break; | |
+ } | |
+ } | |
+ print_path->dump_lock = 1; | |
+ | |
+ __ipipe_global_path_unlock(flags); | |
+ | |
+ /* does this path actually contain data? */ | |
+ if (print_path->end == print_path->begin) | |
+ return NULL; | |
+ | |
+ /* number of points inside the critical path */ | |
+ points = WRAP_POINT_NO(print_path->end-print_path->begin+1); | |
+ | |
+ /* pre- and post-tracing length, post-trace length was frozen | |
+ in __ipipe_trace, pre-trace may have to be reduced due to | |
+ buffer overrun */ | |
+ print_pre_trace = pre_trace; | |
+ print_post_trace = WRAP_POINT_NO(print_path->trace_pos - | |
+ print_path->end - 1); | |
+ if (points+pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1) | |
+ print_pre_trace = IPIPE_TRACE_POINTS - 1 - points - | |
+ print_post_trace; | |
+ | |
+ length_usecs = ipipe_tsc2us(print_path->length); | |
+ seq_printf(m, "I-pipe worst-case tracing service on %s/ipipe release #%d\n" | |
+ "-------------------------------------------------------------\n", | |
+ UTS_RELEASE, IPIPE_CORE_RELEASE); | |
+ seq_printf(m, "CPU: %d, Begin: %lld cycles, Trace Points: " | |
+ "%d (-%d/+%d), Length: %lu us\n", | |
+ cpu, print_path->point[print_path->begin].timestamp, | |
+ points, print_pre_trace, print_post_trace, length_usecs); | |
+ __ipipe_print_headline(m); | |
+ } | |
+ | |
+ /* check if we are inside the trace range */ | |
+ if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 + | |
+ print_pre_trace + print_post_trace)) | |
+ return NULL; | |
+ | |
+ /* return the next point to be shown */ | |
+ return &print_path->point[WRAP_POINT_NO(print_path->begin - | |
+ print_pre_trace + n)]; | |
+} | |
+ | |
+static void *__ipipe_prtrace_next(struct seq_file *m, void *p, loff_t *pos) | |
+{ | |
+ loff_t n = ++*pos; | |
+ | |
+ /* check if we are inside the trace range with the next entry */ | |
+ if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 + | |
+ print_pre_trace + print_post_trace)) | |
+ return NULL; | |
+ | |
+ /* return the next point to be shown */ | |
+ return &print_path->point[WRAP_POINT_NO(print_path->begin - | |
+ print_pre_trace + *pos)]; | |
+} | |
+ | |
+static void __ipipe_prtrace_stop(struct seq_file *m, void *p) | |
+{ | |
+ if (print_path) | |
+ print_path->dump_lock = 0; | |
+ mutex_unlock(&out_mutex); | |
+} | |
+ | |
+static int __ipipe_prtrace_show(struct seq_file *m, void *p) | |
+{ | |
+ long time; | |
+ struct ipipe_trace_point *point = p; | |
+ char buf[16]; | |
+ | |
+ if (!point->eip) { | |
+ seq_puts(m, "-<invalid>-\n"); | |
+ return 0; | |
+ } | |
+ | |
+ __ipipe_print_pathmark(m, point); | |
+ __ipipe_trace_point_type(buf, point); | |
+ seq_puts(m, buf); | |
+ if (verbose_trace) | |
+ switch (point->type & IPIPE_TYPE_MASK) { | |
+ case IPIPE_TRACE_FUNC: | |
+ seq_puts(m, " "); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_PID: | |
+ __ipipe_get_task_info(buf, point, 0); | |
+ seq_puts(m, buf); | |
+ break; | |
+ | |
+ case IPIPE_TRACE_EVENT: | |
+ __ipipe_get_event_date(buf, print_path, point); | |
+ seq_puts(m, buf); | |
+ break; | |
+ | |
+ default: | |
+ seq_printf(m, "0x%08lx ", point->v); | |
+ } | |
+ | |
+ time = __ipipe_signed_tsc2us(point->timestamp - | |
+ print_path->point[print_path->begin].timestamp); | |
+ seq_printf(m, "%5ld", time); | |
+ | |
+ __ipipe_print_delay(m, point); | |
+ __ipipe_print_symname(m, point->eip); | |
+ seq_puts(m, " ("); | |
+ __ipipe_print_symname(m, point->parent_eip); | |
+ seq_puts(m, ")\n"); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static struct seq_operations __ipipe_max_ptrace_ops = { | |
+ .start = __ipipe_max_prtrace_start, | |
+ .next = __ipipe_prtrace_next, | |
+ .stop = __ipipe_prtrace_stop, | |
+ .show = __ipipe_prtrace_show | |
+}; | |
+ | |
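The max/frozen /proc files implement the stock seq_file contract wired up in the ops table above: start() positions the iteration (and pins the chosen path via dump_lock), next() advances it, show() renders one trace point, stop() releases. Driving such an iterator by hand looks like this userspace sketch (demo names only):

    #include <stdio.h>

    struct seq_ops {
        void *(*start)(long *pos);
        void *(*next)(void *p, long *pos);
        void (*stop)(void *p);
        int (*show)(void *p);
    };

    static int data[] = { 3, 1, 4, 1, 5 };

    static void *demo_start(long *pos)
    {
        return *pos < 5 ? &data[*pos] : NULL;
    }

    static void *demo_next(void *p, long *pos)
    {
        (void)p;
        ++*pos;
        return *pos < 5 ? &data[*pos] : NULL;
    }

    static void demo_stop(void *p) { (void)p; }

    static int demo_show(void *p)
    {
        printf("%d\n", *(int *)p);
        return 0;
    }

    int main(void)
    {
        struct seq_ops ops = { demo_start, demo_next, demo_stop, demo_show };
        long pos = 0;
        void *p = ops.start(&pos);

        while (p) {
            ops.show(p);
            p = ops.next(p, &pos);
        }
        ops.stop(p);
        return 0;
    }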
+static int __ipipe_max_prtrace_open(struct inode *inode, struct file *file) | |
+{ | |
+ return seq_open(file, &__ipipe_max_ptrace_ops); | |
+} | |
+ | |
+static ssize_t | |
+__ipipe_max_reset(struct file *file, const char __user *pbuffer, | |
+ size_t count, loff_t *data) | |
+{ | |
+ mutex_lock(&out_mutex); | |
+ ipipe_trace_max_reset(); | |
+ mutex_unlock(&out_mutex); | |
+ | |
+ return count; | |
+} | |
+ | |
+static const struct file_operations __ipipe_max_prtrace_fops = { | |
+ .open = __ipipe_max_prtrace_open, | |
+ .read = seq_read, | |
+ .write = __ipipe_max_reset, | |
+ .llseek = seq_lseek, | |
+ .release = seq_release, | |
+}; | |
+ | |
+static void *__ipipe_frozen_prtrace_start(struct seq_file *m, loff_t *pos) | |
+{ | |
+ loff_t n = *pos; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ | |
+ if (!n) { | |
+ struct ipipe_trace_path *tp; | |
+ int cpu; | |
+ unsigned long flags; | |
+ | |
+ /* protect against max_path/frozen_path updates while we | |
+ * haven't locked our target path, also avoid recursively | |
+ * taking global_path_lock from NMI context */ | |
+ flags = __ipipe_global_path_lock(); | |
+ | |
+ /* find the first of all per-cpu frozen paths */ | |
+ print_path = NULL; | |
+ for_each_online_cpu(cpu) { | |
+ tp = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)]; | |
+ if (tp->end >= 0) { | |
+ print_path = tp; | |
+ break; | |
+ } | |
+ } | |
+ if (print_path) | |
+ print_path->dump_lock = 1; | |
+ | |
+ __ipipe_global_path_unlock(flags); | |
+ | |
+ if (!print_path) | |
+ return NULL; | |
+ | |
+ /* back- and post-tracing length, post-trace length was frozen | |
+ in __ipipe_trace, back-trace may have to be reduced due to | |
+ buffer overrun */ | |
+ print_pre_trace = back_trace-1; /* subtract freeze point */ | 
+ print_post_trace = WRAP_POINT_NO(print_path->trace_pos - | |
+ print_path->end - 1); | |
+ if (1+print_pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1) | 
+ print_pre_trace = IPIPE_TRACE_POINTS - 2 - | |
+ print_post_trace; | |
+ | |
+ seq_printf(m, "I-pipe frozen back-tracing service on %s/ipipe release #%d\n" | |
+ "------------------------------------------------------------\n", | |
+ UTS_RELEASE, IPIPE_CORE_RELEASE); | |
+ seq_printf(m, "CPU: %d, Freeze: %lld cycles, Trace Points: %d (+%d)\n", | |
+ cpu, print_path->point[print_path->begin].timestamp, | |
+ print_pre_trace+1, print_post_trace); | |
+ __ipipe_print_headline(m); | |
+ } | |
+ | |
+ /* check if we are inside the trace range */ | |
+ if (n >= print_pre_trace + 1 + print_post_trace) | |
+ return NULL; | |
+ | |
+ /* return the next point to be shown */ | |
+ return &print_path->point[WRAP_POINT_NO(print_path->begin- | |
+ print_pre_trace+n)]; | |
+} | |
+ | |
+static struct seq_operations __ipipe_frozen_ptrace_ops = { | |
+ .start = __ipipe_frozen_prtrace_start, | |
+ .next = __ipipe_prtrace_next, | |
+ .stop = __ipipe_prtrace_stop, | |
+ .show = __ipipe_prtrace_show | |
+}; | |
+ | |
+static int __ipipe_frozen_prtrace_open(struct inode *inode, struct file *file) | |
+{ | |
+ return seq_open(file, &__ipipe_frozen_ptrace_ops); | |
+} | |
+ | |
+static ssize_t | |
+__ipipe_frozen_ctrl(struct file *file, const char __user *pbuffer, | |
+ size_t count, loff_t *data) | |
+{ | |
+ char *end, buf[16]; | |
+ int val; | |
+ int n; | |
+ | |
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count; | |
+ | |
+ if (copy_from_user(buf, pbuffer, n)) | |
+ return -EFAULT; | |
+ | |
+ buf[n] = '\0'; | |
+ val = simple_strtol(buf, &end, 0); | |
+ | |
+ if (((*end != '\0') && !isspace(*end)) || (val < 0)) | |
+ return -EINVAL; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ ipipe_trace_frozen_reset(); | |
+ if (val > 0) | |
+ ipipe_trace_freeze(-1); | |
+ mutex_unlock(&out_mutex); | |
+ | |
+ return count; | |
+} | |
+ | |
+static const struct file_operations __ipipe_frozen_prtrace_fops = { | |
+ .open = __ipipe_frozen_prtrace_open, | |
+ .read = seq_read, | |
+ .write = __ipipe_frozen_ctrl, | |
+ .llseek = seq_lseek, | |
+ .release = seq_release, | |
+}; | |
+ | |
+static int __ipipe_rd_proc_val(struct seq_file *p, void *data) | |
+{ | |
+ seq_printf(p, "%d\n", *(int *)p->private); | 
+ return 0; | |
+} | |
+ | |
+static ssize_t | |
+__ipipe_wr_proc_val(struct file *file, const char __user *buffer, | |
+ size_t count, loff_t *data) | |
+{ | |
+ struct seq_file *p = file->private_data; | |
+ char *end, buf[16]; | |
+ int val; | |
+ int n; | |
+ | |
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count; | |
+ | |
+ if (copy_from_user(buf, buffer, n)) | |
+ return -EFAULT; | |
+ | |
+ buf[n] = '\0'; | |
+ val = simple_strtol(buf, &end, 0); | |
+ | |
+ if (((*end != '\0') && !isspace(*end)) || (val < 0)) | |
+ return -EINVAL; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ *(int *)p->private = val; | |
+ mutex_unlock(&out_mutex); | |
+ | |
+ return count; | |
+} | |
+ | |
+static int __ipipe_rw_proc_val_open(struct inode *inode, struct file *file) | |
+{ | |
+ return single_open(file, __ipipe_rd_proc_val, PDE_DATA(inode)); | |
+} | |
+ | |
+static const struct file_operations __ipipe_rw_proc_val_ops = { | |
+ .open = __ipipe_rw_proc_val_open, | |
+ .read = seq_read, | |
+ .write = __ipipe_wr_proc_val, | |
+ .llseek = seq_lseek, | |
+ .release = single_release, | |
+}; | |
+ | |
+static void __init | |
+__ipipe_create_trace_proc_val(struct proc_dir_entry *trace_dir, | |
+ const char *name, int *value_ptr) | |
+{ | |
+ proc_create_data(name, 0644, trace_dir, &__ipipe_rw_proc_val_ops, | |
+ value_ptr); | |
+} | |
+ | |
+static int __ipipe_rd_trigger(struct seq_file *p, void *data) | |
+{ | |
+ char str[KSYM_SYMBOL_LEN]; | |
+ | |
+ if (trigger_begin) { | |
+ sprint_symbol(str, trigger_begin); | |
+ seq_printf(p, "%s\n", str); | |
+ } | |
+ return 0; | |
+} | |
+ | |
+static ssize_t | |
+__ipipe_wr_trigger(struct file *file, const char __user *buffer, | |
+ size_t count, loff_t *data) | |
+{ | |
+ char buf[KSYM_SYMBOL_LEN]; | |
+ unsigned long begin, end; | |
+ | |
+ if (count > sizeof(buf) - 1) | |
+ count = sizeof(buf) - 1; | |
+ if (copy_from_user(buf, buffer, count)) | |
+ return -EFAULT; | |
+ buf[count] = 0; | |
+ if (buf[count-1] == '\n') | |
+ buf[count-1] = 0; | |
+ | |
+ begin = kallsyms_lookup_name(buf); | |
+ if (!begin || !kallsyms_lookup_size_offset(begin, &end, NULL)) | |
+ return -ENOENT; | |
+ end += begin - 1; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ /* invalidate the current range before setting a new one */ | |
+ trigger_end = 0; | |
+ wmb(); | |
+ ipipe_trace_frozen_reset(); | |
+ | |
+ /* set new range */ | |
+ trigger_begin = begin; | |
+ wmb(); | |
+ trigger_end = end; | |
+ mutex_unlock(&out_mutex); | |
+ | |
+ return count; | |
+} | |
+ | |
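__ipipe_wr_trigger() updates the [trigger_begin, trigger_end] pair while __ipipe_trace() may be testing it concurrently, so it closes the range first, writes the new begin, and only then publishes the new end, with write barriers ordering the steps. A userspace sketch of that publish order, using GCC/Clang atomic builtins where the patch uses wmb() (a model, not the kernel code):

    #include <stdio.h>

    static unsigned long trigger_begin, trigger_end;

    /* Readers test begin <= ip <= end; while end == 0 nothing can
     * match, so a half-written range is never observed. */
    static void set_trigger_range(unsigned long begin, unsigned long end)
    {
        __atomic_store_n(&trigger_end, 0UL, __ATOMIC_RELEASE);    /* close */
        __atomic_store_n(&trigger_begin, begin, __ATOMIC_RELEASE);
        __atomic_store_n(&trigger_end, end, __ATOMIC_RELEASE);    /* open */
    }

    static int in_range(unsigned long ip)
    {
        return ip >= trigger_begin && ip <= trigger_end;
    }

    int main(void)
    {
        set_trigger_range(0x1000, 0x1040);
        printf("0x1020 in range: %d\n", in_range(0x1020));
        return 0;
    }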
+static int __ipipe_rw_trigger_open(struct inode *inode, struct file *file) | |
+{ | |
+ return single_open(file, __ipipe_rd_trigger, NULL); | |
+} | |
+ | |
+static const struct file_operations __ipipe_rw_trigger_ops = { | |
+ .open = __ipipe_rw_trigger_open, | |
+ .read = seq_read, | |
+ .write = __ipipe_wr_trigger, | |
+ .llseek = seq_lseek, | |
+ .release = single_release, | |
+}; | |
+ | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT | |
+static void notrace | |
+ipipe_trace_function(unsigned long ip, unsigned long parent_ip, | |
+ struct ftrace_ops *op, struct pt_regs *regs) | |
+{ | |
+ if (!ipipe_trace_enable) | |
+ return; | |
+ __ipipe_trace(IPIPE_TRACE_FUNC, ip, parent_ip, 0); | |
+} | |
+ | |
+static struct ftrace_ops ipipe_trace_ops = { | |
+ .func = ipipe_trace_function, | |
+ .flags = FTRACE_OPS_FL_IPIPE_EXCLUSIVE, | |
+}; | |
+ | |
+static ssize_t __ipipe_wr_enable(struct file *file, const char __user *buffer, | |
+ size_t count, loff_t *data) | |
+{ | |
+ char *end, buf[16]; | |
+ int val; | |
+ int n; | |
+ | |
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count; | |
+ | |
+ if (copy_from_user(buf, buffer, n)) | |
+ return -EFAULT; | |
+ | |
+ buf[n] = '\0'; | |
+ val = simple_strtol(buf, &end, 0); | |
+ | |
+ if (((*end != '\0') && !isspace(*end)) || (val < 0)) | |
+ return -EINVAL; | |
+ | |
+ mutex_lock(&out_mutex); | |
+ | |
+ if (ipipe_trace_enable) { | |
+ if (!val) | |
+ unregister_ftrace_function(&ipipe_trace_ops); | |
+ } else if (val) | |
+ register_ftrace_function(&ipipe_trace_ops); | |
+ | |
+ ipipe_trace_enable = val; | |
+ | |
+ mutex_unlock(&out_mutex); | |
+ | |
+ return count; | |
+} | |
+ | |
+static const struct file_operations __ipipe_rw_enable_ops = { | |
+ .open = __ipipe_rw_proc_val_open, | |
+ .read = seq_read, | |
+ .write = __ipipe_wr_enable, | |
+ .llseek = seq_lseek, | |
+ .release = single_release, | |
+}; | |
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */ | |
+ | |
+extern struct proc_dir_entry *ipipe_proc_root; | |
+ | |
+void __init __ipipe_init_tracer(void) | |
+{ | |
+ struct proc_dir_entry *trace_dir; | |
+ unsigned long long start, end, min = ULLONG_MAX; | |
+ int i; | |
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC | |
+ int cpu, path; | |
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */ | |
+ | |
+ if (!__ipipe_hrclock_ok()) | |
+ return; | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC | |
+ for_each_possible_cpu(cpu) { | |
+ struct ipipe_trace_path *tp_buf; | |
+ | |
+ tp_buf = vmalloc_node(sizeof(struct ipipe_trace_path) * | |
+ IPIPE_TRACE_PATHS, cpu_to_node(cpu)); | |
+ if (!tp_buf) { | |
+ pr_err("I-pipe: " | |
+ "insufficient memory for trace buffer.\n"); | |
+ return; | |
+ } | |
+ memset(tp_buf, 0, | |
+ sizeof(struct ipipe_trace_path) * IPIPE_TRACE_PATHS); | |
+ for (path = 0; path < IPIPE_TRACE_PATHS; path++) { | |
+ tp_buf[path].begin = -1; | |
+ tp_buf[path].end = -1; | |
+ } | |
+ per_cpu(trace_path, cpu) = tp_buf; | |
+ } | |
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */ | |
+ | |
+ /* Calculate minimum overhead of __ipipe_trace() */ | |
+ hard_local_irq_disable(); | |
+ for (i = 0; i < 100; i++) { | |
+ ipipe_read_tsc(start); | |
+ __ipipe_trace(IPIPE_TRACE_FUNC, __BUILTIN_RETURN_ADDRESS0, | |
+ __BUILTIN_RETURN_ADDRESS1, 0); | |
+ ipipe_read_tsc(end); | |
+ | |
+ end -= start; | |
+ if (end < min) | |
+ min = end; | |
+ } | |
+ hard_local_irq_enable(); | |
+ trace_overhead = ipipe_tsc2ns(min); | |
+ | |
+#ifdef CONFIG_IPIPE_TRACE_ENABLE | |
+ ipipe_trace_enable = 1; | |
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT | |
+ ftrace_enabled = 1; | |
+ register_ftrace_function(&ipipe_trace_ops); | |
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */ | |
+#endif /* CONFIG_IPIPE_TRACE_ENABLE */ | |
+ | |
+ trace_dir = proc_mkdir("trace", ipipe_proc_root); | |
+ | |
+ proc_create("max", 0644, trace_dir, &__ipipe_max_prtrace_fops); | |
+ proc_create("frozen", 0644, trace_dir, &__ipipe_frozen_prtrace_fops); | |
+ | |
+ proc_create("trigger", 0644, trace_dir, &__ipipe_rw_trigger_ops); | |
+ | |
+ __ipipe_create_trace_proc_val(trace_dir, "pre_trace_points", | |
+ &pre_trace); | |
+ __ipipe_create_trace_proc_val(trace_dir, "post_trace_points", | |
+ &post_trace); | |
+ __ipipe_create_trace_proc_val(trace_dir, "back_trace_points", | |
+ &back_trace); | |
+ __ipipe_create_trace_proc_val(trace_dir, "verbose", | |
+ &verbose_trace); | |
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT | |
+ proc_create_data("enable", 0644, trace_dir, &__ipipe_rw_enable_ops, | |
+ &ipipe_trace_enable); | |
+#else /* !CONFIG_IPIPE_TRACE_MCOUNT */ | |
+ __ipipe_create_trace_proc_val(trace_dir, "enable", | |
+ &ipipe_trace_enable); | |
+#endif /* !CONFIG_IPIPE_TRACE_MCOUNT */ | |
+} | |
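Throughout tracer.c, point indices are only ever manipulated through WRAP_POINT_NO(), so the begin/end/trace_pos cursors chase each other around a power-of-two ring without bounds checks. The indexing scheme in miniature (buffer size and type codes invented for the demo):

    #include <stdio.h>

    #define TRACE_POINTS 16                 /* must be a power of two */
    #define WRAP(no) ((no) & (TRACE_POINTS - 1))

    struct trace_point { int type; unsigned long value; };

    static struct trace_point ring[TRACE_POINTS];
    static int trace_pos, path_begin = -1, path_end = -1;

    static void record(int type, unsigned long value)
    {
        int pos = trace_pos;

        ring[pos].type = type;
        ring[pos].value = value;
        trace_pos = WRAP(pos + 1);

        if (type == 1 && path_begin < 0)    /* TRACE_BEGIN */
            path_begin = pos;
        if (type == 2 && path_begin >= 0)   /* TRACE_END */
            path_end = pos;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 40; i++)            /* wraps the ring twice */
            record(i == 5 ? 1 : i == 30 ? 2 : 0, i);

        printf("begin=%d end=%d span=%d points\n", path_begin,
               path_end, WRAP(path_end - path_begin + 1));
        return 0;
    }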
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c | |
index cbd97ce..f0f8718 100644 | |
--- a/kernel/irq/chip.c | |
+++ b/kernel/irq/chip.c | |
@@ -15,6 +15,7 @@ | |
#include <linux/module.h> | |
#include <linux/interrupt.h> | |
#include <linux/kernel_stat.h> | |
+#include <linux/ipipe.h> | |
#include <trace/events/irq.h> | |
@@ -180,8 +181,10 @@ int irq_startup(struct irq_desc *desc, bool resend) | |
desc->depth = 0; | |
if (desc->irq_data.chip->irq_startup) { | |
+ unsigned long flags = hard_cond_local_irq_save(); | |
ret = desc->irq_data.chip->irq_startup(&desc->irq_data); | |
irq_state_clr_masked(desc); | |
+ hard_cond_local_irq_restore(flags); | |
} else { | |
irq_enable(desc); | |
} | |
@@ -205,12 +208,14 @@ void irq_shutdown(struct irq_desc *desc) | |
void irq_enable(struct irq_desc *desc) | |
{ | |
+ unsigned long flags = hard_cond_local_irq_save(); | |
irq_state_clr_disabled(desc); | |
if (desc->irq_data.chip->irq_enable) | |
desc->irq_data.chip->irq_enable(&desc->irq_data); | |
else | |
desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
irq_state_clr_masked(desc); | |
+ hard_cond_local_irq_restore(flags); | |
} | |
void irq_disable(struct irq_desc *desc) | |
@@ -224,11 +229,13 @@ void irq_disable(struct irq_desc *desc) | |
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) | |
{ | |
+ unsigned long flags = hard_cond_local_irq_save(); | |
if (desc->irq_data.chip->irq_enable) | |
desc->irq_data.chip->irq_enable(&desc->irq_data); | |
else | |
desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
cpumask_set_cpu(cpu, desc->percpu_enabled); | |
+ hard_cond_local_irq_restore(flags); | |
} | |
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
@@ -262,9 +269,13 @@ void mask_irq(struct irq_desc *desc) | |
void unmask_irq(struct irq_desc *desc) | |
{ | |
+ unsigned long flags; | |
+ | |
if (desc->irq_data.chip->irq_unmask) { | |
+ flags = hard_cond_local_irq_save(); | |
desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
irq_state_clr_masked(desc); | |
+ hard_cond_local_irq_restore(flags); | |
} | |
} | |
@@ -385,7 +396,9 @@ void | |
handle_level_irq(unsigned int irq, struct irq_desc *desc) | |
{ | |
raw_spin_lock(&desc->lock); | |
+#ifndef CONFIG_IPIPE | |
mask_ack_irq(desc); | |
+#endif | |
if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | |
if (!irq_check_poll(desc)) | |
@@ -422,6 +435,15 @@ static inline void preflow_handler(struct irq_desc *desc) | |
static inline void preflow_handler(struct irq_desc *desc) { } | |
#endif | |
+#ifdef CONFIG_IPIPE | |
+static void cond_release_fasteoi_irq(struct irq_desc *desc) | |
+{ | |
+ if (desc->irq_data.chip->irq_release && | |
+ !irqd_irq_disabled(&desc->irq_data) && !desc->threads_oneshot) | |
+ desc->irq_data.chip->irq_release(&desc->irq_data); | |
+} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
/** | |
* handle_fasteoi_irq - irq handler for transparent controllers | |
* @irq: the interrupt number | |
@@ -454,17 +476,26 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |
goto out; | |
} | |
+#ifndef CONFIG_IPIPE | |
if (desc->istate & IRQS_ONESHOT) | |
mask_irq(desc); | |
+#endif | |
preflow_handler(desc); | |
handle_irq_event(desc); | |
+#ifdef CONFIG_IPIPE | |
+ /* IRQCHIP_EOI_IF_HANDLED is ignored as I-pipe ends it early on | |
+ * acceptance. */ | |
+ cond_release_fasteoi_irq(desc); | |
+out_eoi: | |
+#else /* !CONFIG_IPIPE */ | |
if (desc->istate & IRQS_ONESHOT) | |
cond_unmask_irq(desc); | |
out_eoi: | |
desc->irq_data.chip->irq_eoi(&desc->irq_data); | |
+#endif /* !CONFIG_IPIPE */ | |
out_unlock: | |
raw_spin_unlock(&desc->lock); | |
return; | |
@@ -512,7 +543,9 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |
kstat_incr_irqs_this_cpu(irq, desc); | |
/* Start handling the irq */ | |
+#ifndef CONFIG_IPIPE | |
desc->irq_data.chip->irq_ack(&desc->irq_data); | |
+#endif | |
do { | |
if (unlikely(!desc->action)) { | |
@@ -600,6 +633,18 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |
kstat_incr_irqs_this_cpu(irq, desc); | |
+#ifdef CONFIG_IPIPE | |
+ (void)chip; | |
+ | |
+ handle_irq_event_percpu(desc, desc->action); | |
+ | |
+ if ((desc->percpu_enabled == NULL || | |
+ cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled)) && | |
+ !irqd_irq_masked(&desc->irq_data) && | |
+ !desc->threads_oneshot && | |
+ desc->ipipe_end) | |
+ desc->ipipe_end(desc->irq_data.irq, desc); | |
+#else | |
if (chip->irq_ack) | |
chip->irq_ack(&desc->irq_data); | |
@@ -607,6 +652,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |
if (chip->irq_eoi) | |
chip->irq_eoi(&desc->irq_data); | |
+#endif | |
} | |
/** | |
@@ -630,17 +676,144 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | |
kstat_incr_irqs_this_cpu(irq, desc); | |
+#ifndef CONFIG_IPIPE | |
if (chip->irq_ack) | |
chip->irq_ack(&desc->irq_data); | |
+#else | |
+ (void)chip; | |
+#endif | |
trace_irq_handler_entry(irq, action); | |
res = action->handler(irq, dev_id); | |
trace_irq_handler_exit(irq, action, res); | |
+#ifndef CONFIG_IPIPE | |
if (chip->irq_eoi) | |
chip->irq_eoi(&desc->irq_data); | |
+#else | |
+ if ((desc->percpu_enabled == NULL || | |
+ cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled)) && | |
+ !irqd_irq_masked(&desc->irq_data) && | |
+ !desc->threads_oneshot && | |
+ desc->ipipe_end) | |
+ desc->ipipe_end(desc->irq_data.irq, desc); | |
+#endif | |
+} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+void __ipipe_ack_level_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ mask_ack_irq(desc); | |
+} | |
+ | |
+void __ipipe_end_level_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
+} | |
+ | |
+void __ipipe_ack_fasteoi_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ desc->irq_data.chip->irq_hold(&desc->irq_data); | |
} | |
+void __ipipe_end_fasteoi_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ if (desc->irq_data.chip->irq_release) | |
+ desc->irq_data.chip->irq_release(&desc->irq_data); | |
+} | |
+ | |
+void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ desc->irq_data.chip->irq_ack(&desc->irq_data); | |
+} | |
+ | |
+void __ipipe_ack_percpu_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ if (desc->irq_data.chip->irq_ack) | |
+ desc->irq_data.chip->irq_ack(&desc->irq_data); | |
+ | |
+ if (desc->irq_data.chip->irq_eoi) | |
+ desc->irq_data.chip->irq_eoi(&desc->irq_data); | |
+} | |
+ | |
+void __ipipe_nop_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+} | |
+ | |
+void __ipipe_chained_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ /* | |
+ * XXX: Do NOT fold this into __ipipe_nop_irq(), see | |
+ * ipipe_chained_irq_p(). | |
+ */ | |
+} | |
+ | |
+static void __ipipe_ack_bad_irq(unsigned irq, struct irq_desc *desc) | |
+{ | |
+ handle_bad_irq(irq, desc); | |
+ WARN_ON_ONCE(1); | |
+} | |
+ | |
+irq_flow_handler_t | |
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained) | |
+{ | |
+ if (unlikely(handle == NULL)) { | |
+ desc->ipipe_ack = __ipipe_ack_bad_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } else { | |
+ if (is_chained) { | |
+ desc->ipipe_ack = handle; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ handle = __ipipe_chained_irq; | |
+ } else if (handle == handle_simple_irq) { | |
+ desc->ipipe_ack = __ipipe_nop_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } else if (handle == handle_level_irq) { | |
+ desc->ipipe_ack = __ipipe_ack_level_irq; | |
+ desc->ipipe_end = __ipipe_end_level_irq; | |
+ } else if (handle == handle_edge_irq) { | |
+ desc->ipipe_ack = __ipipe_ack_edge_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } else if (handle == handle_fasteoi_irq) { | |
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq; | |
+ desc->ipipe_end = __ipipe_end_fasteoi_irq; | |
+ } else if (handle == handle_percpu_irq || | |
+ handle == handle_percpu_devid_irq) { | |
+ if (irq_desc_get_chip(desc) && | |
+ irq_desc_get_chip(desc)->irq_hold) { | |
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq; | |
+ desc->ipipe_end = __ipipe_end_fasteoi_irq; | |
+ } else { | |
+ desc->ipipe_ack = __ipipe_ack_percpu_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } | |
+ } else if (irq_desc_get_chip(desc) == &no_irq_chip) { | |
+ desc->ipipe_ack = __ipipe_nop_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } else { | |
+ desc->ipipe_ack = __ipipe_ack_bad_irq; | |
+ desc->ipipe_end = __ipipe_nop_irq; | |
+ } | |
+ } | |
+ | |
+ /* Suppress intermediate trampoline routine. */ | |
+ ipipe_root_domain->irqs[desc->irq_data.irq].ackfn = desc->ipipe_ack; | |
+ | |
+ return handle; | |
+} | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+irq_flow_handler_t | |
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained) | |
+{ | |
+ return handle; | |
+} | |
+ | |
+#endif /* !CONFIG_IPIPE */ | |
+EXPORT_SYMBOL_GPL(__fixup_irq_handler); | |
+ | |
void | |
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
const char *name) | |
@@ -658,6 +831,8 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
goto out; | |
} | |
+ handle = __fixup_irq_handler(desc, handle, is_chained); | |
+ | |
/* Uninstall? */ | |
if (handle == handle_bad_irq) { | |
if (desc->irq_data.chip != &no_irq_chip) | |
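The chip.c changes split every flow handler into an "ack" stage, run immediately when the pipeline receives the interrupt, and an "end" stage, run once the root domain has actually handled it; __fixup_irq_handler() picks the pair by comparing the installed handler pointer. The dispatch reduces to a function-pointer table lookup, roughly (all names hypothetical):

    #include <stdio.h>

    typedef void (*flow_handler_t)(int irq);
    typedef void (*stage_fn_t)(int irq);

    static void handle_level(int irq) { printf("level flow %d\n", irq); }
    static void handle_edge(int irq)  { printf("edge flow %d\n", irq); }

    static void ack_level(int irq) { printf("mask+ack %d\n", irq); }
    static void end_level(int irq) { printf("unmask %d\n", irq); }
    static void ack_edge(int irq)  { printf("ack %d\n", irq); }
    static void stage_nop(int irq) { (void)irq; }

    struct irq_slot {
        flow_handler_t handler;
        stage_fn_t ack, end;
    };

    /* Mirrors the handler-pointer comparison in __fixup_irq_handler(). */
    static void fixup(struct irq_slot *slot, flow_handler_t handler)
    {
        slot->handler = handler;
        if (handler == handle_level) {
            slot->ack = ack_level;
            slot->end = end_level;
        } else if (handler == handle_edge) {
            slot->ack = ack_edge;
            slot->end = stage_nop;
        } else {
            slot->ack = stage_nop;
            slot->end = stage_nop;
        }
    }

    int main(void)
    {
        struct irq_slot slot;

        fixup(&slot, handle_level);
        slot.ack(7);        /* immediate stage, hard IRQs off */
        slot.handler(7);    /* deferred root-domain stage */
        slot.end(7);
        return 0;
    }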
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c | |
index c89295a..3c308ca 100644 | |
--- a/kernel/irq/generic-chip.c | |
+++ b/kernel/irq/generic-chip.c | |
@@ -40,11 +40,12 @@ void irq_gc_mask_disable_reg(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); | |
gc->mask_cache &= ~mask; | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -58,11 +59,12 @@ void irq_gc_mask_set_bit(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
gc->mask_cache |= mask; | |
irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -76,11 +78,12 @@ void irq_gc_mask_clr_bit(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
gc->mask_cache &= ~mask; | |
irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -94,11 +97,12 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); | |
gc->mask_cache |= mask; | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -109,10 +113,11 @@ void irq_gc_ack_set_bit(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -123,10 +128,11 @@ void irq_gc_ack_clr_bit(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = ~(1 << (d->irq - gc->irq_base)); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -137,11 +143,12 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -152,10 +159,11 @@ void irq_gc_eoi(struct irq_data *d) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
} | |
/** | |
@@ -170,16 +178,17 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) | |
{ | |
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | |
u32 mask = 1 << (d->irq - gc->irq_base); | |
+ unsigned long flags; | |
if (!(mask & gc->wake_enabled)) | |
return -EINVAL; | |
- irq_gc_lock(gc); | |
+ flags = irq_gc_lock(gc); | |
if (on) | |
gc->wake_active |= mask; | |
else | |
gc->wake_active &= ~mask; | |
- irq_gc_unlock(gc); | |
+ irq_gc_unlock(gc, flags); | |
return 0; | |
} | |
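All the generic-chip hunks make the same mechanical change: irq_gc_lock()/irq_gc_unlock() become a save/restore pair so the lock can additionally mask hard (pipeline-level) interrupts, and every caller threads the returned flags value through. The API shape, as a standalone sketch with stand-in primitives:

    #include <stdio.h>

    /* Stand-ins; in kernel context these would be the raw spinlock
     * plus hard_local_irq_save()/restore(). */
    static unsigned long hard_irq_save(void) { return 0xA5; }
    static void hard_irq_restore(unsigned long f) { (void)f; }

    struct gen_chip { unsigned int mask_cache; };

    /* Old shape: lock(gc); ... unlock(gc);
     * New shape: flags = lock(gc); ... unlock(gc, flags); */
    static unsigned long gc_lock(struct gen_chip *gc)
    {
        (void)gc;
        return hard_irq_save();
    }

    static void gc_unlock(struct gen_chip *gc, unsigned long flags)
    {
        (void)gc;
        hard_irq_restore(flags);
    }

    int main(void)
    {
        struct gen_chip gc = { 0 };
        unsigned long flags;

        flags = gc_lock(&gc);
        gc.mask_cache |= 1u << 3;   /* register update under the lock */
        gc_unlock(&gc, flags);

        printf("mask_cache=0x%x\n", gc.mask_cache);
        return 0;
    }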
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c | |
index 192a302..7e9ab4de1f 100644 | |
--- a/kernel/irq/irqdesc.c | |
+++ b/kernel/irq/irqdesc.c | |
@@ -270,10 +270,12 @@ int __init early_irq_init(void) | |
return arch_early_irq_init(); | |
} | |
+#ifndef CONFIG_IPIPE | |
struct irq_desc *irq_to_desc(unsigned int irq) | |
{ | |
return (irq < NR_IRQS) ? irq_desc + irq : NULL; | |
} | |
+#endif /* CONFIG_IPIPE */ | |
static void free_desc(unsigned int irq) | |
{ | |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c | |
index dc4db32..f61872e 100644 | |
--- a/kernel/irq/manage.c | |
+++ b/kernel/irq/manage.c | |
@@ -716,9 +716,14 @@ again: | |
desc->threads_oneshot &= ~action->thread_mask; | |
+#ifndef CONFIG_IPIPE | |
if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | |
irqd_irq_masked(&desc->irq_data)) | |
unmask_irq(desc); | |
+#else /* CONFIG_IPIPE */ | |
+ if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data)) | |
+ desc->ipipe_end(desc->irq_data.irq, desc); | |
+#endif /* CONFIG_IPIPE */ | |
out_unlock: | |
raw_spin_unlock_irq(&desc->lock); | |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c | |
index 1f3186b..5b710c8 100644 | |
--- a/kernel/lockdep.c | |
+++ b/kernel/lockdep.c | |
@@ -2577,7 +2577,7 @@ void trace_hardirqs_on_caller(unsigned long ip) | |
* already enabled, yet we find the hardware thinks they are in fact | |
* enabled.. someone messed up their IRQ state tracing. | |
*/ | |
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | |
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled())) | |
return; | |
/* | |
@@ -2621,7 +2621,7 @@ void trace_hardirqs_off_caller(unsigned long ip) | |
* So we're supposed to get called after you mask local IRQs, but for | |
* some reason the hardware doesn't quite think you did a proper job. | |
*/ | |
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | |
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled())) | |
return; | |
if (curr->hardirqs_enabled) { | |
@@ -2657,7 +2657,7 @@ void trace_softirqs_on(unsigned long ip) | |
* We fancy IRQs being disabled here, see softirq.c, avoids | |
* funny state and nesting things. | |
*/ | |
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | |
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled())) | |
return; | |
if (curr->softirqs_enabled) { | |
@@ -2696,7 +2696,7 @@ void trace_softirqs_off(unsigned long ip) | |
/* | |
* We fancy IRQs being disabled here, see softirq.c | |
*/ | |
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | |
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled())) | |
return; | |
if (curr->softirqs_enabled) { | |
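The lockdep hunks encode the pipeline's two-level interrupt state: for these sanity checks, IRQs count as disabled if either the root domain's virtual mask or the CPU's real hardware mask is set. The predicate in isolation (flag names invented):

    #include <stdio.h>
    #include <stdbool.h>

    static bool virt_irqs_off;   /* root domain's stall bit */
    static bool hard_irqs_off;   /* real CPU interrupt mask */

    static bool irqs_effectively_off(void)
    {
        return virt_irqs_off || hard_irqs_off;
    }

    int main(void)
    {
        /* e.g. inside a head-domain section: only the hard mask is set */
        virt_irqs_off = false;
        hard_irqs_off = true;

        printf("lockdep warning would fire: %s\n",
               irqs_effectively_off() ? "no" : "yes");
        return 0;
    }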
diff --git a/kernel/panic.c b/kernel/panic.c | |
index 167ec09..cd078a7 100644 | |
--- a/kernel/panic.c | |
+++ b/kernel/panic.c | |
@@ -22,6 +22,7 @@ | |
#include <linux/sysrq.h> | |
#include <linux/init.h> | |
#include <linux/nmi.h> | |
+#include <linux/ipipe_trace.h> | |
#define PANIC_TIMER_STEP 100 | |
#define PANIC_BLINK_SPD 18 | |
@@ -352,6 +353,8 @@ void oops_enter(void) | |
{ | |
tracing_off(); | |
/* can't trust the integrity of the kernel anymore: */ | |
+ ipipe_trace_panic_freeze(); | |
+ ipipe_disable_context_check(); | |
debug_locks_off(); | |
do_oops_enter_exit(); | |
} | |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c | |
index b26f5f1..72c124e 100644 | |
--- a/kernel/power/hibernate.c | |
+++ b/kernel/power/hibernate.c | |
@@ -274,6 +274,7 @@ static int create_image(int platform_mode) | |
goto Enable_cpus; | |
local_irq_disable(); | |
+ hard_cond_local_irq_disable(); | |
error = syscore_suspend(); | |
if (error) { | |
@@ -427,6 +428,7 @@ static int resume_target_kernel(bool platform_mode) | |
goto Enable_cpus; | |
local_irq_disable(); | |
+ hard_cond_local_irq_disable(); | |
error = syscore_suspend(); | |
if (error) | |
@@ -542,6 +544,7 @@ int hibernation_platform_enter(void) | |
goto Platform_finish; | |
local_irq_disable(); | |
+ hard_cond_local_irq_disable(); | |
syscore_suspend(); | |
if (pm_wakeup_pending()) { | |
error = -EAGAIN; | |
diff --git a/kernel/printk.c b/kernel/printk.c | |
index d37d45c..793eff0 100644 | |
--- a/kernel/printk.c | |
+++ b/kernel/printk.c | |
@@ -1657,6 +1657,43 @@ asmlinkage int printk_emit(int facility, int level, | |
} | |
EXPORT_SYMBOL(printk_emit); | |
+#ifdef CONFIG_IPIPE | |
+ | |
+extern int __ipipe_printk_bypass; | |
+ | |
+static IPIPE_DEFINE_SPINLOCK(__ipipe_printk_lock); | |
+ | |
+static int __ipipe_printk_fill; | |
+ | |
+static char __ipipe_printk_buf[__LOG_BUF_LEN]; | |
+ | |
+void __ipipe_flush_printk(unsigned virq, void *cookie) | 
+{ | |
+ char *p = __ipipe_printk_buf; | |
+ int len, lmax, out = 0; | |
+ unsigned long flags; | |
+ | |
+ goto start; | |
+ | |
+ do { | |
+ spin_unlock_irqrestore(&__ipipe_printk_lock, flags); | |
+ start: | |
+ lmax = __ipipe_printk_fill; | |
+ while (out < lmax) { | |
+ len = strlen(p) + 1; | |
+ printk("%s", p); | 
+ p += len; | |
+ out += len; | |
+ } | |
+ spin_lock_irqsave(&__ipipe_printk_lock, flags); | |
+ } | |
+ while (__ipipe_printk_fill != lmax); | |
+ | |
+ __ipipe_printk_fill = 0; | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_printk_lock, flags); | |
+} | |
+ | |
/** | |
* printk - print a kernel message | |
* @fmt: format string | |
@@ -1680,6 +1717,59 @@ EXPORT_SYMBOL(printk_emit); | |
*/ | |
asmlinkage int printk(const char *fmt, ...) | |
{ | |
+ int sprintk = 1, cs = -1; | |
+ int r, fbytes, oldcount; | |
+ unsigned long flags; | |
+ va_list args; | |
+ | |
+ va_start(args, fmt); | |
+ | |
+ flags = hard_local_irq_save(); | |
+ | |
+ if (__ipipe_printk_bypass || oops_in_progress) | |
+ cs = ipipe_disable_context_check(); | |
+ else if (__ipipe_current_domain == ipipe_root_domain) { | |
+ if (ipipe_head_domain != ipipe_root_domain && | |
+ (raw_irqs_disabled_flags(flags) || | |
+ test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status))) | |
+ sprintk = 0; | |
+ } else | |
+ sprintk = 0; | |
+ | |
+ hard_local_irq_restore(flags); | |
+ | |
+ if (sprintk) { | |
+ r = vprintk(fmt, args); | |
+ if (cs != -1) | |
+ ipipe_restore_context_check(cs); | |
+ goto out; | |
+ } | |
+ | |
+ spin_lock_irqsave(&__ipipe_printk_lock, flags); | |
+ | |
+ oldcount = __ipipe_printk_fill; | |
+ fbytes = __LOG_BUF_LEN - oldcount; | |
+ if (fbytes > 1) { | |
+ r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill, | |
+ fbytes, fmt, args) + 1; | |
+ __ipipe_printk_fill += r; | |
+ } else | |
+ r = 0; | |
+ | |
+ spin_unlock_irqrestore(&__ipipe_printk_lock, flags); | |
+ | |
+ if (oldcount == 0) | |
+ ipipe_raise_irq(__ipipe_printk_virq); | |
+out: | |
+ va_end(args); | |
+ | |
+ return r; | |
+} | |
+ | |
+#else /* !CONFIG_IPIPE */ | |
+ | |
+asmlinkage int printk(const char *fmt, ...) | |
+{ | |
va_list args; | |
int r; | |
@@ -1697,6 +1787,8 @@ asmlinkage int printk(const char *fmt, ...) | |
return r; | |
} | |
+#endif /* CONFIG_IPIPE */ | |
+ | |
EXPORT_SYMBOL(printk); | |
#else /* CONFIG_PRINTK */ | |
@@ -2393,7 +2485,7 @@ EXPORT_SYMBOL(register_console); | |
int unregister_console(struct console *console) | |
{ | |
- struct console *a, *b; | |
+ struct console *a, *b; | |
int res = 1; | |
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE | |
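The printk rework stages messages in a packed buffer of NUL-terminated strings whenever printing directly is unsafe (head domain, stalled root, bypass mode) and replays them from a root-domain virtual IRQ, which is what __ipipe_flush_printk() walks above. The buffer/flush cycle modeled in plain C (sizes and names invented):

    #include <stdio.h>
    #include <string.h>

    #define LOG_BUF_LEN 256

    static char stage[LOG_BUF_LEN];
    static int fill;

    /* Called where the console must not be touched directly:
     * append the message, keeping its terminating NUL. */
    static void deferred_log(const char *msg)
    {
        int len = (int)strlen(msg) + 1;

        if (fill + len <= LOG_BUF_LEN) {
            memcpy(stage + fill, msg, len);
            fill += len;
        }
    }

    /* Later, from the safe (root) context: walk the packed strings. */
    static void flush_log(void)
    {
        int out = 0;

        while (out < fill) {
            printf("%s", stage + out);
            out += (int)strlen(stage + out) + 1;
        }
        fill = 0;
    }

    int main(void)
    {
        deferred_log("first deferred message\n");
        deferred_log("second deferred message\n");
        flush_log();
        return 0;
    }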
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c | |
index c3ae144..13b960a 100644 | |
--- a/kernel/sched/clock.c | |
+++ b/kernel/sched/clock.c | |
@@ -242,7 +242,9 @@ u64 sched_clock_cpu(int cpu) | |
struct sched_clock_data *scd; | |
u64 clock; | |
+#ifndef CONFIG_IPIPE | |
WARN_ON_ONCE(!irqs_disabled()); | |
+#endif | |
if (sched_clock_stable) | |
return sched_clock(); | |
@@ -319,9 +321,9 @@ u64 cpu_clock(int cpu) | |
u64 clock; | |
unsigned long flags; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
clock = sched_clock_cpu(cpu); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
return clock; | |
} | |
@@ -338,9 +340,9 @@ u64 local_clock(void) | |
u64 clock; | |
unsigned long flags; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
clock = sched_clock_cpu(smp_processor_id()); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
return clock; | |
} | |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c | |
index e8b3350..c5661b2 100644 | |
--- a/kernel/sched/core.c | |
+++ b/kernel/sched/core.c | |
@@ -1423,7 +1423,9 @@ void scheduler_ipi(void) | |
* however a fair share of IPIs are still resched only so this would | |
* somewhat pessimize the simple resched case. | |
*/ | |
+#ifndef IPIPE_ARCH_HAVE_VIRQ_IPI | |
irq_enter(); | |
+#endif | |
tick_nohz_full_check(); | |
sched_ttwu_pending(); | |
@@ -1434,7 +1436,9 @@ void scheduler_ipi(void) | |
this_rq()->idle_balance = 1; | |
raise_softirq_irqoff(SCHED_SOFTIRQ); | |
} | |
+#ifndef IPIPE_ARCH_HAVE_VIRQ_IPI | |
irq_exit(); | |
+#endif | |
} | |
static void ttwu_queue_remote(struct task_struct *p, int cpu) | |
@@ -1489,7 +1493,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |
smp_wmb(); | |
raw_spin_lock_irqsave(&p->pi_lock, flags); | |
- if (!(p->state & state)) | |
+ if (!(p->state & state) || | |
+ (p->state & (TASK_NOWAKEUP|TASK_HARDENING))) | |
goto out; | |
success = 1; /* we're going to change ->state */ | |
@@ -1955,6 +1960,8 @@ asmlinkage void schedule_tail(struct task_struct *prev) | |
{ | |
struct rq *rq = this_rq(); | |
+ __ipipe_complete_domain_migration(); | |
+ | |
finish_task_switch(rq, prev); | |
/* | |
@@ -1975,7 +1982,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) | |
* context_switch - switch to the new MM and the new | |
* thread's register state. | |
*/ | |
-static inline void | |
+static inline int | |
context_switch(struct rq *rq, struct task_struct *prev, | |
struct task_struct *next) | |
{ | |
@@ -2018,12 +2025,18 @@ context_switch(struct rq *rq, struct task_struct *prev, | |
switch_to(prev, next, prev); | |
barrier(); | |
+ | |
+ if (unlikely(__ipipe_switch_tail())) | |
+ return 1; | |
+ | |
/* | |
* this_rq must be evaluated again because prev may have moved | |
* CPUs since it called schedule(), thus the 'rq' on its stack | |
* frame will be invalid. | |
*/ | |
finish_task_switch(this_rq(), prev); | |
+ | |
+ return 0; | |
} | |
/* | |
@@ -2793,6 +2806,7 @@ notrace unsigned long get_parent_ip(unsigned long addr) | |
void __kprobes add_preempt_count(int val) | |
{ | |
+ ipipe_preempt_root_only(); | |
#ifdef CONFIG_DEBUG_PREEMPT | |
/* | |
* Underflow? | |
@@ -2815,6 +2829,7 @@ EXPORT_SYMBOL(add_preempt_count); | |
void __kprobes sub_preempt_count(int val) | |
{ | |
+ ipipe_preempt_root_only(); | |
#ifdef CONFIG_DEBUG_PREEMPT | |
/* | |
* Underflow? | |
@@ -2861,6 +2876,7 @@ static noinline void __schedule_bug(struct task_struct *prev) | |
*/ | |
static inline void schedule_debug(struct task_struct *prev) | |
{ | |
+ ipipe_root_only(); | |
/* | |
* Test if we are atomic. Since do_exit() needs to call into | |
* schedule() atomically, we ignore that path for now. | |
@@ -2947,7 +2963,7 @@ pick_next_task(struct rq *rq) | |
* - return from syscall or exception to user-space | |
* - return from interrupt-handler to user-space | |
*/ | |
-static void __sched __schedule(void) | |
+static int __sched __schedule(void) | |
{ | |
struct task_struct *prev, *next; | |
unsigned long *switch_count; | |
@@ -2961,6 +2977,10 @@ need_resched: | |
rcu_note_context_switch(cpu); | |
prev = rq->curr; | |
+ if (unlikely(prev->state & TASK_HARDENING)) | |
+ /* Pop one disable level -- one still remains. */ | |
+ preempt_enable(); | |
+ | |
schedule_debug(prev); | |
if (sched_feat(HRTICK)) | |
@@ -3007,7 +3027,8 @@ need_resched: | |
rq->curr = next; | |
++*switch_count; | |
- context_switch(rq, prev, next); /* unlocks the rq */ | |
+ if (context_switch(rq, prev, next)) /* unlocks the rq */ | |
+ return 1; /* task hijacked by higher domain */ | |
/* | |
* The context switch have flipped the stack from under us | |
* and restored the local variables which were saved when | |
@@ -3016,14 +3037,18 @@ need_resched: | |
*/ | |
cpu = smp_processor_id(); | |
rq = cpu_rq(cpu); | |
- } else | |
+ } else { | |
+ prev->state &= ~TASK_HARDENING; | |
raw_spin_unlock_irq(&rq->lock); | |
+ } | |
post_schedule(rq); | |
sched_preempt_enable_no_resched(); | |
if (need_resched()) | |
goto need_resched; | |
+ | |
+ return 0; | |
} | |
static inline void sched_submit_work(struct task_struct *tsk) | |
@@ -3088,12 +3113,13 @@ asmlinkage void __sched notrace preempt_schedule(void) | |
* If there is a non-zero preempt_count or interrupts are disabled, | |
* we do not want to preempt the current task. Just return.. | |
*/ | |
- if (likely(ti->preempt_count || irqs_disabled())) | |
+ if (likely(ti->preempt_count || irqs_disabled() || !ipipe_root_p)) | |
return; | |
do { | |
add_preempt_count_notrace(PREEMPT_ACTIVE); | |
- __schedule(); | |
+ if (__schedule()) | |
+ return; | |
sub_preempt_count_notrace(PREEMPT_ACTIVE); | |
/* | |
@@ -3161,6 +3187,8 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | |
{ | |
wait_queue_t *curr, *next; | |
+ ipipe_root_only(); | |
+ | |
list_for_each_entry_safe(curr, next, &q->task_list, task_list) { | |
unsigned flags = curr->flags; | |
@@ -3984,6 +4012,7 @@ recheck: | |
oldprio = p->prio; | |
prev_class = p->sched_class; | |
__setscheduler(rq, p, policy, param->sched_priority); | |
+ __ipipe_report_setsched(p); | |
if (running) | |
p->sched_class->set_curr_task(rq); | |
@@ -4752,6 +4781,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |
/* Set the preempt count _outside_ the spinlocks! */ | |
task_thread_info(idle)->preempt_count = 0; | |
+ ipipe_root_only(); | |
/* | |
* The idle tasks have their own, simple scheduling class: | |
@@ -4817,10 +4847,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | |
do_set_cpus_allowed(p, new_mask); | |
/* Can the task run on the task's current CPU? If so, we're done */ | |
- if (cpumask_test_cpu(task_cpu(p), new_mask)) | |
+ if (cpumask_test_cpu(task_cpu(p), new_mask)) { | |
+ __ipipe_report_setaffinity(p, task_cpu(p)); | |
goto out; | |
+ } | |
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); | |
+ __ipipe_report_setaffinity(p, dest_cpu); | |
if (p->on_rq) { | |
struct migration_arg arg = { p, dest_cpu }; | |
/* Need help from migration thread: drop lock and wait. */ | |
@@ -8077,3 +8110,40 @@ void dump_cpu_task(int cpu) | |
pr_info("Task dump for CPU %d:\n", cpu); | |
sched_show_task(cpu_curr(cpu)); | |
} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+int __ipipe_migrate_head(void) | |
+{ | |
+ struct task_struct *p = current; | |
+ | |
+ preempt_disable(); | |
+ | |
+ IPIPE_WARN_ONCE(__this_cpu_read(ipipe_percpu.task_hijacked) != NULL); | |
+ | |
+ __this_cpu_write(ipipe_percpu.task_hijacked, p); | |
+ set_current_state(TASK_INTERRUPTIBLE | TASK_HARDENING); | |
+ sched_submit_work(p); | |
+ if (likely(__schedule())) | |
+ return 0; | |
+ | |
+ if (signal_pending(p)) | |
+ return -ERESTARTSYS; | |
+ | |
+ BUG(); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_migrate_head); | |
+ | |
+void __ipipe_reenter_root(void) | |
+{ | |
+ struct rq *rq = this_rq(); | |
+ struct task_struct *p; | |
+ | |
+ p = __this_cpu_read(ipipe_percpu.rqlock_owner); | |
+ finish_task_switch(rq, p); | |
+ post_schedule(rq); | |
+ sched_preempt_enable_no_resched(); | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_reenter_root); | |
+ | |
+#endif /* CONFIG_IPIPE */ | |
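The two exports above are the handover points between Linux and a co-kernel running in the head domain (for instance Xenomai). The sketch below is a hypothetical caller, shown only to illustrate the return conventions; the function name and the pairing of the two calls in one place are assumptions, not part of the patch.

#include <linux/ipipe.h>

/* Hypothetical co-kernel helper, illustration only. */
static int run_hardened(void (*head_work)(void))
{
        int ret;

        /* Sleeps with TASK_INTERRUPTIBLE | TASK_HARDENING and returns 0
         * once the head domain has resumed the caller, or -ERESTARTSYS
         * if a pending signal aborted the migration. */
        ret = __ipipe_migrate_head();
        if (ret)
                return ret;

        head_work();    /* runs with the head domain in control */

        /* On the way back, the co-kernel's relax path runs this on the
         * root domain to complete the context switch that was left
         * half-done when the head domain hijacked the task. */
        __ipipe_reenter_root();
        return 0;
}

In a real co-kernel the two calls live in different code paths (hardening versus relaxing); they are paired here only to show the protocol.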
diff --git a/kernel/signal.c b/kernel/signal.c | |
index 113411b..35d9ba7 100644 | |
--- a/kernel/signal.c | |
+++ b/kernel/signal.c | |
@@ -687,6 +687,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |
void signal_wake_up_state(struct task_struct *t, unsigned int state) | |
{ | |
set_tsk_thread_flag(t, TIF_SIGPENDING); | |
+ | |
+ /* TIF_SIGPENDING must be set prior to reporting. */ | |
+ __ipipe_report_sigwake(t); | |
+ | |
/* | |
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable | |
* case. We don't check t->state here because there is a race with it | |
@@ -936,8 +940,11 @@ static inline int wants_signal(int sig, struct task_struct *p) | |
return 0; | |
if (sig == SIGKILL) | |
return 1; | |
- if (task_is_stopped_or_traced(p)) | |
+ if (task_is_stopped_or_traced(p)) { | |
+ if (!signal_pending(p)) | |
+ __ipipe_report_sigwake(p); | |
return 0; | |
+ } | |
return task_curr(p) || !signal_pending(p); | |
} | |
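__ipipe_report_sigwake() above, like the __ipipe_report_setsched() and __ipipe_report_setaffinity() calls added to kernel/sched earlier in this patch, funnels into the i-pipe kernel-event machinery. A sketch of the consumer side follows; the hook name and event constants follow the i-pipe conventions of this era, and none of them is defined in this excerpt, so treat the whole block as an assumption.

#include <linux/ipipe.h>
#include <linux/sched.h>

/* Sketch of a co-kernel's kernel-event hook (assumed API). */
int ipipe_kevent_hook(int kevent, void *data)
{
        switch (kevent) {
        case IPIPE_KEVT_SIGWAKE:
                /* data is the target task; TIF_SIGPENDING is already
                 * set, per the ordering comment in
                 * signal_wake_up_state() above, so kick the task out
                 * of any head-domain wait to handle the signal. */
                break;
        case IPIPE_KEVT_SETSCHED:
                /* mirror the new policy/priority on the co-kernel side */
                break;
        case IPIPE_KEVT_SETAFFINITY:
                /* track the CPU the root kernel migrates the task to */
                break;
        default:
                break;
        }
        return 0;       /* propagate the event to Linux */
}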
diff --git a/kernel/spinlock.c b/kernel/spinlock.c | |
index 5cdd806..5ab3a87 100644 | |
--- a/kernel/spinlock.c | |
+++ b/kernel/spinlock.c | |
@@ -26,7 +26,9 @@ | |
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | |
* not re-enabled during lock-acquire (which the preempt-spin-ops do): | |
*/ | |
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | |
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \ | |
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \ | |
+ defined(CONFIG_IPIPE) | |
/* | |
* The __lock_function inlines are taken from | |
* include/linux/spinlock_api_smp.h | |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c | |
index c6d6400..4a11bfc 100644 | |
--- a/kernel/time/clockevents.c | |
+++ b/kernel/time/clockevents.c | |
@@ -17,6 +17,7 @@ | |
#include <linux/module.h> | |
#include <linux/notifier.h> | |
#include <linux/smp.h> | |
+#include <linux/ipipe_tickdev.h> | |
#include "tick-internal.h" | |
@@ -282,6 +283,9 @@ void clockevents_register_device(struct clock_event_device *dev) | |
unsigned long flags; | |
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | |
+ | |
+ ipipe_host_timer_register(dev); | |
+ | |
if (!dev->cpumask) { | |
WARN_ON(num_possible_cpus() > 1); | |
dev->cpumask = cpumask_of(smp_processor_id()); | |
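ipipe_host_timer_register() comes from the newly included linux/ipipe_tickdev.h and gives the head domain a chance to learn about every Linux clockevent device, so a timer proxy can later take the device over. The real implementation lives in the i-pipe core, outside this excerpt; the per-CPU table below is an assumed sketch of that role only.

#include <linux/percpu.h>
#include <linux/clockchips.h>

/* Assumed bookkeeping, for illustration only. */
static DEFINE_PER_CPU(struct clock_event_device *, host_timer);

void ipipe_host_timer_register(struct clock_event_device *dev)
{
        /* remember which device drives this CPU's host tick so the
         * head domain can interpose its proxy on it later */
        __this_cpu_write(host_timer, dev);
}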
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c | |
index c958338..172d3c8 100644 | |
--- a/kernel/time/clocksource.c | |
+++ b/kernel/time/clocksource.c | |
@@ -30,6 +30,7 @@ | |
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ | |
#include <linux/tick.h> | |
#include <linux/kthread.h> | |
+#include <linux/kallsyms.h> | |
void timecounter_init(struct timecounter *tc, | |
const struct cyclecounter *cc, | |
@@ -249,6 +250,9 @@ static void clocksource_watchdog(unsigned long data) | |
cycle_t csnow, wdnow; | |
int64_t wd_nsec, cs_nsec; | |
int next_cpu, reset_pending; | |
+#ifdef CONFIG_IPIPE | |
+ cycle_t wdref; | |
+#endif | |
spin_lock(&watchdog_lock); | |
if (!watchdog_running) | |
@@ -265,11 +269,24 @@ static void clocksource_watchdog(unsigned long data) | |
continue; | |
} | |
+#ifdef CONFIG_IPIPE | |
+retry: | |
+#endif | |
local_irq_disable(); | |
+#ifdef CONFIG_IPIPE | |
+ wdref = watchdog->read(watchdog); | |
+#endif | |
csnow = cs->read(cs); | |
wdnow = watchdog->read(watchdog); | |
local_irq_enable(); | |
+#ifdef CONFIG_IPIPE | |
+ wd_nsec = clocksource_cyc2ns((wdnow - wdref) & watchdog->mask, | |
+ watchdog->mult, watchdog->shift); | |
+ if (wd_nsec > WATCHDOG_THRESHOLD) | |
+ goto retry; | |
+#endif | |
+ | |
/* Clocksource initialized ? */ | |
if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || | |
atomic_read(&watchdog_reset_pending)) { | |
@@ -630,6 +647,95 @@ static int __init clocksource_done_booting(void) | |
} | |
fs_initcall(clocksource_done_booting); | |
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE | |
+unsigned long long __ipipe_cs_freq; | |
+EXPORT_SYMBOL_GPL(__ipipe_cs_freq); | |
+ | |
+struct clocksource *__ipipe_cs; | |
+EXPORT_SYMBOL_GPL(__ipipe_cs); | |
+ | |
+cycle_t (*__ipipe_cs_read)(struct clocksource *cs); | |
+cycle_t __ipipe_cs_last_tsc; | |
+cycle_t __ipipe_cs_mask; | |
+unsigned __ipipe_cs_lat = 0xffffffff; | |
+ | |
+static void ipipe_check_clocksource(struct clocksource *cs) | |
+{ | |
+ cycle_t (*cread)(struct clocksource *cs); | |
+ cycle_t lat, mask, saved; | |
+ unsigned long long freq; | |
+ unsigned long flags; | |
+ unsigned i; | |
+ | |
+ if (cs->ipipe_read) { | |
+ mask = CLOCKSOURCE_MASK(64); | |
+ cread = cs->ipipe_read; | |
+ } else { | |
+ mask = cs->mask; | |
+ cread = cs->read; | |
+ | |
+ if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) == 0) | |
+ return; | |
+ | |
+ /* | |
+ * We only support either full 64-bit masks, or masks of at most | |
+ * 32 bits for which mask + 1 is a power of 2. | |
+ */ | |
+ if (mask != CLOCKSOURCE_MASK(64) | |
+ && ((mask & (mask + 1)) != 0 || mask > 0xffffffff)) | |
+ return; | |
+ } | |
+ | |
+ /* | |
+ * We require a resolution of 1 us or better (i.e. at least 1 MHz) | |
+ */ | |
+ if (cs->shift <= 34) { | |
+ freq = 1000000000ULL << cs->shift; | |
+ do_div(freq, cs->mult); | |
+ } else { | |
+ freq = 1000000ULL << cs->shift; | |
+ do_div(freq, cs->mult); | |
+ freq *= 1000; | |
+ } | |
+ if (freq < 1000000) | |
+ return; | |
+ | |
+ /* Measure the clocksource latency */ | |
+ flags = hard_local_irq_save(); | |
+ saved = __ipipe_cs_last_tsc; | |
+ lat = cread(cs); | |
+ for (i = 0; i < 10; i++) | |
+ cread(cs); | |
+ lat = cread(cs) - lat; | |
+ __ipipe_cs_last_tsc = saved; | |
+ hard_local_irq_restore(flags); | |
+ lat = (lat * cs->mult) >> cs->shift; | |
+ do_div(lat, i + 1); | |
+ | |
+ if (!strcmp(cs->name, override_name)) | |
+ goto skip_tests; | |
+ | |
+ if (lat > __ipipe_cs_lat) | |
+ return; | |
+ | |
+ if (__ipipe_cs && !strcmp(__ipipe_cs->name, override_name)) | |
+ return; | |
+ | |
+ skip_tests: | |
+ flags = hard_local_irq_save(); | |
+ if (__ipipe_cs_last_tsc == 0) { | |
+ __ipipe_cs_lat = lat; | |
+ __ipipe_cs_freq = freq; | |
+ __ipipe_cs = cs; | |
+ __ipipe_cs_read = cread; | |
+ __ipipe_cs_mask = mask; | |
+ } | |
+ hard_local_irq_restore(flags); | |
+} | |
+#else /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */ | |
+#define ipipe_check_clocksource(cs) do { } while (0) | |
+#endif /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */ | |
+ | |
/* | |
* Enqueue the clocksource sorted by rating | |
*/ | |
@@ -643,6 +749,8 @@ static void clocksource_enqueue(struct clocksource *cs) | |
if (tmp->rating >= cs->rating) | |
entry = &tmp->list; | |
list_add(&cs->list, entry); | |
+ | |
+ ipipe_check_clocksource(cs); | |
} | |
/** | |
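The frequency check in ipipe_check_clocksource() inverts the cyc2ns parameters: clocksource_cyc2ns() computes (cycles * mult) >> shift, so the counter frequency is (10^9 << shift) / mult. 10^9 needs about 30 bits, which means the shifted constant would overflow 64 bits for shift > 34; that is why the patch shifts 10^6 instead and rescales by 1000. Restated as a standalone helper:

#include <linux/types.h>
#include <asm/div64.h>

/* Frequency in Hz of a clocksource with the given (mult, shift);
 * mirrors the computation in ipipe_check_clocksource() above. */
static inline u64 cs_freq_hz(u32 mult, u32 shift)
{
        u64 freq;

        if (shift <= 34) {
                freq = 1000000000ULL << shift;  /* < 2^64, since 10^9 < 2^30 */
                do_div(freq, mult);
        } else {
                freq = 1000000ULL << shift;     /* avoid the 64-bit overflow */
                do_div(freq, mult);
                freq *= 1000;
        }
        return freq;
}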
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c | |
index 7ce5e5a..663510e 100644 | |
--- a/kernel/time/tick-common.c | |
+++ b/kernel/time/tick-common.c | |
@@ -72,7 +72,7 @@ static void tick_periodic(int cpu) | |
write_sequnlock(&jiffies_lock); | |
} | |
- update_process_times(user_mode(get_irq_regs())); | |
+ update_root_process_times(get_irq_regs()); | |
profile_tick(CPU_PROFILING); | |
} | |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c | |
index 4251374..3df1b0b 100644 | |
--- a/kernel/time/tick-sched.c | |
+++ b/kernel/time/tick-sched.c | |
@@ -143,7 +143,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) | |
ts->idle_jiffies++; | |
} | |
#endif | |
- update_process_times(user_mode(regs)); | |
+ update_root_process_times(regs); | |
profile_tick(CPU_PROFILING); | |
} | |
diff --git a/kernel/timer.c b/kernel/timer.c | |
index 15bc1b4..ca04d76 100644 | |
--- a/kernel/timer.c | |
+++ b/kernel/timer.c | |
@@ -1363,6 +1363,25 @@ void update_process_times(int user_tick) | |
run_posix_cpu_timers(p); | |
} | |
+#ifdef CONFIG_IPIPE | |
+ | |
+void update_root_process_times(struct pt_regs *regs) | |
+{ | |
+ int cpu, user_tick = user_mode(regs); | |
+ | |
+ if (__ipipe_root_tick_p(regs)) { | |
+ update_process_times(user_tick); | |
+ return; | |
+ } | |
+ | |
+ run_local_timers(); | |
+ cpu = smp_processor_id(); | |
+ rcu_check_callbacks(cpu, user_tick); | |
+ run_posix_cpu_timers(current); | |
+} | |
+ | |
+#endif | |
+ | |
/* | |
* This function runs timers and the timer-tq in bottom half context. | |
*/ | |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig | |
index 015f85a..b6d06ce 100644 | |
--- a/kernel/trace/Kconfig | |
+++ b/kernel/trace/Kconfig | |
@@ -443,6 +443,7 @@ config DYNAMIC_FTRACE | |
bool "enable/disable function tracing dynamically" | |
depends on FUNCTION_TRACER | |
depends on HAVE_DYNAMIC_FTRACE | |
+ depends on !IPIPE_TRACE_MCOUNT | |
default y | |
help | |
This option will modify all the calls to function tracing | |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c | |
index f23449d..d31462c 100644 | |
--- a/kernel/trace/ftrace.c | |
+++ b/kernel/trace/ftrace.c | |
@@ -32,6 +32,7 @@ | |
#include <linux/list.h> | |
#include <linux/hash.h> | |
#include <linux/rcupdate.h> | |
+#include <linux/ipipe.h> | |
#include <trace/events/sched.h> | |
@@ -280,10 +281,19 @@ static void update_global_ops(void) | |
static void update_ftrace_function(void) | |
{ | |
+ struct ftrace_ops *ops; | |
ftrace_func_t func; | |
update_global_ops(); | |
+ for (ops = ftrace_ops_list; | |
+ ops != &ftrace_list_end; ops = ops->next) | |
+ if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) { | |
+ function_trace_op = ops; | |
+ ftrace_trace_function = ops->func; | |
+ return; | |
+ } | |
+ | |
/* | |
* If we are at the end of the list and this ops is | |
* recursion safe and not dynamic and the arch supports passing ops, | |
@@ -2002,6 +2012,9 @@ void __weak arch_ftrace_update_code(int command) | |
static void ftrace_run_update_code(int command) | |
{ | |
+#ifdef CONFIG_IPIPE | |
+ unsigned long flags; | |
+#endif /* CONFIG_IPIPE */ | |
int ret; | |
ret = ftrace_arch_code_modify_prepare(); | |
@@ -2020,7 +2033,13 @@ static void ftrace_run_update_code(int command) | |
* is safe. The stop_machine() is the safest, but also | |
* produces the most overhead. | |
*/ | |
+#ifdef CONFIG_IPIPE | |
+ flags = ipipe_critical_enter(NULL); | |
+ __ftrace_modify_code(&command); | |
+ ipipe_critical_exit(flags); | |
+#else /* !CONFIG_IPIPE */ | |
arch_ftrace_update_code(command); | |
+#endif /* !CONFIG_IPIPE */ | |
function_trace_stop--; | |
@@ -4067,10 +4086,10 @@ static int ftrace_process_locs(struct module *mod, | |
* reason to cause large interrupt latencies while we do it. | |
*/ | |
if (!mod) | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
ftrace_update_code(mod); | |
if (!mod) | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
ret = 0; | |
out: | |
mutex_unlock(&ftrace_lock); | |
@@ -4188,9 +4207,9 @@ void __init ftrace_init(void) | |
/* Keep the ftrace pointer to the stub */ | |
addr = (unsigned long)ftrace_stub; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
ftrace_dyn_arch_init(&addr); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
/* ftrace_dyn_arch_init places the return code in addr */ | |
if (addr) | |
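The substitutions in this and the following tracing hunks all apply one idiom: under i-pipe, local_irq_save() only virtually masks interrupts for the root domain, so a head-domain handler can still preempt the section and re-enter the tracer; hard_local_irq_save() masks the CPU for real. In isolation, assuming the hard_* helpers from linux/ipipe.h as used throughout this patch:

#include <linux/ipipe.h>

static void touch_tracer_state(void)
{
        unsigned long flags;

        flags = hard_local_irq_save();  /* real CPU masking: even the
                                         * head domain cannot preempt */
        /* ... update state shared with head-domain tracepoints ... */
        hard_local_irq_restore(flags);
}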
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c | |
index 0582a01..031cf80 100644 | |
--- a/kernel/trace/trace.c | |
+++ b/kernel/trace/trace.c | |
@@ -1974,8 +1974,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |
/* Don't pollute graph traces with trace_vprintk internals */ | |
pause_graph_tracing(); | |
+ flags = hard_local_irq_save(); | |
+ | |
pc = preempt_count(); | |
- preempt_disable_notrace(); | |
tbuffer = get_trace_buf(); | |
if (!tbuffer) { | |
@@ -1988,7 +1989,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) | |
goto out; | |
- local_save_flags(flags); | |
size = sizeof(*entry) + sizeof(u32) * len; | |
buffer = tr->trace_buffer.buffer; | |
event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, | |
@@ -2006,7 +2006,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |
} | |
out: | |
- preempt_enable_notrace(); | |
+ hard_local_irq_restore(flags); | |
unpause_graph_tracing(); | |
return len; | |
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c | |
index 26dc348..e2da3a6 100644 | |
--- a/kernel/trace/trace_clock.c | |
+++ b/kernel/trace/trace_clock.c | |
@@ -92,7 +92,7 @@ u64 notrace trace_clock_global(void) | |
int this_cpu; | |
u64 now; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
this_cpu = raw_smp_processor_id(); | |
now = sched_clock_cpu(this_cpu); | |
@@ -118,7 +118,7 @@ u64 notrace trace_clock_global(void) | |
arch_spin_unlock(&trace_clock_struct.lock); | |
out: | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
return now; | |
} | |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c | |
index c4d6d71..fa900e3 100644 | |
--- a/kernel/trace/trace_functions.c | |
+++ b/kernel/trace/trace_functions.c | |
@@ -105,7 +105,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |
* Need to use raw, since this must be called before the | |
* recursive protection is performed. | |
*/ | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save(); | |
cpu = raw_smp_processor_id(); | |
data = per_cpu_ptr(tr->trace_buffer.data, cpu); | |
disabled = atomic_inc_return(&data->disabled); | |
@@ -125,7 +125,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |
} | |
atomic_dec(&data->disabled); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore(flags); | |
} | |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c | |
index 8388bc9..947043a 100644 | |
--- a/kernel/trace/trace_functions_graph.c | |
+++ b/kernel/trace/trace_functions_graph.c | |
@@ -263,7 +263,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |
(max_depth && trace->depth >= max_depth)) | |
return 0; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
cpu = raw_smp_processor_id(); | |
data = per_cpu_ptr(tr->trace_buffer.data, cpu); | |
disabled = atomic_inc_return(&data->disabled); | |
@@ -275,7 +275,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |
} | |
atomic_dec(&data->disabled); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
return ret; | |
} | |
@@ -348,7 +348,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |
int cpu; | |
int pc; | |
- local_irq_save(flags); | |
+ flags = hard_local_irq_save_notrace(); | |
cpu = raw_smp_processor_id(); | |
data = per_cpu_ptr(tr->trace_buffer.data, cpu); | |
disabled = atomic_inc_return(&data->disabled); | |
@@ -357,7 +357,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |
__trace_graph_return(tr, trace, flags, pc); | |
} | |
atomic_dec(&data->disabled); | |
- local_irq_restore(flags); | |
+ hard_local_irq_restore_notrace(flags); | |
} | |
void set_graph_array(struct trace_array *tr) | |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug | |
index 74fdc5c..f99e4e7 100644 | |
--- a/lib/Kconfig.debug | |
+++ b/lib/Kconfig.debug | |
@@ -156,6 +156,8 @@ config DEBUG_SECTION_MISMATCH | |
- Enable verbose reporting from modpost in order to help resolve | |
the section mismatches that are reported. | |
+source "kernel/ipipe/Kconfig.debug" | |
+ | |
config DEBUG_KERNEL | |
bool "Kernel debugging" | |
help | |
diff --git a/lib/atomic64.c b/lib/atomic64.c | |
index 08a4f06..15810f4 100644 | |
--- a/lib/atomic64.c | |
+++ b/lib/atomic64.c | |
@@ -29,15 +29,15 @@ | |
* Ensure each lock is in a separate cacheline. | |
*/ | |
static union { | |
- raw_spinlock_t lock; | |
+ ipipe_spinlock_t lock; | |
char pad[L1_CACHE_BYTES]; | |
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { | |
[0 ... (NR_LOCKS - 1)] = { | |
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), | |
+ .lock = IPIPE_SPIN_LOCK_UNLOCKED, | |
}, | |
}; | |
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v) | |
+static inline ipipe_spinlock_t *lock_addr(const atomic64_t *v) | |
{ | |
unsigned long addr = (unsigned long) v; | |
@@ -49,7 +49,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v) | |
long long atomic64_read(const atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(atomic64_read); | |
void atomic64_set(atomic64_t *v, long long i) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
raw_spin_lock_irqsave(lock, flags); | |
v->counter = i; | |
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(atomic64_set); | |
void atomic64_add(long long a, atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
raw_spin_lock_irqsave(lock, flags); | |
v->counter += a; | |
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(atomic64_add); | |
long long atomic64_add_return(long long a, atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(atomic64_add_return); | |
void atomic64_sub(long long a, atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
raw_spin_lock_irqsave(lock, flags); | |
v->counter -= a; | |
@@ -108,7 +108,7 @@ EXPORT_SYMBOL(atomic64_sub); | |
long long atomic64_sub_return(long long a, atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(atomic64_sub_return); | |
long long atomic64_dec_if_positive(atomic64_t *v) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(atomic64_dec_if_positive); | |
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -151,7 +151,7 @@ EXPORT_SYMBOL(atomic64_cmpxchg); | |
long long atomic64_xchg(atomic64_t *v, long long new) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
long long val; | |
raw_spin_lock_irqsave(lock, flags); | |
@@ -165,7 +165,7 @@ EXPORT_SYMBOL(atomic64_xchg); | |
int atomic64_add_unless(atomic64_t *v, long long a, long long u) | |
{ | |
unsigned long flags; | |
- raw_spinlock_t *lock = lock_addr(v); | |
+ ipipe_spinlock_t *lock = lock_addr(v); | |
int ret = 0; | |
raw_spin_lock_irqsave(lock, flags); | |
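Switching the hashed locks to ipipe_spinlock_t changes what raw_spin_lock_irqsave() means in this file: on an i-pipe kernel it hard-disables CPU interrupts instead of merely virtually masking the root domain, so the generic atomic64 emulation cannot deadlock against head-domain callers. Call sites stay unchanged; a usage illustration under that assumption:

#include <linux/atomic.h>

static atomic64_t shared_ctr = ATOMIC64_INIT(0);

/* Usable from either domain: the hashed ipipe spinlock is taken with
 * hard IRQs off, so a head-domain handler on this CPU can never spin
 * on a lock held by the context it interrupted. */
void bump_shared_ctr(void)
{
        atomic64_add(1, &shared_ctr);
}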
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c | |
index f8e0e53..02175aa3 100644 | |
--- a/lib/bust_spinlocks.c | |
+++ b/lib/bust_spinlocks.c | |
@@ -14,6 +14,7 @@ | |
#include <linux/wait.h> | |
#include <linux/vt_kern.h> | |
#include <linux/console.h> | |
+#include <linux/ipipe_trace.h> | |
void __attribute__((weak)) bust_spinlocks(int yes) | |
@@ -25,6 +26,7 @@ void __attribute__((weak)) bust_spinlocks(int yes) | |
unblank_screen(); | |
#endif | |
console_unblank(); | |
+ ipipe_trace_panic_dump(); | |
if (--oops_in_progress == 0) | |
wake_up_klogd(); | |
} | |
diff --git a/lib/ioremap.c b/lib/ioremap.c | |
index 0c9216c..f5331c0 100644 | |
--- a/lib/ioremap.c | |
+++ b/lib/ioremap.c | |
@@ -10,6 +10,7 @@ | |
#include <linux/sched.h> | |
#include <linux/io.h> | |
#include <linux/export.h> | |
+#include <linux/hardirq.h> | |
#include <asm/cacheflush.h> | |
#include <asm/pgtable.h> | |
@@ -87,7 +88,12 @@ int ioremap_page_range(unsigned long addr, | |
break; | |
} while (pgd++, addr = next, addr != end); | |
- flush_cache_vmap(start, end); | |
+ /* APEI may invoke this for temporarily remapping pages in NMI | |
+ * context - nothing we can or need to propagate globally. */ | |
+ if (!in_nmi()) { | |
+ __ipipe_pin_range_globally(start, end); | |
+ flush_cache_vmap(start, end); | |
+ } | |
return err; | |
} | |
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c | |
index 4c0d0e5..886125d 100644 | |
--- a/lib/smp_processor_id.c | |
+++ b/lib/smp_processor_id.c | |
@@ -12,10 +12,13 @@ notrace unsigned int debug_smp_processor_id(void) | |
unsigned long preempt_count = preempt_count(); | |
int this_cpu = raw_smp_processor_id(); | |
+ if (!ipipe_root_p) | |
+ goto out; | |
+ | |
if (likely(preempt_count)) | |
goto out; | |
- if (irqs_disabled()) | |
+ if (irqs_disabled() || hard_irqs_disabled()) | |
goto out; | |
/* | |
diff --git a/mm/Kconfig b/mm/Kconfig | |
index e742d06..32bedf1 100644 | |
--- a/mm/Kconfig | |
+++ b/mm/Kconfig | |
@@ -385,6 +385,7 @@ config NOMMU_INITIAL_TRIM_EXCESS | |
config TRANSPARENT_HUGEPAGE | |
bool "Transparent Hugepage Support" | |
depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE | |
+ depends on !IPIPE | |
select COMPACTION | |
help | |
Transparent Hugepages allows the kernel to use huge pages and | |
diff --git a/mm/memory.c b/mm/memory.c | |
index 5a35443..51eef71 100644 | |
--- a/mm/memory.c | |
+++ b/mm/memory.c | |
@@ -814,6 +814,32 @@ out: | |
return pfn_to_page(pfn); | |
} | |
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) | |
+{ | |
+ /* | |
+ * If the source page was a PFN mapping, we don't have | |
+ * a "struct page" for it. We do a best-effort copy by | |
+ * just copying from the original user address. If that | |
+ * fails, we just zero-fill it. Live with it. | |
+ */ | |
+ if (unlikely(!src)) { | |
+ void *kaddr = kmap_atomic(dst); | |
+ void __user *uaddr = (void __user *)(va & PAGE_MASK); | |
+ | |
+ /* | |
+ * This really shouldn't fail, because the page is there | |
+ * in the page tables. But it might just be unreadable, | |
+ * in which case we just give up and fill the result with | |
+ * zeroes. | |
+ */ | |
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) | |
+ clear_page(kaddr); | |
+ kunmap_atomic(kaddr); | |
+ flush_dcache_page(dst); | |
+ } else | |
+ copy_user_highpage(dst, src, va, vma); | |
+} | |
+ | |
/* | |
* copy one vm_area from one task to the other. Assumes the page tables | |
* already present in the new task to be cleared in the whole range | |
@@ -822,8 +848,8 @@ out: | |
static inline unsigned long | |
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |
- pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, | |
- unsigned long addr, int *rss) | |
+ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, | |
+ unsigned long addr, int *rss, struct page *uncow_page) | |
{ | |
unsigned long vm_flags = vma->vm_flags; | |
pte_t pte = *src_pte; | |
@@ -875,6 +901,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |
* in the parent and the child | |
*/ | |
if (is_cow_mapping(vm_flags)) { | |
+#ifdef CONFIG_IPIPE | |
+ if (uncow_page) { | |
+ struct page *old_page = vm_normal_page(vma, addr, pte); | |
+ cow_user_page(uncow_page, old_page, addr, vma); | |
+ pte = mk_pte(uncow_page, vma->vm_page_prot); | |
+ | |
+ if (vm_flags & VM_SHARED) | |
+ pte = pte_mkclean(pte); | |
+ pte = pte_mkold(pte); | |
+ | |
+ page_add_new_anon_rmap(uncow_page, vma, addr); | |
+ rss[!!PageAnon(uncow_page)]++; | |
+ goto out_set_pte; | |
+ } | |
+#endif /* CONFIG_IPIPE */ | |
ptep_set_wrprotect(src_mm, addr, src_pte); | |
pte = pte_wrprotect(pte); | |
} | |
@@ -912,13 +953,27 @@ int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |
int progress = 0; | |
int rss[NR_MM_COUNTERS]; | |
swp_entry_t entry = (swp_entry_t){0}; | |
- | |
+ struct page *uncow_page = NULL; | |
+#ifdef CONFIG_IPIPE | |
+ int do_cow_break = 0; | |
again: | |
+ if (do_cow_break) { | |
+ uncow_page = alloc_page_vma(GFP_HIGHUSER, vma, addr); | |
+ if (uncow_page == NULL) | |
+ return -ENOMEM; | |
+ do_cow_break = 0; | |
+ } | |
+#else | |
+again: | |
+#endif | |
init_rss_vec(rss); | |
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); | |
- if (!dst_pte) | |
+ if (!dst_pte) { | |
+ if (uncow_page) | |
+ page_cache_release(uncow_page); | |
return -ENOMEM; | |
+ } | |
src_pte = pte_offset_map(src_pmd, addr); | |
src_ptl = pte_lockptr(src_mm, src_pmd); | |
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); | |
@@ -941,8 +996,25 @@ again: | |
progress++; | |
continue; | |
} | |
+#ifdef CONFIG_IPIPE | |
+ if (likely(uncow_page == NULL) && likely(pte_present(*src_pte))) { | |
+ if (is_cow_mapping(vma->vm_flags) && | |
+ test_bit(MMF_VM_PINNED, &src_mm->flags) && | |
+ ((vma->vm_flags|src_mm->def_flags) & VM_LOCKED)) { | |
+ arch_leave_lazy_mmu_mode(); | |
+ spin_unlock(src_ptl); | |
+ pte_unmap(src_pte); | |
+ add_mm_rss_vec(dst_mm, rss); | |
+ pte_unmap_unlock(dst_pte, dst_ptl); | |
+ cond_resched(); | |
+ do_cow_break = 1; | |
+ goto again; | |
+ } | |
+ } | |
+#endif | |
entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, | |
- vma, addr, rss); | |
+ vma, addr, rss, uncow_page); | |
+ uncow_page = NULL; | |
if (entry.val) | |
break; | |
progress += 8; | |
@@ -2566,32 +2638,6 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, | |
return same; | |
} | |
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) | |
-{ | |
- /* | |
- * If the source page was a PFN mapping, we don't have | |
- * a "struct page" for it. We do a best-effort copy by | |
- * just copying from the original user address. If that | |
- * fails, we just zero-fill it. Live with it. | |
- */ | |
- if (unlikely(!src)) { | |
- void *kaddr = kmap_atomic(dst); | |
- void __user *uaddr = (void __user *)(va & PAGE_MASK); | |
- | |
- /* | |
- * This really shouldn't fail, because the page is there | |
- * in the page tables. But it might just be unreadable, | |
- * in which case we just give up and fill the result with | |
- * zeroes. | |
- */ | |
- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) | |
- clear_page(kaddr); | |
- kunmap_atomic(kaddr); | |
- flush_dcache_page(dst); | |
- } else | |
- copy_user_highpage(dst, src, va, vma); | |
-} | |
- | |
/* | |
* This routine handles present pages, when users try to write | |
* to a shared page. It is done by copying the page to a new address | |
@@ -4304,3 +4350,38 @@ void copy_user_huge_page(struct page *dst, struct page *src, | |
} | |
} | |
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | |
+ | |
+#ifdef CONFIG_IPIPE | |
+ | |
+int __ipipe_disable_ondemand_mappings(struct task_struct *tsk) | |
+{ | |
+ struct vm_area_struct *vma; | |
+ struct mm_struct *mm; | |
+ int result = 0; | |
+ | |
+ mm = get_task_mm(tsk); | |
+ if (!mm) | |
+ return -EPERM; | |
+ | |
+ down_write(&mm->mmap_sem); | |
+ if (test_bit(MMF_VM_PINNED, &mm->flags)) | |
+ goto done_mm; | |
+ | |
+ for (vma = mm->mmap; vma; vma = vma->vm_next) { | |
+ if (is_cow_mapping(vma->vm_flags) && | |
+ (vma->vm_flags & VM_WRITE)) { | |
+ result = __ipipe_pin_vma(mm, vma); | |
+ if (result < 0) | |
+ goto done_mm; | |
+ } | |
+ } | |
+ set_bit(MMF_VM_PINNED, &mm->flags); | |
+ | |
+ done_mm: | |
+ up_write(&mm->mmap_sem); | |
+ mmput(mm); | |
+ return result; | |
+} | |
+EXPORT_SYMBOL_GPL(__ipipe_disable_ondemand_mappings); | |
+ | |
+#endif | |
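The copy_pte_range()/copy_one_pte() changes above make fork() break copy-on-write eagerly for pinned address spaces, so a memory-locked real-time parent never takes a COW write fault after forking. The trigger condition is open-coded in copy_pte_range(); restated as a predicate, assuming is_cow_mapping() (mm/internal.h in this tree) is in scope:

#include <linux/mm.h>
#include <linux/sched.h>

static inline bool must_uncow(struct mm_struct *src_mm,
                              struct vm_area_struct *vma, pte_t pte)
{
        /* pinned mm + locked, writable private mapping + present pte:
         * copy the page now instead of write-protecting it */
        return pte_present(pte) &&
                is_cow_mapping(vma->vm_flags) &&
                test_bit(MMF_VM_PINNED, &src_mm->flags) &&
                ((vma->vm_flags | src_mm->def_flags) & VM_LOCKED);
}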
diff --git a/mm/mlock.c b/mm/mlock.c | |
index 79b7cf7..e11b57a 100644 | |
--- a/mm/mlock.c | |
+++ b/mm/mlock.c | |
@@ -587,3 +587,28 @@ void user_shm_unlock(size_t size, struct user_struct *user) | |
spin_unlock(&shmlock_user_lock); | |
free_uid(user); | |
} | |
+ | |
+#ifdef CONFIG_IPIPE | |
+int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma) | |
+{ | |
+ int ret, write, len; | |
+ | |
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) | |
+ return 0; | |
+ | |
+ if (!((vma->vm_flags & VM_DONTEXPAND) || | |
+ is_vm_hugetlb_page(vma) || vma == get_gate_vma(mm))) { | |
+ ret = __mlock_vma_pages_range(vma, vma->vm_start, vma->vm_end, | |
+ NULL); | |
+ return ret < 0 ? ret : 0; | |
+ } | |
+ | |
+ write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE; | |
+ len = DIV_ROUND_UP(vma->vm_end, PAGE_SIZE) - vma->vm_start/PAGE_SIZE; | |
+ ret = get_user_pages(current, mm, vma->vm_start, | |
+ len, write, 0, NULL, NULL); | |
+ if (ret < 0) | |
+ return ret; | |
+ return ret == len ? 0 : -EFAULT; | |
+} | |
+#endif | |
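Since vma bounds are page-aligned, the len computed in __ipipe_pin_vma() reduces to a plain shift; the equivalent form below is shown only to make the page count explicit.

#include <linux/mm.h>

static inline int vma_page_count(const struct vm_area_struct *vma)
{
        /* same value as DIV_ROUND_UP(vma->vm_end, PAGE_SIZE) -
         * vma->vm_start / PAGE_SIZE for page-aligned bounds */
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}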
diff --git a/mm/mmap.c b/mm/mmap.c | |
index 8d25fdc..f1ff02b 100644 | |
--- a/mm/mmap.c | |
+++ b/mm/mmap.c | |
@@ -44,6 +44,10 @@ | |
#include "internal.h" | |
+#ifndef MAP_BRK | |
+#define MAP_BRK 0 | |
+#endif | |
+ | |
#ifndef arch_mmap_check | |
#define arch_mmap_check(addr, len, flags) (0) | |
#endif | |
@@ -2618,7 +2622,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) | |
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; | |
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); | |
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED | MAP_BRK); | |
if (error & ~PAGE_MASK) | |
return error; | |
diff --git a/mm/mmu_context.c b/mm/mmu_context.c | |
index 8a8cd02..81a1bf6 100644 | |
--- a/mm/mmu_context.c | |
+++ b/mm/mmu_context.c | |
@@ -21,15 +21,18 @@ void use_mm(struct mm_struct *mm) | |
{ | |
struct mm_struct *active_mm; | |
struct task_struct *tsk = current; | |
+ unsigned long flags; | |
task_lock(tsk); | |
active_mm = tsk->active_mm; | |
+ ipipe_mm_switch_protect(flags); | |
if (active_mm != mm) { | |
atomic_inc(&mm->mm_count); | |
tsk->active_mm = mm; | |
} | |
tsk->mm = mm; | |
- switch_mm(active_mm, mm, tsk); | |
+ __switch_mm(active_mm, mm, tsk); | |
+ ipipe_mm_switch_unprotect(flags); | |
task_unlock(tsk); | |
if (active_mm != mm) | |
diff --git a/mm/mprotect.c b/mm/mprotect.c | |
index 94722a4..b3d03a0 100644 | |
--- a/mm/mprotect.c | |
+++ b/mm/mprotect.c | |
@@ -228,6 +228,12 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, | |
pages = hugetlb_change_protection(vma, start, end, newprot); | |
else | |
pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); | |
+#ifdef CONFIG_IPIPE | |
+ if (test_bit(MMF_VM_PINNED, &mm->flags) && | |
+ ((vma->vm_flags | mm->def_flags) & VM_LOCKED) && | |
+ (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) | |
+ __ipipe_pin_vma(mm, vma); | |
+#endif | |
mmu_notifier_invalidate_range_end(mm, start, end); | |
return pages; | |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c | |
index d365724..4545be5 100644 | |
--- a/mm/vmalloc.c | |
+++ b/mm/vmalloc.c | |
@@ -191,6 +191,8 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end, | |
return err; | |
} while (pgd++, addr = next, addr != end); | |
+ __ipipe_pin_range_globally(start, end); | |
+ | |
return nr; | |
} | |
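Both ioremap_page_range() and vmap_page_range_noflush() now call __ipipe_pin_range_globally(), which eagerly propagates new kernel mappings into every page table: vmalloc faults are normally fixed up lazily in the page-fault handler, a path the head domain must never enter. On !CONFIG_IPIPE builds the hook has to compile away; a plausible stub, stated as an assumption since the declaration is outside this excerpt:

#ifndef CONFIG_IPIPE
static inline void __ipipe_pin_range_globally(unsigned long start,
                                              unsigned long end)
{
        /* no head domain: lazy vmalloc fault fixup remains safe */
}
#endif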