diff --git a/xen/arch/arm/acpi/boot.c b/xen/arch/arm/acpi/boot.c
index 9b29769a10..d3f9b7fde5 100644
--- a/xen/arch/arm/acpi/boot.c
+++ b/xen/arch/arm/acpi/boot.c
@@ -64,13 +64,15 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
total_cpus++;
if ( !enabled )
{
- printk("Skipping disabled CPU entry with 0x%"PRIx64" MPIDR\n", mpidr);
+ printk("Skipping disabled CPU entry with 0x%" PRIx64 " MPIDR\n",
+ mpidr);
return;
}
- if ( enabled_cpus >= NR_CPUS )
+ if ( enabled_cpus >= NR_CPUS )
{
- printk("NR_CPUS limit of %d reached, Processor %d/0x%"PRIx64" ignored.\n",
+ printk("NR_CPUS limit of %d reached, Processor %d/0x%" PRIx64
+ " ignored.\n",
NR_CPUS, total_cpus, mpidr);
return;
}
@@ -80,7 +82,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
if ( bootcpu_valid )
{
- printk("Firmware bug, duplicate boot CPU MPIDR: 0x%"PRIx64" in MADT\n",
+ printk("Firmware bug, duplicate boot CPU MPIDR: 0x%" PRIx64
+ " in MADT\n",
mpidr);
return;
}
@@ -97,7 +100,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
if ( cpu_logical_map(i) == mpidr )
{
- printk("Firmware bug, duplicate CPU MPIDR: 0x%"PRIx64" in MADT\n",
+ printk("Firmware bug, duplicate CPU MPIDR: 0x%" PRIx64
+ " in MADT\n",
mpidr);
return;
}
@@ -105,15 +109,14 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
if ( !acpi_psci_present() )
{
- printk("PSCI not present, skipping CPU MPIDR 0x%"PRIx64"\n",
- mpidr);
+ printk("PSCI not present, skipping CPU MPIDR 0x%" PRIx64 "\n", mpidr);
return;
}
if ( (rc = arch_cpu_init(enabled_cpus, NULL)) < 0 )
{
- printk("cpu%d: init failed (0x%"PRIx64" MPIDR): %d\n",
- enabled_cpus, mpidr, rc);
+ printk("cpu%d: init failed (0x%" PRIx64 " MPIDR): %d\n", enabled_cpus,
+ mpidr, rc);
return;
}
@@ -123,12 +126,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
enabled_cpus++;
}
-static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
- const unsigned long end)
+static int __init acpi_parse_gic_cpu_interface(
+ struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor =
- container_of(header, struct acpi_madt_generic_interrupt, header);
+ container_of(header, struct acpi_madt_generic_interrupt, header);
if ( BAD_MADT_ENTRY(processor, end) )
return -EINVAL;
@@ -149,7 +151,7 @@ void __init acpi_smp_init_cpus(void)
* we need for SMP init
*/
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
+ acpi_parse_gic_cpu_interface, 0);
if ( count <= 0 )
{
@@ -180,12 +182,13 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
* we only deal with ACPI 6.0 or newer revision to get GIC and SMP
* boot protocol configuration data, or we will disable ACPI.
*/
- if ( table->revision > 6
- || (table->revision == 6 && fadt->minor_revision >= 0) )
+ if ( table->revision > 6 ||
+ (table->revision == 6 && fadt->minor_revision >= 0) )
return 0;
- printk("Unsupported FADT revision %d.%d, should be 6.0+, will disable ACPI\n",
- table->revision, fadt->minor_revision);
+ printk(
+ "Unsupported FADT revision %d.%d, should be 6.0+, will disable ACPI\n",
+ table->revision, fadt->minor_revision);
return -EINVAL;
}
@@ -219,7 +222,7 @@ static int __init dt_scan_depth1_nodes(const void *fdt, int node,
* Return 1 as soon as we encounter a node at depth 1 that is
* not the /chosen node.
*/
- if (depth == 1 && (strcmp(uname, "chosen") != 0))
+ if ( depth == 1 && (strcmp(uname, "chosen") != 0) )
return 1;
return 0;
}
@@ -246,9 +249,10 @@ int __init acpi_boot_table_init(void)
* - the device tree is not empty (it has more than just a /chosen node)
* and ACPI has not been force enabled (acpi=force)
*/
- if ( param_acpi_off || ( !param_acpi_force
- && device_tree_for_each_node(device_tree_flattened,
- dt_scan_depth1_nodes, NULL)))
+ if ( param_acpi_off ||
+ (!param_acpi_force &&
+ device_tree_for_each_node(device_tree_flattened,
+ dt_scan_depth1_nodes, NULL)) )
goto disable;
/*
@@ -261,8 +265,8 @@ int __init acpi_boot_table_init(void)
error = acpi_table_init();
if ( error )
{
- printk("%s: Unable to initialize table parser (%d)\n",
- __FUNCTION__, error);
+ printk("%s: Unable to initialize table parser (%d)\n", __FUNCTION__,
+ error);
goto disable;
}
diff --git a/xen/arch/arm/acpi/domain_build.c b/xen/arch/arm/acpi/domain_build.c
index 1b1cfabb00..d82a714d06 100644
--- a/xen/arch/arm/acpi/domain_build.c
+++ b/xen/arch/arm/acpi/domain_build.c
@@ -39,8 +39,8 @@ static int __init acpi_iomem_deny_access(struct domain *d)
return rc;
/* TODO: Deny MMIO access for SMMU, GIC ITS */
- status = acpi_get_table(ACPI_SIG_SPCR, 0,
- (struct acpi_table_header **)&spcr);
+ status =
+ acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&spcr);
if ( ACPI_FAILURE(status) )
{
@@ -67,14 +67,14 @@ static int __init acpi_route_spis(struct domain *d)
* Route the IRQ to hardware domain and permit the access.
* The interrupt type will be set by set by the hardware domain.
*/
- for( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ )
+ for ( i = NR_LOCAL_IRQS; i < vgic_num_irqs(d); i++ )
{
/*
* TODO: Exclude the SPIs SMMU uses which should not be routed to
* the hardware domain.
*/
desc = irq_to_desc(i);
- if ( desc->action != NULL)
+ if ( desc->action != NULL )
continue;
/* XXX: Shall we use a proper devname? */
@@ -89,9 +89,9 @@ static int __init acpi_route_spis(struct domain *d)
static int __init acpi_make_hypervisor_node(const struct kernel_info *kinfo,
struct membank tbl_add[])
{
- const char compat[] =
- "xen,xen-"__stringify(XEN_VERSION)"."__stringify(XEN_SUBVERSION)"\0"
- "xen,xen";
+ const char compat[] = "xen,xen-" __stringify(XEN_VERSION) "." __stringify(
+ XEN_SUBVERSION) "\0"
+ "xen,xen";
int res;
/* Convenience alias */
void *fdt = kinfo->fdt;
@@ -176,7 +176,7 @@ static int __init create_acpi_dtb(struct kernel_info *kinfo,
return 0;
- err:
+err:
printk("Device tree generation failed (%d).\n", ret);
xfree(kinfo->fdt);
return -EINVAL;
@@ -189,27 +189,23 @@ static void __init acpi_map_other_tables(struct domain *d)
u64 addr, size;
/* Map all ACPI tables to Dom0 using 1:1 mappings. */
- for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
+ for ( i = 0; i < acpi_gbl_root_table_list.count; i++ )
{
addr = acpi_gbl_root_table_list.tables[i].address;
size = acpi_gbl_root_table_list.tables[i].length;
- res = map_regions_p2mt(d,
- gaddr_to_gfn(addr),
- PFN_UP(size),
- maddr_to_mfn(addr),
- p2m_mmio_direct_c);
+ res = map_regions_p2mt(d, gaddr_to_gfn(addr), PFN_UP(size),
+ maddr_to_mfn(addr), p2m_mmio_direct_c);
if ( res )
{
- panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64
- " - 0x%"PRIx64" in domain\n",
- addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
+ panic(XENLOG_ERR "Unable to map ACPI region 0x%" PRIx64
+ " - 0x%" PRIx64 " in domain\n",
+ addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
}
}
}
static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[])
{
-
struct acpi_table_rsdp *rsdp = NULL;
u64 addr;
u64 table_size = sizeof(struct acpi_table_rsdp);
@@ -217,14 +213,14 @@ static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[])
u8 checksum;
addr = acpi_os_get_root_pointer();
- if ( !addr )
+ if ( !addr )
{
printk("Unable to get acpi root pointer\n");
return -EINVAL;
}
rsdp = acpi_os_map_memory(addr, table_size);
- base_ptr = d->arch.efi_acpi_table
- + acpi_get_table_offset(tbl_add, TBL_RSDP);
+ base_ptr =
+ d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_RSDP);
memcpy(base_ptr, rsdp, table_size);
acpi_os_unmap_memory(rsdp, table_size);
@@ -234,8 +230,8 @@ static int __init acpi_create_rsdp(struct domain *d, struct membank tbl_add[])
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, rsdp), table_size);
rsdp->checksum = rsdp->checksum - checksum;
- tbl_add[TBL_RSDP].start = d->arch.efi_acpi_gpa
- + acpi_get_table_offset(tbl_add, TBL_RSDP);
+ tbl_add[TBL_RSDP].start =
+ d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_RSDP);
tbl_add[TBL_RSDP].size = table_size;
return 0;
@@ -249,7 +245,7 @@ static void __init acpi_xsdt_modify_entry(u64 entry[],
struct acpi_table_header *table;
u64 size = sizeof(struct acpi_table_header);
- for( i = 0; i < entry_count; i++ )
+ for ( i = 0; i < entry_count; i++ )
{
table = acpi_os_map_memory(entry[i], size);
if ( ACPI_COMPARE_NAME(table->signature, signature) )
@@ -284,10 +280,10 @@ static int __init acpi_create_xsdt(struct domain *d, struct membank tbl_add[])
/* Add place for STAO table in XSDT table */
table_size = table->length + sizeof(u64);
- entry_count = (table->length - sizeof(struct acpi_table_header))
- / sizeof(u64);
- base_ptr = d->arch.efi_acpi_table
- + acpi_get_table_offset(tbl_add, TBL_XSDT);
+ entry_count =
+ (table->length - sizeof(struct acpi_table_header)) / sizeof(u64);
+ base_ptr =
+ d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_XSDT);
memcpy(base_ptr, table, table->length);
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
acpi_os_unmap_memory(rsdp_tbl, sizeof(struct acpi_table_rsdp));
@@ -303,8 +299,8 @@ static int __init acpi_create_xsdt(struct domain *d, struct membank tbl_add[])
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, xsdt), table_size);
xsdt->header.checksum -= checksum;
- tbl_add[TBL_XSDT].start = d->arch.efi_acpi_gpa
- + acpi_get_table_offset(tbl_add, TBL_XSDT);
+ tbl_add[TBL_XSDT].start =
+ d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_XSDT);
tbl_add[TBL_XSDT].size = table_size;
return 0;
@@ -381,7 +377,7 @@ static int __init acpi_create_madt(struct domain *d, struct membank tbl_add[])
}
gicd = container_of(header, struct acpi_madt_generic_distributor, header);
memcpy(base_ptr + table_size, gicd,
- sizeof(struct acpi_madt_generic_distributor));
+ sizeof(struct acpi_madt_generic_distributor));
table_size += sizeof(struct acpi_madt_generic_distributor);
/* Add other subtables. */
@@ -424,18 +420,19 @@ static int __init acpi_create_fadt(struct domain *d, struct membank tbl_add[])
}
table_size = table->length;
- base_ptr = d->arch.efi_acpi_table
- + acpi_get_table_offset(tbl_add, TBL_FADT);
+ base_ptr =
+ d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_FADT);
memcpy(base_ptr, table, table_size);
fadt = (struct acpi_table_fadt *)base_ptr;
/* Set PSCI_COMPLIANT and PSCI_USE_HVC */
- fadt->arm_boot_flags |= (ACPI_FADT_PSCI_COMPLIANT | ACPI_FADT_PSCI_USE_HVC);
+ fadt->arm_boot_flags |=
+ (ACPI_FADT_PSCI_COMPLIANT | ACPI_FADT_PSCI_USE_HVC);
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, fadt), table_size);
fadt->header.checksum -= checksum;
- tbl_add[TBL_FADT].start = d->arch.efi_acpi_gpa
- + acpi_get_table_offset(tbl_add, TBL_FADT);
+ tbl_add[TBL_FADT].start =
+ d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_FADT);
tbl_add[TBL_FADT].size = table_size;
return 0;
@@ -485,8 +482,8 @@ static int __init estimate_acpi_efi_size(struct domain *d,
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
acpi_size += ROUNDUP(sizeof(struct acpi_table_rsdp), 8);
- d->arch.efi_acpi_len = PAGE_ALIGN(ROUNDUP(efi_size, 8)
- + ROUNDUP(acpi_size, 8));
+ d->arch.efi_acpi_len =
+ PAGE_ALIGN(ROUNDUP(efi_size, 8) + ROUNDUP(acpi_size, 8));
return 0;
}
@@ -518,7 +515,8 @@ int __init prepare_acpi(struct domain *d, struct kernel_info *kinfo)
d->arch.efi_acpi_gpa = kinfo->gnttab_start;
if ( kinfo->gnttab_size < d->arch.efi_acpi_len )
{
- printk("The grant table region is not enough to fit the ACPI tables!\n");
+ printk(
+ "The grant table region is not enough to fit the ACPI tables!\n");
return -EINVAL;
}
@@ -547,15 +545,13 @@ int __init prepare_acpi(struct domain *d, struct kernel_info *kinfo)
acpi_create_efi_mmap_table(d, &kinfo->mem, tbl_add);
/* Map the EFI and ACPI tables to Dom0 */
- rc = map_regions_p2mt(d,
- gaddr_to_gfn(d->arch.efi_acpi_gpa),
- PFN_UP(d->arch.efi_acpi_len),
- virt_to_mfn(d->arch.efi_acpi_table),
- p2m_mmio_direct_c);
+ rc = map_regions_p2mt(
+ d, gaddr_to_gfn(d->arch.efi_acpi_gpa), PFN_UP(d->arch.efi_acpi_len),
+ virt_to_mfn(d->arch.efi_acpi_table), p2m_mmio_direct_c);
if ( rc != 0 )
{
- printk(XENLOG_ERR "Unable to map EFI/ACPI table 0x%"PRIx64
- " - 0x%"PRIx64" in domain %d\n",
+ printk(XENLOG_ERR "Unable to map EFI/ACPI table 0x%" PRIx64
+ " - 0x%" PRIx64 " in domain %d\n",
d->arch.efi_acpi_gpa & PAGE_MASK,
PAGE_ALIGN(d->arch.efi_acpi_gpa + d->arch.efi_acpi_len) - 1,
d->domain_id);
diff --git a/xen/arch/arm/acpi/lib.c b/xen/arch/arm/acpi/lib.c
index 4fc6e17322..a5f04fe02e 100644
--- a/xen/arch/arm/acpi/lib.c
+++ b/xen/arch/arm/acpi/lib.c
@@ -40,13 +40,13 @@ char *__acpi_map_table(paddr_t phys, unsigned long size)
while ( mapped_size < size )
{
if ( ++idx > FIXMAP_ACPI_END )
- return NULL; /* cannot handle this */
+ return NULL; /* cannot handle this */
phys += PAGE_SIZE;
set_fixmap(idx, maddr_to_mfn(phys), PAGE_HYPERVISOR);
mapped_size += PAGE_SIZE;
}
- return ((char *) base + offset);
+ return ((char *)base + offset);
}
/* True to indicate PSCI 0.2+ is implemented */
diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
index 52ed7edf69..5e2240d158 100644
--- a/xen/arch/arm/alternative.c
+++ b/xen/arch/arm/alternative.c
@@ -40,7 +40,8 @@
extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
-struct alt_region {
+struct alt_region
+{
const struct alt_instr *begin;
const struct alt_instr *end;
};
@@ -67,8 +68,8 @@ static bool branch_insn_requires_update(const struct alt_instr *alt,
BUG();
}
-static u32 get_alt_insn(const struct alt_instr *alt,
- const u32 *insnptr, const u32 *altinsnptr)
+static u32 get_alt_insn(const struct alt_instr *alt, const u32 *insnptr,
+ const u32 *altinsnptr)
{
u32 insn;
@@ -97,8 +98,8 @@ static u32 get_alt_insn(const struct alt_instr *alt,
}
static void patch_alternative(const struct alt_instr *alt,
- const uint32_t *origptr,
- uint32_t *updptr, int nr_inst)
+ const uint32_t *origptr, uint32_t *updptr,
+ int nr_inst)
{
const uint32_t *replptr;
unsigned int i;
@@ -159,7 +160,7 @@ static int __apply_alternatives(const struct alt_region *region,
/* Ensure the new instructions reached the memory and nuke */
clean_and_invalidate_dcache_va_range(origptr,
- (sizeof (*origptr) * nr_inst));
+ (sizeof(*origptr) * nr_inst));
}
/* Nuke the instruction cache */
@@ -229,14 +230,15 @@ void __init apply_alternatives_all(void)
ASSERT(system_state != SYS_STATE_active);
- /* better not try code patching on a live SMP system */
+ /* better not try code patching on a live SMP system */
ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
/* stop_machine_run should never fail at this stage of the boot */
BUG_ON(ret);
}
-int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end)
+int apply_alternatives(const struct alt_instr *start,
+ const struct alt_instr *end)
{
const struct alt_region region = {
.begin = start,
diff --git a/xen/arch/arm/arm32/asm-offsets.c b/xen/arch/arm/arm32/asm-offsets.c
index 2116ba5b95..3319bddac2 100644
--- a/xen/arch/arm/arm32/asm-offsets.c
+++ b/xen/arch/arm/arm32/asm-offsets.c
@@ -12,67 +12,65 @@
#include <asm/current.h>
#include <asm/procinfo.h>
-#define DEFINE(_sym, _val) \
- asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
- : : "i" (_val) )
-#define BLANK() \
- asm volatile ( "\n.ascii\"==><==\"" : : )
-#define OFFSET(_sym, _str, _mem) \
- DEFINE(_sym, offsetof(_str, _mem));
+#define DEFINE(_sym, _val) \
+ asm volatile("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
+ : \
+ : "i"(_val))
+#define BLANK() asm volatile("\n.ascii\"==><==\"" : :)
+#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem));
void __dummy__(void)
{
- OFFSET(UREGS_sp, struct cpu_user_regs, sp);
- OFFSET(UREGS_lr, struct cpu_user_regs, lr);
- OFFSET(UREGS_pc, struct cpu_user_regs, pc);
- OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr);
- OFFSET(UREGS_hsr, struct cpu_user_regs, hsr);
+ OFFSET(UREGS_sp, struct cpu_user_regs, sp);
+ OFFSET(UREGS_lr, struct cpu_user_regs, lr);
+ OFFSET(UREGS_pc, struct cpu_user_regs, pc);
+ OFFSET(UREGS_cpsr, struct cpu_user_regs, cpsr);
+ OFFSET(UREGS_hsr, struct cpu_user_regs, hsr);
- OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr);
- OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr);
+ OFFSET(UREGS_LR_usr, struct cpu_user_regs, lr_usr);
+ OFFSET(UREGS_SP_usr, struct cpu_user_regs, sp_usr);
- OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc);
- OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc);
- OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc);
+ OFFSET(UREGS_SP_svc, struct cpu_user_regs, sp_svc);
+ OFFSET(UREGS_LR_svc, struct cpu_user_regs, lr_svc);
+ OFFSET(UREGS_SPSR_svc, struct cpu_user_regs, spsr_svc);
- OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt);
- OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt);
- OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+ OFFSET(UREGS_SP_abt, struct cpu_user_regs, sp_abt);
+ OFFSET(UREGS_LR_abt, struct cpu_user_regs, lr_abt);
+ OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
- OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und);
- OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und);
- OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+ OFFSET(UREGS_SP_und, struct cpu_user_regs, sp_und);
+ OFFSET(UREGS_LR_und, struct cpu_user_regs, lr_und);
+ OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
- OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq);
- OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq);
- OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+ OFFSET(UREGS_SP_irq, struct cpu_user_regs, sp_irq);
+ OFFSET(UREGS_LR_irq, struct cpu_user_regs, lr_irq);
+ OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
- OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq);
- OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq);
- OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+ OFFSET(UREGS_SP_fiq, struct cpu_user_regs, sp_fiq);
+ OFFSET(UREGS_LR_fiq, struct cpu_user_regs, lr_fiq);
+ OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
- OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq);
- OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq);
- OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq);
- OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq);
- OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq);
+ OFFSET(UREGS_R8_fiq, struct cpu_user_regs, r8_fiq);
+ OFFSET(UREGS_R9_fiq, struct cpu_user_regs, r9_fiq);
+ OFFSET(UREGS_R10_fiq, struct cpu_user_regs, r10_fiq);
+ OFFSET(UREGS_R11_fiq, struct cpu_user_regs, r11_fiq);
+ OFFSET(UREGS_R12_fiq, struct cpu_user_regs, r12_fiq);
- OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
- BLANK();
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr);
+ BLANK();
- DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+ DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
- OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+ OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
- BLANK();
- DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list));
- OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val);
- OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask);
- OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init);
-
- BLANK();
- OFFSET(INITINFO_stack, struct init_info, stack);
+ BLANK();
+ DEFINE(PROCINFO_sizeof, sizeof(struct proc_info_list));
+ OFFSET(PROCINFO_cpu_val, struct proc_info_list, cpu_val);
+ OFFSET(PROCINFO_cpu_mask, struct proc_info_list, cpu_mask);
+ OFFSET(PROCINFO_cpu_init, struct proc_info_list, cpu_init);
+ BLANK();
+ OFFSET(INITINFO_stack, struct init_info, stack);
}
/*
diff --git a/xen/arch/arm/arm32/domain.c b/xen/arch/arm/arm32/domain.c
index 2ca1bf03c8..9e47df273d 100644
--- a/xen/arch/arm/arm32/domain.c
+++ b/xen/arch/arm/arm32/domain.c
@@ -7,27 +7,49 @@
/* C(hyp,user), hyp is Xen internal name, user is user API name. */
-#define ALLREGS \
- C(r0,r0_usr); C(r1,r1_usr); C(r2,r2_usr); C(r3,r3_usr); \
- C(r4,r4_usr); C(r5,r5_usr); C(r6,r6_usr); C(r7,r7_usr); \
- C(r8,r8_usr); C(r9,r9_usr); C(r10,r10_usr); C(r11,r11_usr); \
- C(r12,r12_usr); \
- C(sp_usr,sp_usr); \
- C(lr,lr_usr); \
- C(spsr_irq,spsr_irq); C(lr_irq,lr_irq); C(sp_irq,sp_irq); \
- C(spsr_svc,spsr_svc); C(lr_svc,lr_svc); C(sp_svc,sp_svc); \
- C(spsr_abt,spsr_abt); C(lr_abt,lr_abt); C(sp_abt,sp_abt); \
- C(spsr_und,spsr_und); C(lr_und,lr_und); C(sp_und,sp_und); \
- C(spsr_fiq,spsr_fiq); C(sp_fiq,sp_fiq); C(sp_fiq,sp_fiq); \
- C(r8_fiq,r8_fiq); C(r9_fiq,r9_fiq); \
- C(r10_fiq,r10_fiq); C(r11_fiq,r11_fiq); C(r12_fiq,r12_fiq); \
- C(pc,pc32); \
- C(cpsr,cpsr)
+#define ALLREGS \
+ C(r0, r0_usr); \
+ C(r1, r1_usr); \
+ C(r2, r2_usr); \
+ C(r3, r3_usr); \
+ C(r4, r4_usr); \
+ C(r5, r5_usr); \
+ C(r6, r6_usr); \
+ C(r7, r7_usr); \
+ C(r8, r8_usr); \
+ C(r9, r9_usr); \
+ C(r10, r10_usr); \
+ C(r11, r11_usr); \
+ C(r12, r12_usr); \
+ C(sp_usr, sp_usr); \
+ C(lr, lr_usr); \
+ C(spsr_irq, spsr_irq); \
+ C(lr_irq, lr_irq); \
+ C(sp_irq, sp_irq); \
+ C(spsr_svc, spsr_svc); \
+ C(lr_svc, lr_svc); \
+ C(sp_svc, sp_svc); \
+ C(spsr_abt, spsr_abt); \
+ C(lr_abt, lr_abt); \
+ C(sp_abt, sp_abt); \
+ C(spsr_und, spsr_und); \
+ C(lr_und, lr_und); \
+ C(sp_und, sp_und); \
+ C(spsr_fiq, spsr_fiq); \
+ C(sp_fiq, sp_fiq); \
+ C(sp_fiq, sp_fiq); \
+ C(r8_fiq, r8_fiq); \
+ C(r9_fiq, r9_fiq); \
+ C(r10_fiq, r10_fiq); \
+ C(r11_fiq, r11_fiq); \
+ C(r12_fiq, r12_fiq); \
+ C(pc, pc32); \
+ C(cpsr, cpsr)
void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
struct vcpu_guest_core_regs *regs)
{
-#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
+#define C(hyp, user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
ALLREGS;
#undef C
}
@@ -35,7 +57,7 @@ void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
const struct vcpu_guest_core_regs *regs)
{
-#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
+#define C(hyp, user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
ALLREGS;
#undef C
}
diff --git a/xen/arch/arm/arm32/domctl.c b/xen/arch/arm/arm32/domctl.c
index fbf9d3bddc..b150fdb0e7 100644
--- a/xen/arch/arm/arm32/domctl.c
+++ b/xen/arch/arm/arm32/domctl.c
@@ -12,9 +12,9 @@
#include <public/domctl.h>
long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d,
- XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
- switch ( domctl->cmd )
+ switch (domctl->cmd)
{
case XEN_DOMCTL_set_address_size:
return domctl->u.address_size.size == 32 ? 0 : -EINVAL;
diff --git a/xen/arch/arm/arm32/insn.c b/xen/arch/arm/arm32/insn.c
index 49953a042a..53ae969e4a 100644
--- a/xen/arch/arm/arm32/insn.c
+++ b/xen/arch/arm/arm32/insn.c
@@ -1,27 +1,27 @@
/*
- * Copyright (C) 2017 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
#include <xen/lib.h>
#include <xen/bitops.h>
#include <xen/sizes.h>
#include <asm/insn.h>
/* Mask of branch instructions' immediate. */
-#define BRANCH_INSN_IMM_MASK GENMASK(23, 0)
+#define BRANCH_INSN_IMM_MASK GENMASK(23, 0)
/* Shift of branch instructions' immediate. */
-#define BRANCH_INSN_IMM_SHIFT 0
+#define BRANCH_INSN_IMM_SHIFT 0
static uint32_t branch_insn_encode_immediate(uint32_t insn, int32_t offset)
{
@@ -52,7 +52,7 @@ int32_t aarch32_get_branch_offset(uint32_t insn)
uint32_t imm;
/* Retrieve imm from branch instruction. */
- imm = ( insn >> BRANCH_INSN_IMM_SHIFT ) & BRANCH_INSN_IMM_MASK;
+ imm = (insn >> BRANCH_INSN_IMM_SHIFT) & BRANCH_INSN_IMM_MASK;
/*
* Check the imm signed bit. If the imm is a negative value, we
@@ -73,8 +73,7 @@ uint32_t aarch32_set_branch_offset(uint32_t insn, int32_t offset)
/* B/BL support [-32M, 32M) offset (see ARM DDI 0406C.c A4.3). */
if ( offset < -SZ_32M || offset >= SZ_32M )
{
- printk(XENLOG_ERR
- "%s: new branch offset out of range.\n", __func__);
+ printk(XENLOG_ERR "%s: new branch offset out of range.\n", __func__);
return BUG_OPCODE;
}
diff --git a/xen/arch/arm/arm32/lib/assembler.h b/xen/arch/arm/arm32/lib/assembler.h
index 6de2638a36..c32958f260 100644
--- a/xen/arch/arm/arm32/lib/assembler.h
+++ b/xen/arch/arm/arm32/lib/assembler.h
@@ -22,12 +22,12 @@
#endif
// No Thumb, hence:
-#define W(instr) instr
-#define ARM(instr...) instr
+#define W(instr) instr
+#define ARM(instr...) instr
#define THUMB(instr...)
#ifdef CONFIG_ARM_UNWIND
-#define UNWIND(code...) code
+#define UNWIND(code...) code
#else
#define UNWIND(code...)
#endif
@@ -36,34 +36,34 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
-#define lspull lsr
-#define lspush lsl
-#define get_byte_0 lsl #0
-#define get_byte_1 lsr #8
-#define get_byte_2 lsr #16
-#define get_byte_3 lsr #24
-#define put_byte_0 lsl #0
-#define put_byte_1 lsl #8
-#define put_byte_2 lsl #16
-#define put_byte_3 lsl #24
+#define lspull lsr
+#define lspush lsl
+#define get_byte_0 lsl #0
+#define get_byte_1 lsr #8
+#define get_byte_2 lsr #16
+#define get_byte_3 lsr #24
+#define put_byte_0 lsl #0
+#define put_byte_1 lsl #8
+#define put_byte_2 lsl #16
+#define put_byte_3 lsl #24
#else
-#define lspull lsl
-#define lspush lsr
-#define get_byte_0 lsr #24
-#define get_byte_1 lsr #16
-#define get_byte_2 lsr #8
-#define get_byte_3 lsl #0
-#define put_byte_0 lsl #24
-#define put_byte_1 lsl #16
-#define put_byte_2 lsl #8
-#define put_byte_3 lsl #0
+#define lspull lsl
+#define lspush lsr
+#define get_byte_0 lsr #24
+#define get_byte_1 lsr #16
+#define get_byte_2 lsr #8
+#define get_byte_3 lsl #0
+#define put_byte_0 lsl #24
+#define put_byte_1 lsl #16
+#define put_byte_2 lsl #8
+#define put_byte_3 lsl #0
#endif
/*
* Data preload for architectures that support it
*/
#if __LINUX_ARM_ARCH__ >= 5
-#define PLD(code...) code
+#define PLD(code...) code
#else
#define PLD(code...)
#endif
@@ -87,42 +87,45 @@
* Enable and disable interrupts
*/
#if __LINUX_ARM_ARCH__ >= 6
- .macro disable_irq_notrace
- cpsid i
- .endm
+.macro disable_irq_notrace cpsid
+ i.endm
- .macro enable_irq_notrace
- cpsie i
- .endm
+ .macro enable_irq_notrace cpsie i
+ .endm
#else
- .macro disable_irq_notrace
- msr cpsr_c, #PSR_I_BIT | SVC_MODE
- .endm
+.macro disable_irq_notrace msr cpsr_c,
+ #PSR_I_BIT | SVC_MODE
+ .endm
- .macro enable_irq_notrace
- msr cpsr_c, #SVC_MODE
- .endm
+ .macro enable_irq_notrace msr cpsr_c,
+ #SVC_MODE
+ .endm
#endif
- .macro asm_trace_hardirqs_off
+ .macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
- stmdb sp!, {r0-r3, ip, lr}
- bl trace_hardirqs_off
- ldmia sp!, {r0-r3, ip, lr}
+ stmdb sp !,
+ {r0 - r3, ip, lr} bl trace_hardirqs_off ldmia sp !,
+{
+ r0 - r3, ip, lr
+}
#endif
- .endm
+.endm
- .macro asm_trace_hardirqs_on_cond, cond
+ .macro asm_trace_hardirqs_on_cond,
+ cond
#if defined(CONFIG_TRACE_IRQFLAGS)
- /*
- * actually the registers should be pushed and pop'd conditionally, but
- * after bl the flags are certainly clobbered
- */
- stmdb sp!, {r0-r3, ip, lr}
- bl\cond trace_hardirqs_on
- ldmia sp!, {r0-r3, ip, lr}
+ /*
+ * actually the registers should be pushed and pop'd conditionally, but
+ * after bl the flags are certainly clobbered
+ */
+ stmdb sp !,
+ {r0 - r3, ip, lr} bl\cond trace_hardirqs_on ldmia sp !,
+{
+ r0 - r3, ip, lr
+}
#endif
- .endm
+.endm
.macro asm_trace_hardirqs_on
asm_trace_hardirqs_on_cond al
@@ -160,35 +163,34 @@
restore_irqs_notrace \oldcpsr
.endm
-#define USER(x...) \
-9999: x; \
- .pushsection __ex_table,"a"; \
- .align 3; \
- .long 9999b,9001f; \
- .popsection
+#define USER(x...) \
+ 9999: x; \
+ .pushsection __ex_table, "a"; \
+ .align 3; \
+ .long 9999b, 9001f; \
+ .popsection
#ifdef CONFIG_SMP
-#define ALT_SMP(instr...) \
-9998: instr
+#define ALT_SMP(instr...) 9998: instr
/*
* Note: if you get assembler errors from ALT_UP() when building with
* CONFIG_THUMB2_KERNEL, you almost certainly need to use
* ALT_SMP( W(instr) ... )
*/
-#define ALT_UP(instr...) \
- .pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
-9997: instr ;\
- .if . - 9997b != 4 ;\
- .error "ALT_UP() content must assemble to exactly 4 bytes";\
- .endif ;\
- .popsection
-#define ALT_UP_B(label) \
- .equ up_b_offset, label - 9998b ;\
- .pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
- W(b) . + up_b_offset ;\
- .popsection
+#define ALT_UP(instr...) \
+ .pushsection ".alt.smp.init", "a"; \
+ .long 9998b; \
+ 9997: instr; \
+ .if.- 9997b != 4; \
+ .error "ALT_UP() content must assemble to exactly 4 bytes"; \
+ .endif; \
+ .popsection
+#define ALT_UP_B(label) \
+ .equ up_b_offset, label - 9998b; \
+ .pushsection ".alt.smp.init", "a"; \
+ .long 9998b; \
+ W(b).+ up_b_offset; \
+ .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
@@ -202,7 +204,9 @@
#if __LINUX_ARM_ARCH__ >= 7
isb
#elif __LINUX_ARM_ARCH__ == 6
- mcr p15, 0, r0, c7, c5, 4
+ mcr p15,
+ 0, r0, c7, c5,
+ 4
#endif
.endm
@@ -236,9 +240,10 @@
msr cpsr_c, \reg
.endm
#else
- .macro setmode, mode, reg
- msr cpsr_c, #\mode
- .endm
+ .macro setmode,
+ mode, reg msr cpsr_c,
+ #\mode
+ .endm
#endif
/*
@@ -284,27 +289,21 @@
add\cond \ptr, #\rept * \inc
.endm
-#else /* !CONFIG_THUMB2_KERNEL */
+#else /* !CONFIG_THUMB2_KERNEL */
- .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
- .rept \rept
-9999:
- .if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
- .elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
- .else
- .error "Unsupported inc macro argument"
- .endif
+ .macro usracc,
+ instr, reg, ptr, inc, cond, rept, abort,
+ t = T().rept \rept 9999:.if \inc == 1
+ \instr\cond\() b\()\t \reg, [\ptr], #\inc.elseif \inc == 4
+ \instr\cond\()\t \reg, [\ptr],
+ #\inc.else.error "Unsupported inc macro argument"
+ .endif
- .pushsection __ex_table,"a"
- .align 3
- .long 9999b, \abort
- .popsection
- .endr
- .endm
+ .pushsection __ex_table,
+ "a".align 3.long 9999b, \abort.popsection.endr
+ .endm
-#endif /* CONFIG_THUMB2_KERNEL */
+#endif /* CONFIG_THUMB2_KERNEL */
.macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
diff --git a/xen/arch/arm/arm32/lib/bitops.c b/xen/arch/arm/arm32/lib/bitops.c
index 3dca769bf0..8c15c549f5 100644
--- a/xen/arch/arm/arm32/lib/bitops.c
+++ b/xen/arch/arm/arm32/lib/bitops.c
@@ -29,118 +29,119 @@
* XXX: Rework the interface to use unsigned int.
*/
-#define bitop(name, instr) \
-static always_inline bool int_##name(int nr, volatile void *p, bool timeout,\
- unsigned int max_try) \
-{ \
- volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \
- const uint32_t mask = BIT_MASK((unsigned int)nr); \
- unsigned long res, tmp; \
- \
- ASSERT(((vaddr_t)p & 0x3) == 0); \
- prefetchw((const void *)ptr); \
- \
- do \
- { \
- asm volatile ("// " __stringify(name) "\n" \
- " ldrex %2, %1\n" \
- " " __stringify(instr) " %2, %2, %3\n" \
- " strex %0, %2, %1\n" \
- : "=&r" (res), "+Qo" (*ptr), "=&r" (tmp) \
- : "r" (mask)); \
- \
- if ( !res ) \
- break; \
- } while ( !timeout || ((--max_try) > 0) ); \
- \
- return !res; \
-} \
- \
-void name(int nr, volatile void *p) \
-{ \
- if ( !int_##name(nr, p, false, 0) ) \
- ASSERT_UNREACHABLE(); \
-} \
- \
-bool name##_timeout(int nr, volatile void *p, unsigned int max_try) \
-{ \
- return int_##name(nr, p, true, max_try); \
-}
+#define bitop(name, instr) \
+ static always_inline bool int_##name(int nr, volatile void *p, \
+ bool timeout, unsigned int max_try) \
+ { \
+ volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \
+ const uint32_t mask = BIT_MASK((unsigned int)nr); \
+ unsigned long res, tmp; \
+ \
+ ASSERT(((vaddr_t)p & 0x3) == 0); \
+ prefetchw((const void *)ptr); \
+ \
+ do { \
+ asm volatile( \
+ "// " __stringify(name) "\n" \
+ " ldrex %2, %1\n" \
+ " " __stringify( \
+ instr) " %2, %2, %3\n" \
+ " strex %0, %2, %1\n" \
+ : "=&r"(res), "+Qo"(*ptr), "=&r"(tmp) \
+ : "r"(mask)); \
+ \
+ if ( !res ) \
+ break; \
+ } while ( !timeout || ((--max_try) > 0) ); \
+ \
+ return !res; \
+ } \
+ \
+ void name(int nr, volatile void *p) \
+ { \
+ if ( !int_##name(nr, p, false, 0) ) \
+ ASSERT_UNREACHABLE(); \
+ } \
+ \
+ bool name##_timeout(int nr, volatile void *p, unsigned int max_try) \
+ { \
+ return int_##name(nr, p, true, max_try); \
+ }
-#define testop(name, instr) \
-static always_inline bool int_##name(int nr, volatile void *p, int *oldbit, \
- bool timeout, unsigned int max_try) \
-{ \
- volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \
- unsigned int bit = (unsigned int)nr % BITS_PER_WORD; \
- const uint32_t mask = BIT_MASK(bit); \
- unsigned long res, tmp; \
- \
- ASSERT(((vaddr_t)p & 0x3) == 0); \
- smp_mb(); \
- \
- prefetchw((const void *)ptr); \
- \
- do \
- { \
- asm volatile ("// " __stringify(name) "\n" \
- " ldrex %3, %2\n" \
- " lsr %1, %3, %5 // Save old value of bit\n" \
- " " __stringify(instr) " %3, %3, %4 // Toggle bit\n" \
- " strex %0, %3, %2\n" \
- : "=&r" (res), "=&r" (*oldbit), "+Qo" (*ptr), "=&r" (tmp) \
- : "r" (mask), "r" (bit)); \
- \
- if ( !res ) \
- break; \
- } while ( !timeout || ((--max_try) > 0) ); \
- \
- smp_mb(); \
- \
- *oldbit &= 1; \
- \
- return !res; \
-} \
- \
-int name(int nr, volatile void *p) \
-{ \
- int oldbit; \
- \
- if ( !int_##name(nr, p, &oldbit, false, 0) ) \
- ASSERT_UNREACHABLE(); \
- \
- return oldbit; \
-} \
- \
-bool name##_timeout(int nr, volatile void *p, \
- int *oldbit, unsigned int max_try) \
-{ \
- return int_##name(nr, p, oldbit, true, max_try); \
-}
+#define testop(name, instr) \
+ static always_inline bool int_##name(int nr, volatile void *p, \
+ int *oldbit, bool timeout, \
+ unsigned int max_try) \
+ { \
+ volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \
+ unsigned int bit = (unsigned int)nr % BITS_PER_WORD; \
+ const uint32_t mask = BIT_MASK(bit); \
+ unsigned long res, tmp; \
+ \
+ ASSERT(((vaddr_t)p & 0x3) == 0); \
+ smp_mb(); \
+ \
+ prefetchw((const void *)ptr); \
+ \
+ do { \
+ asm volatile( \
+ "// " __stringify( \
+ name) "\n" \
+ " ldrex %3, %2\n" \
+ " lsr %1, %3, %5 // Save old value of bit\n" \
+ " " __stringify( \
+ instr) " %3, %3, %4 // Toggle bit\n" \
+ " strex %0, %3, %2\n" \
+ : "=&r"(res), "=&r"(*oldbit), "+Qo"(*ptr), "=&r"(tmp) \
+ : "r"(mask), "r"(bit)); \
+ \
+ if ( !res ) \
+ break; \
+ } while ( !timeout || ((--max_try) > 0) ); \
+ \
+ smp_mb(); \
+ \
+ *oldbit &= 1; \
+ \
+ return !res; \
+ } \
+ \
+ int name(int nr, volatile void *p) \
+ { \
+ int oldbit; \
+ \
+ if ( !int_##name(nr, p, &oldbit, false, 0) ) \
+ ASSERT_UNREACHABLE(); \
+ \
+ return oldbit; \
+ } \
+ \
+ bool name##_timeout(int nr, volatile void *p, int *oldbit, \
+ unsigned int max_try) \
+ { \
+ return int_##name(nr, p, oldbit, true, max_try); \
+ }
-bitop(change_bit, eor)
-bitop(clear_bit, bic)
-bitop(set_bit, orr)
+bitop(change_bit, eor) bitop(clear_bit, bic) bitop(set_bit, orr)
-testop(test_and_change_bit, eor)
-testop(test_and_clear_bit, bic)
-testop(test_and_set_bit, orr)
+ testop(test_and_change_bit, eor) testop(test_and_clear_bit, bic)
+ testop(test_and_set_bit, orr)
-static always_inline bool int_clear_mask16(uint16_t mask, volatile uint16_t *p,
- bool timeout, unsigned int max_try)
+ static always_inline
+ bool int_clear_mask16(uint16_t mask, volatile uint16_t *p, bool timeout,
+ unsigned int max_try)
{
unsigned long res, tmp;
prefetchw((const uint16_t *)p);
- do
- {
- asm volatile ("// int_clear_mask16\n"
- " ldrexh %2, %1\n"
- " bic %2, %2, %3\n"
- " strexh %0, %2, %1\n"
- : "=&r" (res), "+Qo" (*p), "=&r" (tmp)
- : "r" (mask));
+ do {
+ asm volatile("// int_clear_mask16\n"
+ " ldrexh %2, %1\n"
+ " bic %2, %2, %3\n"
+ " strexh %0, %2, %1\n"
+ : "=&r"(res), "+Qo"(*p), "=&r"(tmp)
+ : "r"(mask));
if ( !res )
break;
diff --git a/xen/arch/arm/arm32/livepatch.c b/xen/arch/arm/arm32/livepatch.c
index 41378a54ae..d640b9c21b 100644
--- a/xen/arch/arm/arm32/livepatch.c
+++ b/xen/arch/arm/arm32/livepatch.c
@@ -64,15 +64,15 @@ void arch_livepatch_apply(struct livepatch_func *func)
*(new_ptr + i) = insn;
/*
- * When we upload the payload, it will go through the data cache
- * (the region is cacheable). Until the data cache is cleaned, the data
- * may not reach the memory. And in the case the data and instruction cache
- * are separated, we may read invalid instruction from the memory because
- * the data cache have not yet synced with the memory. Hence sync it.
- */
+ * When we upload the payload, it will go through the data cache
+ * (the region is cacheable). Until the data cache is cleaned, the data
+ * may not reach the memory. And in the case the data and instruction cache
+ * are separated, we may read invalid instruction from the memory because
+ * the data cache have not yet synced with the memory. Hence sync it.
+ */
if ( func->new_addr )
clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size);
- clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len);
+ clean_and_invalidate_dcache_va_range(new_ptr, sizeof(*new_ptr) * len);
}
/* arch_livepatch_revert shared with ARM 32/ARM 64. */
@@ -81,8 +81,7 @@ int arch_livepatch_verify_elf(const struct livepatch_elf *elf)
{
const Elf_Ehdr *hdr = elf->hdr;
- if ( hdr->e_machine != EM_ARM ||
- hdr->e_ident[EI_CLASS] != ELFCLASS32 )
+ if ( hdr->e_machine != EM_ARM || hdr->e_ident[EI_CLASS] != ELFCLASS32 )
{
dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n",
elf->name);
@@ -107,7 +106,7 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf,
* them. If we do, abort.
*/
if ( sym->name && sym->name[0] == '$' && sym->name[1] == 't' )
- return ( !sym->name[2] || sym->name[2] == '.' );
+ return (!sym->name[2] || sym->name[2] == '.');
return false;
}
@@ -116,7 +115,8 @@ static s32 get_addend(unsigned char type, void *dest)
{
s32 addend = 0;
- switch ( type ) {
+ switch (type)
+ {
case R_ARM_NONE:
/* ignore */
break;
@@ -131,7 +131,7 @@ static s32 get_addend(unsigned char type, void *dest)
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- addend = (*(u32 *)dest & 0x00000FFF);
+ addend = (*(u32 *)dest & 0x00000FFF);
addend |= (*(u32 *)dest & 0x000F0000) >> 4;
/* Addend is to sign-extend ([19:16],[11:0]). */
addend = (s16)addend;
@@ -148,10 +148,11 @@ static s32 get_addend(unsigned char type, void *dest)
return addend;
}
-static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
+static int perform_rel(unsigned char type, void *dest, uint32_t val,
+ s32 addend)
{
-
- switch ( type ) {
+ switch (type)
+ {
case R_ARM_NONE:
/* ignore */
break;
@@ -165,7 +166,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
break;
case R_ARM_MOVW_ABS_NC: /* S + A */
- case R_ARM_MOVT_ABS: /* S + A */
+ case R_ARM_MOVT_ABS: /* S + A */
/* Clear addend if needed . */
if ( addend )
*(u32 *)dest &= 0xFFF0F000;
@@ -213,7 +214,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
break;
default:
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -221,8 +222,7 @@ static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
int arch_livepatch_perform(struct livepatch_elf *elf,
const struct livepatch_elf_sec *base,
- const struct livepatch_elf_sec *rela,
- bool use_rela)
+ const struct livepatch_elf_sec *rela, bool use_rela)
{
unsigned int i;
int rc = 0;
@@ -262,7 +262,9 @@ int arch_livepatch_perform(struct livepatch_elf *elf,
}
else if ( symndx >= elf->nsym )
{
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative symbol wants symbol@%u which is past end!\n",
+ dprintk(XENLOG_ERR,
+ LIVEPATCH
+ "%s: Relative symbol wants symbol@%u which is past end!\n",
elf->name, symndx);
return -EINVAL;
}
@@ -276,10 +278,11 @@ int arch_livepatch_perform(struct livepatch_elf *elf,
val = elf->sym[symndx].sym->st_value; /* S */
rc = perform_rel(type, dest, val, addend);
- switch ( rc )
+ switch (rc)
{
case -EOVERFLOW:
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n",
+ dprintk(XENLOG_ERR,
+ LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n",
elf->name, i, rela->name, base->name);
break;
diff --git a/xen/arch/arm/arm32/traps.c b/xen/arch/arm/arm32/traps.c
index 76f714a168..3067f58f1d 100644
--- a/xen/arch/arm/arm32/traps.c
+++ b/xen/arch/arm/arm32/traps.c
@@ -38,7 +38,8 @@ void do_trap_undefined_instruction(struct cpu_user_regs *regs)
(system_state >= SYS_STATE_active || !is_kernel_inittext(pc)) )
goto die;
- /* PC should be always a multiple of 4, as Xen is using ARM instruction set */
+ /* PC should be always a multiple of 4, as Xen is using ARM instruction set
+ */
if ( regs->pc & 0x3 )
goto die;
diff --git a/xen/arch/arm/arm32/vfp.c b/xen/arch/arm/arm32/vfp.c
index 0069acd297..9636f0362e 100644
--- a/xen/arch/arm/arm32/vfp.c
+++ b/xen/arch/arm/arm32/vfp.c
@@ -23,14 +23,16 @@ void vfp_save_state(struct vcpu *v)
/* Save {d0-d15} */
asm volatile("stc p11, cr0, [%1], #32*4"
- : "=Q" (*v->arch.vfp.fpregs1) : "r" (v->arch.vfp.fpregs1));
+ : "=Q"(*v->arch.vfp.fpregs1)
+ : "r"(v->arch.vfp.fpregs1));
/* 32 x 64 bits registers? */
if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) == 2 )
{
/* Save {d16-d31} */
asm volatile("stcl p11, cr0, [%1], #32*4"
- : "=Q" (*v->arch.vfp.fpregs2) : "r" (v->arch.vfp.fpregs2));
+ : "=Q"(*v->arch.vfp.fpregs2)
+ : "r"(v->arch.vfp.fpregs2));
}
WRITE_CP32(v->arch.vfp.fpexc & ~(FPEXC_EN), FPEXC);
@@ -38,18 +40,21 @@ void vfp_save_state(struct vcpu *v)
void vfp_restore_state(struct vcpu *v)
{
- //uint64_t test[16];
+ // uint64_t test[16];
WRITE_CP32(READ_CP32(FPEXC) | FPEXC_EN, FPEXC);
/* Restore {d0-d15} */
asm volatile("ldc p11, cr0, [%1], #32*4"
- : : "Q" (*v->arch.vfp.fpregs1), "r" (v->arch.vfp.fpregs1));
+ :
+ : "Q"(*v->arch.vfp.fpregs1), "r"(v->arch.vfp.fpregs1));
/* 32 x 64 bits registers? */
- if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) == 2 ) /* 32 x 64 bits registers */
+ if ( (READ_CP32(MVFR0) & MVFR0_A_SIMD_MASK) ==
+ 2 ) /* 32 x 64 bits registers */
/* Restore {d16-d31} */
asm volatile("ldcl p11, cr0, [%1], #32*4"
- : : "Q" (*v->arch.vfp.fpregs2), "r" (v->arch.vfp.fpregs2));
+ :
+ : "Q"(*v->arch.vfp.fpregs2), "r"(v->arch.vfp.fpregs2));
if ( v->arch.vfp.fpexc & FPEXC_EX )
{
diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c
index 280ddb55bf..878f87f744 100644
--- a/xen/arch/arm/arm64/asm-offsets.c
+++ b/xen/arch/arm/arm64/asm-offsets.c
@@ -12,50 +12,49 @@
#include <asm/current.h>
#include <asm/smccc.h>
-#define DEFINE(_sym, _val) \
- asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
- : : "i" (_val) )
-#define BLANK() \
- asm volatile ( "\n.ascii\"==><==\"" : : )
-#define OFFSET(_sym, _str, _mem) \
- DEFINE(_sym, offsetof(_str, _mem));
+#define DEFINE(_sym, _val) \
+ asm volatile("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
+ : \
+ : "i"(_val))
+#define BLANK() asm volatile("\n.ascii\"==><==\"" : :)
+#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem));
void __dummy__(void)
{
- OFFSET(UREGS_X0, struct cpu_user_regs, x0);
- OFFSET(UREGS_X1, struct cpu_user_regs, x1);
- OFFSET(UREGS_LR, struct cpu_user_regs, lr);
+ OFFSET(UREGS_X0, struct cpu_user_regs, x0);
+ OFFSET(UREGS_X1, struct cpu_user_regs, x1);
+ OFFSET(UREGS_LR, struct cpu_user_regs, lr);
- OFFSET(UREGS_SP, struct cpu_user_regs, sp);
- OFFSET(UREGS_PC, struct cpu_user_regs, pc);
- OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
- OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr);
+ OFFSET(UREGS_SP, struct cpu_user_regs, sp);
+ OFFSET(UREGS_PC, struct cpu_user_regs, pc);
+ OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr);
+ OFFSET(UREGS_ESR_el2, struct cpu_user_regs, hsr);
- OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
+ OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
- OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
- OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
- OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
- OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
+ OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq);
+ OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq);
+ OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und);
+ OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt);
- OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
- OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
- OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
+ OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0);
+ OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1);
+ OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1);
- OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1);
- BLANK();
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1);
+ BLANK();
- DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
- OFFSET(CPUINFO_flags, struct cpu_info, flags);
+ DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+ OFFSET(CPUINFO_flags, struct cpu_info, flags);
- OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
+ OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
- BLANK();
- OFFSET(INITINFO_stack, struct init_info, stack);
+ BLANK();
+ OFFSET(INITINFO_stack, struct init_info, stack);
- BLANK();
- OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0);
- OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2);
+ BLANK();
+ OFFSET(SMCCC_RES_a0, struct arm_smccc_res, a0);
+ OFFSET(SMCCC_RES_a2, struct arm_smccc_res, a2);
}
/*
diff --git a/xen/arch/arm/arm64/domain.c b/xen/arch/arm/arm64/domain.c
index dd19098929..a10d0a3604 100644
--- a/xen/arch/arm/arm64/domain.c
+++ b/xen/arch/arm/arm64/domain.c
@@ -7,26 +7,57 @@
/* C(hyp,user), hyp is Xen internal name, user is user API name. */
-#define ALLREGS \
- C(x0,x0); C(x1,x1); C(x2,x2); C(x3,x3); \
- C(x4,x4); C(x5,x5); C(x6,x6); C(x7,x7); \
- C(x8,x8); C(x9,x9); C(x10,x10); C(x11,x11); \
- C(x12,x12); C(x13,x13); C(x14,x14); C(x15,x15); \
- C(x16,x16); C(x17,x17); C(x18,x18); C(x19,x19); \
- C(x20,x20); C(x21,x21); C(x22,x22); C(x23,x23); \
- C(x24,x24); C(x25,x25); C(x26,x26); C(x27,x27); \
- C(x28,x28); C(fp,x29); C(lr,x30); C(pc,pc64); \
- C(cpsr, cpsr); C(spsr_el1, spsr_el1)
+#define ALLREGS \
+ C(x0, x0); \
+ C(x1, x1); \
+ C(x2, x2); \
+ C(x3, x3); \
+ C(x4, x4); \
+ C(x5, x5); \
+ C(x6, x6); \
+ C(x7, x7); \
+ C(x8, x8); \
+ C(x9, x9); \
+ C(x10, x10); \
+ C(x11, x11); \
+ C(x12, x12); \
+ C(x13, x13); \
+ C(x14, x14); \
+ C(x15, x15); \
+ C(x16, x16); \
+ C(x17, x17); \
+ C(x18, x18); \
+ C(x19, x19); \
+ C(x20, x20); \
+ C(x21, x21); \
+ C(x22, x22); \
+ C(x23, x23); \
+ C(x24, x24); \
+ C(x25, x25); \
+ C(x26, x26); \
+ C(x27, x27); \
+ C(x28, x28); \
+ C(fp, x29); \
+ C(lr, x30); \
+ C(pc, pc64); \
+ C(cpsr, cpsr); \
+ C(spsr_el1, spsr_el1)
-#define ALLREGS32 C(spsr_fiq, spsr_fiq); C(spsr_irq,spsr_irq); \
- C(spsr_und,spsr_und); C(spsr_abt,spsr_abt)
+#define ALLREGS32 \
+ C(spsr_fiq, spsr_fiq); \
+ C(spsr_irq, spsr_irq); \
+ C(spsr_und, spsr_und); \
+ C(spsr_abt, spsr_abt)
-#define ALLREGS64 C(sp_el0,sp_el0); C(sp_el1,sp_el1); C(elr_el1,elr_el1)
+#define ALLREGS64 \
+ C(sp_el0, sp_el0); \
+ C(sp_el1, sp_el1); \
+ C(elr_el1, elr_el1)
void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
struct vcpu_guest_core_regs *regs)
{
-#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
+#define C(hyp, user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp
ALLREGS;
if ( is_32bit_domain(vcpu->domain) )
{
@@ -42,7 +73,7 @@ void vcpu_regs_hyp_to_user(const struct vcpu *vcpu,
void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
const struct vcpu_guest_core_regs *regs)
{
-#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
+#define C(hyp, user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user
ALLREGS;
if ( is_32bit_domain(vcpu->domain) )
{
diff --git a/xen/arch/arm/arm64/domctl.c b/xen/arch/arm/arm64/domctl.c
index ab8781fb91..2257213594 100644
--- a/xen/arch/arm/arm64/domctl.c
+++ b/xen/arch/arm/arm64/domctl.c
@@ -26,7 +26,7 @@ static long switch_mode(struct domain *d, enum domain_type type)
d->arch.type = type;
if ( is_64bit_domain(d) )
- for_each_vcpu(d, v)
+ for_each_vcpu (d, v)
vcpu_switch_to_aarch64_mode(v);
return 0;
@@ -35,10 +35,10 @@ static long switch_mode(struct domain *d, enum domain_type type)
long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
- switch ( domctl->cmd )
+ switch (domctl->cmd)
{
case XEN_DOMCTL_set_address_size:
- switch ( domctl->u.address_size.size )
+ switch (domctl->u.address_size.size)
{
case 32:
if ( !cpu_has_el1_32 )
diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c
index 22f2bdebd5..4b475dfad0 100644
--- a/xen/arch/arm/arm64/insn.c
+++ b/xen/arch/arm/arm64/insn.c
@@ -27,194 +27,204 @@
#include <asm/arm64/insn.h>
#define __kprobes
-#define pr_err(fmt, ...) printk(XENLOG_ERR fmt, ## __VA_ARGS__)
+#define pr_err(fmt, ...) printk(XENLOG_ERR fmt, ##__VA_ARGS__)
bool aarch64_insn_is_branch_imm(u32 insn)
{
- return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
- aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
- aarch64_insn_is_bcond(insn));
+ return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn));
}
-static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
- u32 *maskp, int *shiftp)
+static int __kprobes aarch64_get_imm_shift_mask(
+ enum aarch64_insn_imm_type type, u32 *maskp, int *shiftp)
{
- u32 mask;
- int shift;
-
- switch (type) {
- case AARCH64_INSN_IMM_26:
- mask = BIT(26, UL) - 1;
- shift = 0;
- break;
- case AARCH64_INSN_IMM_19:
- mask = BIT(19, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_16:
- mask = BIT(16, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_14:
- mask = BIT(14, UL) - 1;
- shift = 5;
- break;
- case AARCH64_INSN_IMM_12:
- mask = BIT(12, UL) - 1;
- shift = 10;
- break;
- case AARCH64_INSN_IMM_9:
- mask = BIT(9, UL) - 1;
- shift = 12;
- break;
- case AARCH64_INSN_IMM_7:
- mask = BIT(7, UL) - 1;
- shift = 15;
- break;
- case AARCH64_INSN_IMM_6:
- case AARCH64_INSN_IMM_S:
- mask = BIT(6, UL) - 1;
- shift = 10;
- break;
- case AARCH64_INSN_IMM_R:
- mask = BIT(6, UL) - 1;
- shift = 16;
- break;
- default:
- return -EINVAL;
- }
-
- *maskp = mask;
- *shiftp = shift;
-
- return 0;
+ u32 mask;
+ int shift;
+
+ switch (type)
+ {
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26, UL) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14, UL) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12, UL) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9, UL) - 1;
+ shift = 12;
+ break;
+ case AARCH64_INSN_IMM_7:
+ mask = BIT(7, UL) - 1;
+ shift = 15;
+ break;
+ case AARCH64_INSN_IMM_6:
+ case AARCH64_INSN_IMM_S:
+ mask = BIT(6, UL) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_R:
+ mask = BIT(6, UL) - 1;
+ shift = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *maskp = mask;
+ *shiftp = shift;
+
+ return 0;
}
-#define ADR_IMM_HILOSPLIT 2
-#define ADR_IMM_SIZE SZ_2M
-#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
-#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
-#define ADR_IMM_LOSHIFT 29
-#define ADR_IMM_HISHIFT 5
+#define ADR_IMM_HILOSPLIT 2
+#define ADR_IMM_SIZE SZ_2M
+#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_LOSHIFT 29
+#define ADR_IMM_HISHIFT 5
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
- u32 immlo, immhi, mask;
- int shift;
-
- switch (type) {
- case AARCH64_INSN_IMM_ADR:
- shift = 0;
- immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
- immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
- insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
- mask = ADR_IMM_SIZE - 1;
- break;
- default:
- if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
- pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
- }
- }
-
- return (insn >> shift) & mask;
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type)
+ {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
+ immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; | |
+ insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; | |
+ mask = ADR_IMM_SIZE - 1; | |
+ break; | |
+ default: | |
+ if ( aarch64_get_imm_shift_mask(type, &mask, &shift) < 0 ) | |
+ { | |
+ pr_err( | |
+ "aarch64_insn_decode_immediate: unknown immediate encoding %d\n", | |
+ type); | |
+ return 0; | |
+ } | |
+ } | |
+ | |
+ return (insn >> shift) & mask; | |
} | |
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | |
- u32 insn, u64 imm) | |
+ u32 insn, u64 imm) | |
{ | |
- u32 immlo, immhi, mask; | |
- int shift; | |
- | |
- if (insn == AARCH64_BREAK_FAULT) | |
- return AARCH64_BREAK_FAULT; | |
- | |
- switch (type) { | |
- case AARCH64_INSN_IMM_ADR: | |
- shift = 0; | |
- immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; | |
- imm >>= ADR_IMM_HILOSPLIT; | |
- immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; | |
- imm = immlo | immhi; | |
- mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | | |
- (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); | |
- break; | |
- default: | |
- if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { | |
- pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", | |
- type); | |
- return AARCH64_BREAK_FAULT; | |
- } | |
- } | |
- | |
- /* Update the immediate field. */ | |
- insn &= ~(mask << shift); | |
- insn |= (imm & mask) << shift; | |
- | |
- return insn; | |
+ u32 immlo, immhi, mask; | |
+ int shift; | |
+ | |
+ if ( insn == AARCH64_BREAK_FAULT ) | |
+ return AARCH64_BREAK_FAULT; | |
+ | |
+ switch (type) | |
+ { | |
+ case AARCH64_INSN_IMM_ADR: | |
+ shift = 0; | |
+ immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; | |
+ imm >>= ADR_IMM_HILOSPLIT; | |
+ immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; | |
+ imm = immlo | immhi; | |
+ mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | | |
+ (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); | |
+ break; | |
+ default: | |
+ if ( aarch64_get_imm_shift_mask(type, &mask, &shift) < 0 ) | |
+ { | |
+ pr_err( | |
+ "aarch64_insn_encode_immediate: unknown immediate encoding %d\n", | |
+ type); | |
+ return AARCH64_BREAK_FAULT; | |
+ } | |
+ } | |
+ | |
+ /* Update the immediate field. */ | |
+ insn &= ~(mask << shift); | |
+ insn |= (imm & mask) << shift; | |
+ | |
+ return insn; | |
} | |
static inline long branch_imm_common(unsigned long pc, unsigned long addr, | |
- long range) | |
+ long range) | |
{ | |
- long offset; | |
+ long offset; | |
- if ((pc & 0x3) || (addr & 0x3)) { | |
- pr_err("%s: A64 instructions must be word aligned\n", __func__); | |
- return range; | |
- } | |
+ if ( (pc & 0x3) || (addr & 0x3) ) | |
+ { | |
+ pr_err("%s: A64 instructions must be word aligned\n", __func__); | |
+ return range; | |
+ } | |
- offset = ((long)addr - (long)pc); | |
+ offset = ((long)addr - (long)pc); | |
- if (offset < -range || offset >= range) { | |
- pr_err("%s: offset out of range\n", __func__); | |
- return range; | |
- } | |
+ if ( offset < -range || offset >= range ) | |
+ { | |
+ pr_err("%s: offset out of range\n", __func__); | |
+ return range; | |
+ } | |
- return offset; | |
+ return offset; | |
} | |
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, | |
- enum aarch64_insn_branch_type type) | |
+ enum aarch64_insn_branch_type type) | |
{ | |
- u32 insn; | |
- long offset; | |
- | |
- /* | |
- * B/BL support [-128M, 128M) offset | |
- * ARM64 virtual address arrangement guarantees all kernel and module | |
- * texts are within +/-128M. | |
- */ | |
- offset = branch_imm_common(pc, addr, SZ_128M); | |
- if (offset >= SZ_128M) | |
- return AARCH64_BREAK_FAULT; | |
- | |
- switch (type) { | |
- case AARCH64_INSN_BRANCH_LINK: | |
- insn = aarch64_insn_get_bl_value(); | |
- break; | |
- case AARCH64_INSN_BRANCH_NOLINK: | |
- insn = aarch64_insn_get_b_value(); | |
- break; | |
- default: | |
- pr_err("%s: unknown branch encoding %d\n", __func__, type); | |
- return AARCH64_BREAK_FAULT; | |
- } | |
- | |
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
- offset >> 2); | |
+ u32 insn; | |
+ long offset; | |
+ | |
+ /* | |
+ * B/BL support [-128M, 128M) offset | |
+ * ARM64 virtual address arrangement guarantees all kernel and module | |
+ * texts are within +/-128M. | |
+ */ | |
+ offset = branch_imm_common(pc, addr, SZ_128M); | |
+ if ( offset >= SZ_128M ) | |
+ return AARCH64_BREAK_FAULT; | |
+ | |
+ switch (type) | |
+ { | |
+ case AARCH64_INSN_BRANCH_LINK: | |
+ insn = aarch64_insn_get_bl_value(); | |
+ break; | |
+ case AARCH64_INSN_BRANCH_NOLINK: | |
+ insn = aarch64_insn_get_b_value(); | |
+ break; | |
+ default: | |
+ pr_err("%s: unknown branch encoding %d\n", __func__, type); | |
+ return AARCH64_BREAK_FAULT; | |
+ } | |
+ | |
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
+ offset >> 2); | |
} | |
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) | |
{ | |
- return aarch64_insn_get_hint_value() | op; | |
+ return aarch64_insn_get_hint_value() | op; | |
} | |
u32 __kprobes aarch64_insn_gen_nop(void) | |
{ | |
- return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); | |
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); | |
} | |
/* | |
@@ -224,26 +234,29 @@ u32 __kprobes aarch64_insn_gen_nop(void) | |
*/ | |
s32 aarch64_get_branch_offset(u32 insn) | |
{ | |
- s32 imm; | |
- | |
- if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { | |
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); | |
- return (imm << 6) >> 4; | |
- } | |
- | |
- if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || | |
- aarch64_insn_is_bcond(insn)) { | |
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); | |
- return (imm << 13) >> 11; | |
- } | |
- | |
- if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) { | |
- imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); | |
- return (imm << 18) >> 16; | |
- } | |
- | |
- /* Unhandled instruction */ | |
- BUG(); | |
+ s32 imm; | |
+ | |
+ if ( aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ) | |
+ { | |
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); | |
+ return (imm << 6) >> 4; | |
+ } | |
+ | |
+ if ( aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || | |
+ aarch64_insn_is_bcond(insn) ) | |
+ { | |
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn); | |
+ return (imm << 13) >> 11; | |
+ } | |
+ | |
+ if ( aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ) | |
+ { | |
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn); | |
+ return (imm << 18) >> 16; | |
+ } | |
+ | |
+ /* Unhandled instruction */ | |
+ BUG(); | |
} | |
/* | |
@@ -252,21 +265,21 @@ s32 aarch64_get_branch_offset(u32 insn) | |
*/ | |
u32 aarch64_set_branch_offset(u32 insn, s32 offset) | |
{ | |
- if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) | |
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
- offset >> 2); | |
+ if ( aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ) | |
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
+ offset >> 2); | |
- if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || | |
- aarch64_insn_is_bcond(insn)) | |
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, | |
- offset >> 2); | |
+ if ( aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) || | |
+ aarch64_insn_is_bcond(insn) ) | |
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, | |
+ offset >> 2); | |
- if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) | |
- return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, | |
- offset >> 2); | |
+ if ( aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ) | |
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn, | |
+ offset >> 2); | |
- /* Unhandled instruction */ | |
- BUG(); | |
+ /* Unhandled instruction */ | |
+ BUG(); | |
} | |
/* | |
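
All of the encode/decode churn above reduces to one idea: every AArch64 immediate class is a (mask, shift) pair, and encoding is clear-then-insert on the instruction word. A self-contained round trip for the 19-bit immediate at bit 5 (the AARCH64_INSN_IMM_19 geometry used by B.cond and CBZ):

#include <assert.h>
#include <stdint.h>

#define IMM19_MASK  ((UINT32_C(1) << 19) - 1)
#define IMM19_SHIFT 5

static uint32_t encode_imm19(uint32_t insn, uint32_t imm)
{
    insn &= ~(IMM19_MASK << IMM19_SHIFT);      /* clear the field */
    insn |= (imm & IMM19_MASK) << IMM19_SHIFT; /* insert the new value */
    return insn;
}

static uint32_t decode_imm19(uint32_t insn)
{
    return (insn >> IMM19_SHIFT) & IMM19_MASK;
}

int main(void)
{
    uint32_t insn = 0x54000000; /* B.cond with an empty offset field */

    insn = encode_imm19(insn, 0x1234);
    assert(decode_imm19(insn) == 0x1234);
    return 0;
}

The branch helpers then shift byte offsets right by 2 before encoding, because A64 branch immediates count 4-byte words, not bytes.
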
diff --git a/xen/arch/arm/arm64/lib/bitops.c b/xen/arch/arm/arm64/lib/bitops.c | |
index 27688e5418..f6e97ea919 100644 | |
--- a/xen/arch/arm/arm64/lib/bitops.c | |
+++ b/xen/arch/arm/arm64/lib/bitops.c | |
@@ -28,109 +28,110 @@ | |
* XXX: Rework the interface to use unsigned int. | |
*/ | |
-#define bitop(name, instr) \ | |
-static always_inline bool int_##name(int nr, volatile void *p, bool timeout,\ | |
- unsigned int max_try) \ | |
-{ \ | |
- volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \ | |
- const uint32_t mask = BIT_MASK((unsigned int)nr); \ | |
- unsigned long res, tmp; \ | |
- \ | |
- do \ | |
- { \ | |
- asm volatile ("// " __stringify(name) "\n" \ | |
- " ldxr %w2, %1\n" \ | |
- " " __stringify(instr) " %w2, %w2, %w3\n" \ | |
- " stxr %w0, %w2, %1\n" \ | |
- : "=&r" (res), "+Q" (*ptr), "=&r" (tmp) \ | |
- : "r" (mask)); \ | |
- \ | |
- if ( !res ) \ | |
- break; \ | |
- } while ( !timeout || ((--max_try) > 0) ); \ | |
- \ | |
- return !res; \ | |
-} \ | |
- \ | |
-void name(int nr, volatile void *p) \ | |
-{ \ | |
- if ( !int_##name(nr, p, false, 0) ) \ | |
- ASSERT_UNREACHABLE(); \ | |
-} \ | |
- \ | |
-bool name##_timeout(int nr, volatile void *p, unsigned int max_try) \ | |
-{ \ | |
- return int_##name(nr, p, true, max_try); \ | |
-} | |
+#define bitop(name, instr) \ | |
+ static always_inline bool int_##name(int nr, volatile void *p, \ | |
+ bool timeout, unsigned int max_try) \ | |
+ { \ | |
+ volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \ | |
+ const uint32_t mask = BIT_MASK((unsigned int)nr); \ | |
+ unsigned long res, tmp; \ | |
+ \ | |
+ do { \ | |
+ asm volatile("// " __stringify( \ | |
+ name) "\n" \ | |
+ " ldxr %w2, %1\n" \ | |
+ " " __stringify( \ | |
+ instr) " %w2, %w2, %w3\n" \ | |
+ " stxr %w0, %w2, %1\n" \ | |
+ : "=&r"(res), "+Q"(*ptr), "=&r"(tmp) \ | |
+ : "r"(mask)); \ | |
+ \ | |
+ if ( !res ) \ | |
+ break; \ | |
+ } while ( !timeout || ((--max_try) > 0) ); \ | |
+ \ | |
+ return !res; \ | |
+ } \ | |
+ \ | |
+ void name(int nr, volatile void *p) \ | |
+ { \ | |
+ if ( !int_##name(nr, p, false, 0) ) \ | |
+ ASSERT_UNREACHABLE(); \ | |
+ } \ | |
+ \ | |
+ bool name##_timeout(int nr, volatile void *p, unsigned int max_try) \ | |
+ { \ | |
+ return int_##name(nr, p, true, max_try); \ | |
+ } | |
-#define testop(name, instr) \ | |
-static always_inline bool int_##name(int nr, volatile void *p, int *oldbit, \ | |
- bool timeout, unsigned int max_try) \ | |
-{ \ | |
- volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \ | |
- unsigned int bit = (unsigned int)nr % BITS_PER_WORD; \ | |
- const uint32_t mask = BIT_MASK(bit); \ | |
- unsigned long res, tmp; \ | |
- \ | |
- do \ | |
- { \ | |
- asm volatile ("// " __stringify(name) "\n" \ | |
- " ldxr %w3, %2\n" \ | |
- " lsr %w1, %w3, %w5 // Save old value of bit\n" \ | |
- " " __stringify(instr) " %w3, %w3, %w4 // Toggle bit\n" \ | |
- " stlxr %w0, %w3, %2\n" \ | |
- : "=&r" (res), "=&r" (*oldbit), "+Q" (*ptr), "=&r" (tmp) \ | |
- : "r" (mask), "r" (bit) \ | |
- : "memory"); \ | |
- \ | |
- if ( !res ) \ | |
- break; \ | |
- } while ( !timeout || ((--max_try) > 0) ); \ | |
- \ | |
- dmb(ish); \ | |
- \ | |
- *oldbit &= 1; \ | |
- \ | |
- return !res; \ | |
-} \ | |
- \ | |
-int name(int nr, volatile void *p) \ | |
-{ \ | |
- int oldbit; \ | |
- \ | |
- if ( !int_##name(nr, p, &oldbit, false, 0) ) \ | |
- ASSERT_UNREACHABLE(); \ | |
- \ | |
- return oldbit; \ | |
-} \ | |
- \ | |
-bool name##_timeout(int nr, volatile void *p, \ | |
- int *oldbit, unsigned int max_try) \ | |
-{ \ | |
- return int_##name(nr, p, oldbit, true, max_try); \ | |
-} | |
+#define testop(name, instr) \ | |
+ static always_inline bool int_##name(int nr, volatile void *p, \ | |
+ int *oldbit, bool timeout, \ | |
+ unsigned int max_try) \ | |
+ { \ | |
+ volatile uint32_t *ptr = (uint32_t *)p + BIT_WORD((unsigned int)nr); \ | |
+ unsigned int bit = (unsigned int)nr % BITS_PER_WORD; \ | |
+ const uint32_t mask = BIT_MASK(bit); \ | |
+ unsigned long res, tmp; \ | |
+ \ | |
+ do { \ | |
+ asm volatile( \ | |
+ "// " __stringify( \ | |
+ name) "\n" \ | |
+ " ldxr %w3, %2\n" \ | |
+ " lsr %w1, %w3, %w5 // Save old value of bit\n" \ | |
+ " " __stringify( \ | |
+ instr) " %w3, %w3, %w4 // Toggle bit\n" \ | |
+ " stlxr %w0, %w3, %2\n" \ | |
+ : "=&r"(res), "=&r"(*oldbit), "+Q"(*ptr), "=&r"(tmp) \ | |
+ : "r"(mask), "r"(bit) \ | |
+ : "memory"); \ | |
+ \ | |
+ if ( !res ) \ | |
+ break; \ | |
+ } while ( !timeout || ((--max_try) > 0) ); \ | |
+ \ | |
+ dmb(ish); \ | |
+ \ | |
+ *oldbit &= 1; \ | |
+ \ | |
+ return !res; \ | |
+ } \ | |
+ \ | |
+ int name(int nr, volatile void *p) \ | |
+ { \ | |
+ int oldbit; \ | |
+ \ | |
+ if ( !int_##name(nr, p, &oldbit, false, 0) ) \ | |
+ ASSERT_UNREACHABLE(); \ | |
+ \ | |
+ return oldbit; \ | |
+ } \ | |
+ \ | |
+ bool name##_timeout(int nr, volatile void *p, int *oldbit, \ | |
+ unsigned int max_try) \ | |
+ { \ | |
+ return int_##name(nr, p, oldbit, true, max_try); \ | |
+ } | |
-bitop(change_bit, eor) | |
-bitop(clear_bit, bic) | |
-bitop(set_bit, orr) | |
+bitop(change_bit, eor) bitop(clear_bit, bic) bitop(set_bit, orr) | |
-testop(test_and_change_bit, eor) | |
-testop(test_and_clear_bit, bic) | |
-testop(test_and_set_bit, orr) | |
+ testop(test_and_change_bit, eor) testop(test_and_clear_bit, bic) | |
+ testop(test_and_set_bit, orr) | |
-static always_inline bool int_clear_mask16(uint16_t mask, volatile uint16_t *p, | |
- bool timeout, unsigned int max_try) | |
+ static always_inline | |
+ bool int_clear_mask16(uint16_t mask, volatile uint16_t *p, bool timeout, | |
+ unsigned int max_try) | |
{ | |
unsigned long res, tmp; | |
- do | |
- { | |
- asm volatile ("// int_clear_mask16\n" | |
- " ldxrh %w2, %1\n" | |
- " bic %w2, %w2, %w3\n" | |
- " stxrh %w0, %w2, %1\n" | |
- : "=&r" (res), "+Q" (*p), "=&r" (tmp) | |
- : "r" (mask)); | |
+ do { | |
+ asm volatile("// int_clear_mask16\n" | |
+ " ldxrh %w2, %1\n" | |
+ " bic %w2, %w2, %w3\n" | |
+ " stxrh %w0, %w2, %1\n" | |
+ : "=&r"(res), "+Q"(*p), "=&r"(tmp) | |
+ : "r"(mask)); | |
if ( !res ) | |
break; | |
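
The bitop()/testop() bodies above all share one shape: retry an exclusive load/modify/store until the store succeeds, optionally giving up after max_try attempts. A portable sketch of that shape, with compare-and-swap standing in for the LDXR/STXR pair (names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool set_bit_timeout(int nr, atomic_uint *word, bool timeout,
                            unsigned int max_try)
{
    unsigned int old = atomic_load(word);

    do {
        /* A successful CAS plays the role of a successful STXR. */
        if ( atomic_compare_exchange_weak(word, &old, old | (1u << nr)) )
            return true;
    } while ( !timeout || --max_try > 0 );

    return false; /* contended for max_try rounds; caller decides what now */
}

int main(void)
{
    atomic_uint w = 0;

    if ( set_bit_timeout(3, &w, true, 16) )
        printf("word = %#x\n", (unsigned int)atomic_load(&w)); /* 0x8 */
    return 0;
}
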
diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c | |
index 17cb176266..3b2fd676ff 100644 | |
--- a/xen/arch/arm/arm64/lib/find_next_bit.c | |
+++ b/xen/arch/arm/arm64/lib/find_next_bit.c | |
@@ -12,49 +12,51 @@ | |
#include <asm/types.h> | |
#include <asm/byteorder.h> | |
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | |
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | |
#ifndef find_next_bit | |
/* | |
* Find the next set bit in a memory region. | |
*/ | |
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | |
- unsigned long offset) | |
+ unsigned long offset) | |
{ | |
- const unsigned long *p = addr + BITOP_WORD(offset); | |
- unsigned long result = offset & ~(BITS_PER_LONG-1); | |
- unsigned long tmp; | |
+ const unsigned long *p = addr + BITOP_WORD(offset); | |
+ unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
+ unsigned long tmp; | |
- if (offset >= size) | |
- return size; | |
- size -= result; | |
- offset %= BITS_PER_LONG; | |
- if (offset) { | |
- tmp = *(p++); | |
- tmp &= (~0UL << offset); | |
- if (size < BITS_PER_LONG) | |
- goto found_first; | |
- if (tmp) | |
- goto found_middle; | |
- size -= BITS_PER_LONG; | |
- result += BITS_PER_LONG; | |
- } | |
- while (size & ~(BITS_PER_LONG-1)) { | |
- if ((tmp = *(p++))) | |
- goto found_middle; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
- tmp = *p; | |
+ if ( offset >= size ) | |
+ return size; | |
+ size -= result; | |
+ offset %= BITS_PER_LONG; | |
+ if ( offset ) | |
+ { | |
+ tmp = *(p++); | |
+ tmp &= (~0UL << offset); | |
+ if ( size < BITS_PER_LONG ) | |
+ goto found_first; | |
+ if ( tmp ) | |
+ goto found_middle; | |
+ size -= BITS_PER_LONG; | |
+ result += BITS_PER_LONG; | |
+ } | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ if ( (tmp = *(p++)) ) | |
+ goto found_middle; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
+ tmp = *p; | |
found_first: | |
- tmp &= (~0UL >> (BITS_PER_LONG - size)); | |
- if (tmp == 0UL) /* Are any bits set? */ | |
- return result + size; /* Nope. */ | |
+ tmp &= (~0UL >> (BITS_PER_LONG - size)); | |
+ if ( tmp == 0UL ) /* Are any bits set? */ | |
+ return result + size; /* Nope. */ | |
found_middle: | |
- return result + __ffs(tmp); | |
+ return result + __ffs(tmp); | |
} | |
EXPORT_SYMBOL(find_next_bit); | |
#endif | |
@@ -65,42 +67,44 @@ EXPORT_SYMBOL(find_next_bit); | |
* Linus' asm-alpha/bitops.h. | |
*/ | |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | |
- unsigned long offset) | |
+ unsigned long offset) | |
{ | |
- const unsigned long *p = addr + BITOP_WORD(offset); | |
- unsigned long result = offset & ~(BITS_PER_LONG-1); | |
- unsigned long tmp; | |
+ const unsigned long *p = addr + BITOP_WORD(offset); | |
+ unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
+ unsigned long tmp; | |
- if (offset >= size) | |
- return size; | |
- size -= result; | |
- offset %= BITS_PER_LONG; | |
- if (offset) { | |
- tmp = *(p++); | |
- tmp |= ~0UL >> (BITS_PER_LONG - offset); | |
- if (size < BITS_PER_LONG) | |
- goto found_first; | |
- if (~tmp) | |
- goto found_middle; | |
- size -= BITS_PER_LONG; | |
- result += BITS_PER_LONG; | |
- } | |
- while (size & ~(BITS_PER_LONG-1)) { | |
- if (~(tmp = *(p++))) | |
- goto found_middle; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
- tmp = *p; | |
+ if ( offset >= size ) | |
+ return size; | |
+ size -= result; | |
+ offset %= BITS_PER_LONG; | |
+ if ( offset ) | |
+ { | |
+ tmp = *(p++); | |
+ tmp |= ~0UL >> (BITS_PER_LONG - offset); | |
+ if ( size < BITS_PER_LONG ) | |
+ goto found_first; | |
+ if ( ~tmp ) | |
+ goto found_middle; | |
+ size -= BITS_PER_LONG; | |
+ result += BITS_PER_LONG; | |
+ } | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ if ( ~(tmp = *(p++)) ) | |
+ goto found_middle; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
+ tmp = *p; | |
found_first: | |
- tmp |= ~0UL << size; | |
- if (tmp == ~0UL) /* Are any bits zero? */ | |
- return result + size; /* Nope. */ | |
+ tmp |= ~0UL << size; | |
+ if ( tmp == ~0UL ) /* Are any bits zero? */ | |
+ return result + size; /* Nope. */ | |
found_middle: | |
- return result + ffz(tmp); | |
+ return result + ffz(tmp); | |
} | |
EXPORT_SYMBOL(find_next_zero_bit); | |
#endif | |
@@ -111,24 +115,25 @@ EXPORT_SYMBOL(find_next_zero_bit); | |
*/ | |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) | |
{ | |
- const unsigned long *p = addr; | |
- unsigned long result = 0; | |
- unsigned long tmp; | |
+ const unsigned long *p = addr; | |
+ unsigned long result = 0; | |
+ unsigned long tmp; | |
- while (size & ~(BITS_PER_LONG-1)) { | |
- if ((tmp = *(p++))) | |
- goto found; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ if ( (tmp = *(p++)) ) | |
+ goto found; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
- tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); | |
- if (tmp == 0UL) /* Are any bits set? */ | |
- return result + size; /* Nope. */ | |
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); | |
+ if ( tmp == 0UL ) /* Are any bits set? */ | |
+ return result + size; /* Nope. */ | |
found: | |
- return result + __ffs(tmp); | |
+ return result + __ffs(tmp); | |
} | |
EXPORT_SYMBOL(find_first_bit); | |
#endif | |
@@ -137,26 +142,28 @@ EXPORT_SYMBOL(find_first_bit); | |
/* | |
* Find the first cleared bit in a memory region. | |
*/ | |
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) | |
+unsigned long find_first_zero_bit(const unsigned long *addr, | |
+ unsigned long size) | |
{ | |
- const unsigned long *p = addr; | |
- unsigned long result = 0; | |
- unsigned long tmp; | |
+ const unsigned long *p = addr; | |
+ unsigned long result = 0; | |
+ unsigned long tmp; | |
- while (size & ~(BITS_PER_LONG-1)) { | |
- if (~(tmp = *(p++))) | |
- goto found; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ if ( ~(tmp = *(p++)) ) | |
+ goto found; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
- tmp = (*p) | (~0UL << size); | |
- if (tmp == ~0UL) /* Are any bits zero? */ | |
- return result + size; /* Nope. */ | |
+ tmp = (*p) | (~0UL << size); | |
+ if ( tmp == ~0UL ) /* Are any bits zero? */ | |
+ return result + size; /* Nope. */ | |
found: | |
- return result + ffz(tmp); | |
+ return result + ffz(tmp); | |
} | |
EXPORT_SYMBOL(find_first_zero_bit); | |
#endif | |
@@ -164,12 +171,12 @@ EXPORT_SYMBOL(find_first_zero_bit); | |
#ifdef __BIG_ENDIAN | |
/* include/linux/byteorder does not support "unsigned long" type */ | |
-static inline unsigned long ext2_swabp(const unsigned long * x) | |
+static inline unsigned long ext2_swabp(const unsigned long *x) | |
{ | |
#if BITS_PER_LONG == 64 | |
- return (unsigned long) __swab64p((u64 *) x); | |
+ return (unsigned long)__swab64p((u64 *)x); | |
#elif BITS_PER_LONG == 32 | |
- return (unsigned long) __swab32p((u32 *) x); | |
+ return (unsigned long)__swab32p((u32 *)x); | |
#else | |
#error BITS_PER_LONG not defined | |
#endif | |
@@ -179,103 +186,107 @@ static inline unsigned long ext2_swabp(const unsigned long * x) | |
static inline unsigned long ext2_swab(const unsigned long y) | |
{ | |
#if BITS_PER_LONG == 64 | |
- return (unsigned long) __swab64((u64) y); | |
+ return (unsigned long)__swab64((u64)y); | |
#elif BITS_PER_LONG == 32 | |
- return (unsigned long) __swab32((u32) y); | |
+ return (unsigned long)__swab32((u32)y); | |
#else | |
#error BITS_PER_LONG not defined | |
#endif | |
} | |
#ifndef find_next_zero_bit_le | |
-unsigned long find_next_zero_bit_le(const void *addr, unsigned | |
- long size, unsigned long offset) | |
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, | |
+ unsigned long offset) | |
{ | |
- const unsigned long *p = addr; | |
- unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
- unsigned long tmp; | |
+ const unsigned long *p = addr; | |
+ unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
+ unsigned long tmp; | |
- if (offset >= size) | |
- return size; | |
- p += BITOP_WORD(offset); | |
- size -= result; | |
- offset &= (BITS_PER_LONG - 1UL); | |
- if (offset) { | |
- tmp = ext2_swabp(p++); | |
- tmp |= (~0UL >> (BITS_PER_LONG - offset)); | |
- if (size < BITS_PER_LONG) | |
- goto found_first; | |
- if (~tmp) | |
- goto found_middle; | |
- size -= BITS_PER_LONG; | |
- result += BITS_PER_LONG; | |
- } | |
+ if ( offset >= size ) | |
+ return size; | |
+ p += BITOP_WORD(offset); | |
+ size -= result; | |
+ offset &= (BITS_PER_LONG - 1UL); | |
+ if ( offset ) | |
+ { | |
+ tmp = ext2_swabp(p++); | |
+ tmp |= (~0UL >> (BITS_PER_LONG - offset)); | |
+ if ( size < BITS_PER_LONG ) | |
+ goto found_first; | |
+ if ( ~tmp ) | |
+ goto found_middle; | |
+ size -= BITS_PER_LONG; | |
+ result += BITS_PER_LONG; | |
+ } | |
- while (size & ~(BITS_PER_LONG - 1)) { | |
- if (~(tmp = *(p++))) | |
- goto found_middle_swap; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
- tmp = ext2_swabp(p); | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ if ( ~(tmp = *(p++)) ) | |
+ goto found_middle_swap; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
+ tmp = ext2_swabp(p); | |
found_first: | |
- tmp |= ~0UL << size; | |
- if (tmp == ~0UL) /* Are any bits zero? */ | |
- return result + size; /* Nope. Skip ffz */ | |
+ tmp |= ~0UL << size; | |
+ if ( tmp == ~0UL ) /* Are any bits zero? */ | |
+ return result + size; /* Nope. Skip ffz */ | |
found_middle: | |
- return result + ffz(tmp); | |
+ return result + ffz(tmp); | |
found_middle_swap: | |
- return result + ffz(ext2_swab(tmp)); | |
+ return result + ffz(ext2_swab(tmp)); | |
} | |
EXPORT_SYMBOL(find_next_zero_bit_le); | |
#endif | |
#ifndef find_next_bit_le | |
-unsigned long find_next_bit_le(const void *addr, unsigned | |
- long size, unsigned long offset) | |
+unsigned long find_next_bit_le(const void *addr, unsigned long size, | |
+ unsigned long offset) | |
{ | |
- const unsigned long *p = addr; | |
- unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
- unsigned long tmp; | |
+ const unsigned long *p = addr; | |
+ unsigned long result = offset & ~(BITS_PER_LONG - 1); | |
+ unsigned long tmp; | |
- if (offset >= size) | |
- return size; | |
- p += BITOP_WORD(offset); | |
- size -= result; | |
- offset &= (BITS_PER_LONG - 1UL); | |
- if (offset) { | |
- tmp = ext2_swabp(p++); | |
- tmp &= (~0UL << offset); | |
- if (size < BITS_PER_LONG) | |
- goto found_first; | |
- if (tmp) | |
- goto found_middle; | |
- size -= BITS_PER_LONG; | |
- result += BITS_PER_LONG; | |
- } | |
+ if ( offset >= size ) | |
+ return size; | |
+ p += BITOP_WORD(offset); | |
+ size -= result; | |
+ offset &= (BITS_PER_LONG - 1UL); | |
+ if ( offset ) | |
+ { | |
+ tmp = ext2_swabp(p++); | |
+ tmp &= (~0UL << offset); | |
+ if ( size < BITS_PER_LONG ) | |
+ goto found_first; | |
+ if ( tmp ) | |
+ goto found_middle; | |
+ size -= BITS_PER_LONG; | |
+ result += BITS_PER_LONG; | |
+ } | |
- while (size & ~(BITS_PER_LONG - 1)) { | |
- tmp = *(p++); | |
- if (tmp) | |
- goto found_middle_swap; | |
- result += BITS_PER_LONG; | |
- size -= BITS_PER_LONG; | |
- } | |
- if (!size) | |
- return result; | |
- tmp = ext2_swabp(p); | |
+ while ( size & ~(BITS_PER_LONG - 1) ) | |
+ { | |
+ tmp = *(p++); | |
+ if ( tmp ) | |
+ goto found_middle_swap; | |
+ result += BITS_PER_LONG; | |
+ size -= BITS_PER_LONG; | |
+ } | |
+ if ( !size ) | |
+ return result; | |
+ tmp = ext2_swabp(p); | |
found_first: | |
- tmp &= (~0UL >> (BITS_PER_LONG - size)); | |
- if (tmp == 0UL) /* Are any bits set? */ | |
- return result + size; /* Nope. */ | |
+ tmp &= (~0UL >> (BITS_PER_LONG - size)); | |
+ if ( tmp == 0UL ) /* Are any bits set? */ | |
+ return result + size; /* Nope. */ | |
found_middle: | |
- return result + __ffs(tmp); | |
+ return result + __ffs(tmp); | |
found_middle_swap: | |
- return result + __ffs(ext2_swab(tmp)); | |
+ return result + __ffs(ext2_swab(tmp)); | |
} | |
EXPORT_SYMBOL(find_next_bit_le); | |
#endif | |
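
Under the reindentation, find_first_bit()/find_next_bit() are the classic word-at-a-time scan: skip zero words, then apply find-first-set to the first non-zero one. A condensed sketch (it assumes size is a multiple of the word size, unlike the originals, which also mask the partial tail word):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long first_bit(const unsigned long *addr, unsigned long size)
{
    unsigned long i;

    for ( i = 0; i * BITS_PER_LONG < size; i++ )
        if ( addr[i] ) /* first non-zero word wins */
            return i * BITS_PER_LONG + __builtin_ctzl(addr[i]); /* ~__ffs */

    return size; /* convention: "not found" is reported as size */
}

int main(void)
{
    unsigned long map[2] = { 0, 1UL << 7 };

    printf("%lu\n", first_bit(map, 128)); /* 71 on an LP64 target */
    return 0;
}

The _le variants add byte-swapping of each word (ext2_swab) so big-endian hosts scan little-endian bitmaps correctly.
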
diff --git a/xen/arch/arm/arm64/livepatch.c b/xen/arch/arm/arm64/livepatch.c | |
index 5c75779284..0affc93f1e 100644 | |
--- a/xen/arch/arm/arm64/livepatch.c | |
+++ b/xen/arch/arm/arm64/livepatch.c | |
@@ -51,15 +51,15 @@ void arch_livepatch_apply(struct livepatch_func *func) | |
*(new_ptr + i) = insn; | |
/* | |
- * When we upload the payload, it will go through the data cache | |
- * (the region is cacheable). Until the data cache is cleaned, the data | |
- * may not reach the memory. And in the case the data and instruction cache | |
- * are separated, we may read invalid instruction from the memory because | |
- * the data cache have not yet synced with the memory. Hence sync it. | |
- */ | |
+ * When we upload the payload, it will go through the data cache | |
+ * (the region is cacheable). Until the data cache is cleaned, the data | |
+ * may not reach the memory. And in the case the data and instruction cache | |
+ * are separated, we may read invalid instruction from the memory because | |
+ * the data cache have not yet synced with the memory. Hence sync it. | |
+ */ | |
if ( func->new_addr ) | |
clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size); | |
- clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len); | |
+ clean_and_invalidate_dcache_va_range(new_ptr, sizeof(*new_ptr) * len); | |
} | |
/* arch_livepatch_revert shared with ARM 32/ARM 64. */ | |
@@ -68,8 +68,7 @@ int arch_livepatch_verify_elf(const struct livepatch_elf *elf) | |
{ | |
const Elf_Ehdr *hdr = elf->hdr; | |
- if ( hdr->e_machine != EM_AARCH64 || | |
- hdr->e_ident[EI_CLASS] != ELFCLASS64 ) | |
+ if ( hdr->e_machine != EM_AARCH64 || hdr->e_ident[EI_CLASS] != ELFCLASS64 ) | |
{ | |
dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n", | |
elf->name); | |
@@ -86,7 +85,8 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf, | |
return false; | |
} | |
-enum aarch64_reloc_op { | |
+enum aarch64_reloc_op | |
+{ | |
RELOC_OP_NONE, | |
RELOC_OP_ABS, | |
RELOC_OP_PREL, | |
@@ -95,7 +95,7 @@ enum aarch64_reloc_op { | |
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) | |
{ | |
- switch ( reloc_op ) | |
+ switch (reloc_op) | |
{ | |
case RELOC_OP_ABS: | |
return val; | |
@@ -108,10 +108,10 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) | |
case RELOC_OP_NONE: | |
return 0; | |
- | |
} | |
- dprintk(XENLOG_DEBUG, LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op); | |
+ dprintk(XENLOG_DEBUG, | |
+ LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op); | |
return 0; | |
} | |
@@ -120,18 +120,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) | |
{ | |
s64 sval = do_reloc(op, place, val); | |
- switch ( len ) | |
+ switch (len) | |
{ | |
case 16: | |
*(s16 *)place = sval; | |
if ( sval < INT16_MIN || sval > UINT16_MAX ) | |
- return -EOVERFLOW; | |
+ return -EOVERFLOW; | |
break; | |
case 32: | |
*(s32 *)place = sval; | |
if ( sval < INT32_MIN || sval > UINT32_MAX ) | |
- return -EOVERFLOW; | |
+ return -EOVERFLOW; | |
break; | |
case 64: | |
@@ -139,14 +139,16 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) | |
break; | |
default: | |
- dprintk(XENLOG_DEBUG, LIVEPATCH "Invalid length (%d) for data relocation\n", len); | |
+ dprintk(XENLOG_DEBUG, | |
+ LIVEPATCH "Invalid length (%d) for data relocation\n", len); | |
return 0; | |
} | |
return 0; | |
} | |
-enum aarch64_insn_movw_imm_type { | |
+enum aarch64_insn_movw_imm_type | |
+{ | |
AARCH64_INSN_IMM_MOVNZ, | |
AARCH64_INSN_IMM_MOVKZ, | |
}; | |
@@ -197,7 +199,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *dest, u64 val, | |
} | |
static int reloc_insn_imm(enum aarch64_reloc_op op, void *dest, u64 val, | |
- int lsb, int len, enum aarch64_insn_imm_type imm_type) | |
+ int lsb, int len, | |
+ enum aarch64_insn_imm_type imm_type) | |
{ | |
u64 imm, imm_mask; | |
s64 sval; | |
@@ -260,8 +263,11 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, | |
} | |
else if ( symndx >= elf->nsym ) | |
{ | |
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation wants symbol@%u which is past end!\n", | |
- elf->name, symndx); | |
+ dprintk( | |
+ XENLOG_ERR, | |
+ LIVEPATCH | |
+ "%s: Relative relocation wants symbol@%u which is past end!\n", | |
+ elf->name, symndx); | |
return -EINVAL; | |
} | |
else if ( !elf->sym[symndx].sym ) | |
@@ -271,14 +277,14 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, | |
return -EINVAL; | |
} | |
- val = elf->sym[symndx].sym->st_value + r->r_addend; /* S+A */ | |
+ val = elf->sym[symndx].sym->st_value + r->r_addend; /* S+A */ | |
/* ARM64 operations at minimum are always 32-bit. */ | |
if ( r->r_offset >= base->sec->sh_size || | |
- (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) | |
+ (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size ) | |
goto bad_offset; | |
- switch ( ELF64_R_TYPE(r->r_info) ) | |
+ switch (ELF64_R_TYPE(r->r_info)) | |
{ | |
/* Data */ | |
case R_AARCH64_ABS64: | |
@@ -472,15 +478,17 @@ int arch_livepatch_perform_rela(struct livepatch_elf *elf, | |
if ( overflow_check && ovf == -EOVERFLOW ) | |
{ | |
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", | |
+ dprintk(XENLOG_ERR, | |
+ LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n", | |
elf->name, i, rela->name, base->name); | |
return ovf; | |
} | |
} | |
return 0; | |
- bad_offset: | |
- dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation offset is past %s section!\n", | |
+bad_offset: | |
+ dprintk(XENLOG_ERR, | |
+ LIVEPATCH "%s: Relative relocation offset is past %s section!\n", | |
elf->name, base->name); | |
return -EINVAL; | |
} | |
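
Buried in the formatting noise, reloc_data() above does a truncating store first and range-checks afterwards, returning -EOVERFLOW when the relocation value does not fit the field; values representable as either signed or unsigned are accepted. A condensed 16-bit version of that flow:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int reloc_data16(void *place, int64_t sval)
{
    *(int16_t *)place = (int16_t)sval; /* truncating store, checked after */

    /* Accept anything representable as s16 or u16, reject the rest. */
    if ( sval < INT16_MIN || sval > UINT16_MAX )
        return -EOVERFLOW;

    return 0;
}

int main(void)
{
    int16_t field;

    printf("%d\n", reloc_data16(&field, 0x1234));  /* 0 */
    printf("%d\n", reloc_data16(&field, 0x12345)); /* -EOVERFLOW */
    return 0;
}
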
diff --git a/xen/arch/arm/arm64/smpboot.c b/xen/arch/arm/arm64/smpboot.c | |
index 694fbf67e6..820061a000 100644 | |
--- a/xen/arch/arm/arm64/smpboot.c | |
+++ b/xen/arch/arm/arm64/smpboot.c | |
@@ -9,8 +9,9 @@ | |
#include <asm/psci.h> | |
#include <asm/acpi.h> | |
-struct smp_enable_ops { | |
- int (*prepare_cpu)(int); | |
+struct smp_enable_ops | |
+{ | |
+ int (*prepare_cpu)(int); | |
}; | |
static paddr_t cpu_release_addr[NR_CPUS]; | |
@@ -20,7 +21,7 @@ static int __init smp_spin_table_cpu_up(int cpu) | |
{ | |
paddr_t __iomem *release; | |
- if (!cpu_release_addr[cpu]) | |
+ if ( !cpu_release_addr[cpu] ) | |
{ | |
printk("CPU%d: No release addr\n", cpu); | |
return -ENODEV; | |
@@ -44,7 +45,8 @@ static int __init smp_spin_table_cpu_up(int cpu) | |
static void __init smp_spin_table_init(int cpu, struct dt_device_node *dn) | |
{ | |
- if ( !dt_property_read_u64(dn, "cpu-release-addr", &cpu_release_addr[cpu]) ) | |
+ if ( !dt_property_read_u64(dn, "cpu-release-addr", | |
+ &cpu_release_addr[cpu]) ) | |
{ | |
printk("CPU%d has no cpu-release-addr\n", cpu); | |
return; | |
@@ -76,7 +78,7 @@ static int __init dt_arch_cpu_init(int cpu, struct dt_device_node *dn) | |
const char *enable_method; | |
enable_method = dt_get_property(dn, "enable-method", NULL); | |
- if (!enable_method) | |
+ if ( !enable_method ) | |
{ | |
printk("CPU%d has no enable method\n", cpu); | |
return -EINVAL; | |
diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c | |
index babfc1d884..76d31b244c 100644 | |
--- a/xen/arch/arm/arm64/traps.c | |
+++ b/xen/arch/arm/arm64/traps.c | |
@@ -24,19 +24,15 @@ | |
#include <public/xen.h> | |
-static const char *handler[]= { | |
- "Synchronous Abort", | |
- "IRQ", | |
- "FIQ", | |
- "Error" | |
-}; | |
+static const char *handler[] = {"Synchronous Abort", "IRQ", "FIQ", "Error"}; | |
void do_bad_mode(struct cpu_user_regs *regs, int reason) | |
{ | |
- union hsr hsr = { .bits = regs->hsr }; | |
+ union hsr hsr = {.bits = regs->hsr}; | |
printk("Bad mode in %s handler detected\n", handler[reason]); | |
- printk("ESR=0x%08"PRIx32": EC=%"PRIx32", IL=%"PRIx32", ISS=%"PRIx32"\n", | |
+ printk("ESR=0x%08" PRIx32 ": EC=%" PRIx32 ", IL=%" PRIx32 ", ISS=%" PRIx32 | |
+ "\n", | |
hsr.bits, hsr.ec, hsr.len, hsr.iss); | |
local_irq_disable(); | |
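
The do_bad_mode() print above comes straight out of a union-with-bitfields decode of the syndrome register. A stand-alone version with the ARMv8 ESR_ELx layout (ISS in bits [24:0], IL in [25], EC in [31:26]); it assumes little-endian bitfield allocation, as the Xen definition does:

#include <stdint.h>
#include <stdio.h>

union esr {
    uint32_t bits;
    struct {
        uint32_t iss:25; /* instruction-specific syndrome */
        uint32_t len:1;  /* 0 = 16-bit trapped insn, 1 = 32-bit */
        uint32_t ec:6;   /* exception class */
    };
};

int main(void)
{
    union esr esr = { .bits = 0x92000045 }; /* a data abort from a lower EL */

    printf("EC=%#x IL=%u ISS=%#x\n", esr.ec, esr.len, esr.iss);
    /* EC=0x24 IL=1 ISS=0x45 */
    return 0;
}
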
diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c | |
index 999a0d58a5..d748dd8a55 100644 | |
--- a/xen/arch/arm/arm64/vfp.c | |
+++ b/xen/arch/arm/arm64/vfp.c | |
@@ -24,7 +24,8 @@ void vfp_save_state(struct vcpu *v) | |
"stp q26, q27, [%1, #16 * 26]\n\t" | |
"stp q28, q29, [%1, #16 * 28]\n\t" | |
"stp q30, q31, [%1, #16 * 30]\n\t" | |
- : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs)); | |
+ : "=Q"(*v->arch.vfp.fpregs) | |
+ : "r"(v->arch.vfp.fpregs)); | |
v->arch.vfp.fpsr = READ_SYSREG32(FPSR); | |
v->arch.vfp.fpcr = READ_SYSREG32(FPCR); | |
@@ -53,7 +54,8 @@ void vfp_restore_state(struct vcpu *v) | |
"ldp q26, q27, [%1, #16 * 26]\n\t" | |
"ldp q28, q29, [%1, #16 * 28]\n\t" | |
"ldp q30, q31, [%1, #16 * 30]\n\t" | |
- : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs)); | |
+ : | |
+ : "Q"(*v->arch.vfp.fpregs), "r"(v->arch.vfp.fpregs)); | |
WRITE_SYSREG32(v->arch.vfp.fpsr, FPSR); | |
WRITE_SYSREG32(v->arch.vfp.fpcr, FPCR); | |
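
The constraint lists being re-wrapped above pair "=Q" (*v->arch.vfp.fpregs), which tells the compiler the asm writes that memory, with "r" (v->arch.vfp.fpregs) supplying the base register the stp/ldp offsets are applied to. An aarch64-only two-register sketch of the same pairing:

#include <stdio.h>

struct fpregs { unsigned long long q[4]; }; /* room for q0/q1, 2 x 128 bits */

static void save_q0_q1(struct fpregs *r)
{
    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
                 : "=Q" (*r) /* operand 0: marks *r as written by the asm */
                 : "r" (r)); /* operand 1: base address for the store */
}

int main(void)
{
    struct fpregs r = { { 0 } };

    save_q0_q1(&r);
    printf("q0 low half: %#llx\n", r.q[0]); /* whatever q0 held */
    return 0;
}
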
diff --git a/xen/arch/arm/arm64/vsysreg.c b/xen/arch/arm/arm64/vsysreg.c | |
index 8a85507d9d..0dd8008c00 100644 | |
--- a/xen/arch/arm/arm64/vsysreg.c | |
+++ b/xen/arch/arm/arm64/vsysreg.c | |
@@ -30,20 +30,20 @@ | |
* | |
* Note that it only traps NS write access from EL1. | |
*/ | |
-#define TVM_REG(reg) \ | |
-static bool vreg_emulate_##reg(struct cpu_user_regs *regs, \ | |
- uint64_t *r, bool read) \ | |
-{ \ | |
- struct vcpu *v = current; \ | |
- bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
- \ | |
- GUEST_BUG_ON(read); \ | |
- WRITE_SYSREG64(*r, reg); \ | |
- \ | |
- p2m_toggle_cache(v, cache_enabled); \ | |
- \ | |
- return true; \ | |
-} | |
+#define TVM_REG(reg) \ | |
+ static bool vreg_emulate_##reg(struct cpu_user_regs *regs, uint64_t *r, \ | |
+ bool read) \ | |
+ { \ | |
+ struct vcpu *v = current; \ | |
+ bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
+ \ | |
+ GUEST_BUG_ON(read); \ | |
+ WRITE_SYSREG64(*r, reg); \ | |
+ \ | |
+ p2m_toggle_cache(v, cache_enabled); \ | |
+ \ | |
+ return true; \ | |
+ } | |
/* Defining helpers for emulating sysreg registers. */ | |
TVM_REG(SCTLR_EL1) | |
@@ -59,23 +59,22 @@ TVM_REG(AMAIR_EL1) | |
TVM_REG(CONTEXTIDR_EL1) | |
/* Macro to generate easily case for co-processor emulation */ | |
-#define GENERATE_CASE(reg) \ | |
- case HSR_SYSREG_##reg: \ | |
- { \ | |
- bool res; \ | |
- \ | |
- res = vreg_emulate_sysreg64(regs, hsr, vreg_emulate_##reg); \ | |
- ASSERT(res); \ | |
- break; \ | |
+#define GENERATE_CASE(reg) \ | |
+ case HSR_SYSREG_##reg: \ | |
+ { \ | |
+ bool res; \ | |
+ \ | |
+ res = vreg_emulate_sysreg64(regs, hsr, vreg_emulate_##reg); \ | |
+ ASSERT(res); \ | |
+ break; \ | |
} | |
-void do_sysreg(struct cpu_user_regs *regs, | |
- const union hsr hsr) | |
+void do_sysreg(struct cpu_user_regs *regs, const union hsr hsr) | |
{ | |
int regidx = hsr.sysreg.reg; | |
struct vcpu *v = current; | |
- switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) | |
+ switch (hsr.bits & HSR_SYSREG_REGS_MASK) | |
{ | |
/* | |
* HCR_EL2.TACR | |
@@ -101,22 +100,22 @@ void do_sysreg(struct cpu_user_regs *regs, | |
p2m_set_way_flush(current); | |
break; | |
- /* | |
- * HCR_EL2.TVM | |
- * | |
- * ARMv8 (DDI 0487D.a): Table D1-38 | |
- */ | |
- GENERATE_CASE(SCTLR_EL1) | |
- GENERATE_CASE(TTBR0_EL1) | |
- GENERATE_CASE(TTBR1_EL1) | |
- GENERATE_CASE(TCR_EL1) | |
- GENERATE_CASE(ESR_EL1) | |
- GENERATE_CASE(FAR_EL1) | |
- GENERATE_CASE(AFSR0_EL1) | |
- GENERATE_CASE(AFSR1_EL1) | |
- GENERATE_CASE(MAIR_EL1) | |
- GENERATE_CASE(AMAIR_EL1) | |
- GENERATE_CASE(CONTEXTIDR_EL1) | |
+ /* | |
+ * HCR_EL2.TVM | |
+ * | |
+ * ARMv8 (DDI 0487D.a): Table D1-38 | |
+ */ | |
+ GENERATE_CASE(SCTLR_EL1) | |
+ GENERATE_CASE(TTBR0_EL1) | |
+ GENERATE_CASE(TTBR1_EL1) | |
+ GENERATE_CASE(TCR_EL1) | |
+ GENERATE_CASE(ESR_EL1) | |
+ GENERATE_CASE(FAR_EL1) | |
+ GENERATE_CASE(AFSR0_EL1) | |
+ GENERATE_CASE(AFSR1_EL1) | |
+ GENERATE_CASE(MAIR_EL1) | |
+ GENERATE_CASE(AMAIR_EL1) | |
+ GENERATE_CASE(CONTEXTIDR_EL1) | |
/* | |
* MDCR_EL2.TDRA | |
@@ -163,15 +162,15 @@ void do_sysreg(struct cpu_user_regs *regs, | |
return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1); | |
case HSR_SYSREG_MDCCSR_EL0: | |
/* | |
- * Accessible at EL0 only if MDSCR_EL1.TDCC is set to 0. We emulate that | |
- * register as RAZ/WI above. So RO at both EL0 and EL1. | |
+ * Accessible at EL0 only if MDSCR_EL1.TDCC is set to 0. We emulate | |
+ * that register as RAZ/WI above. So RO at both EL0 and EL1. | |
*/ | |
return handle_ro_raz(regs, regidx, hsr.sysreg.read, hsr, 0); | |
- HSR_SYSREG_DBG_CASES(DBGBVR): | |
- HSR_SYSREG_DBG_CASES(DBGBCR): | |
- HSR_SYSREG_DBG_CASES(DBGWVR): | |
- HSR_SYSREG_DBG_CASES(DBGWCR): | |
- return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1); | |
+ HSR_SYSREG_DBG_CASES(DBGBVR) | |
+ : HSR_SYSREG_DBG_CASES(DBGBCR) | |
+ : HSR_SYSREG_DBG_CASES(DBGWVR) | |
+ : HSR_SYSREG_DBG_CASES(DBGWCR) | |
+ : return handle_raz_wi(regs, regidx, hsr.sysreg.read, hsr, 1); | |
/* | |
* MDCR_EL2.TPM | |
@@ -275,22 +274,19 @@ void do_sysreg(struct cpu_user_regs *regs, | |
* And all other unknown registers. | |
*/ | |
default: | |
- { | |
- const struct hsr_sysreg sysreg = hsr.sysreg; | |
+ { | |
+ const struct hsr_sysreg sysreg = hsr.sysreg; | |
- gdprintk(XENLOG_ERR, | |
- "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%"PRIregister"\n", | |
- sysreg.read ? "mrs" : "msr", | |
- sysreg.op0, sysreg.op1, | |
- sysreg.crn, sysreg.crm, | |
- sysreg.op2, | |
- sysreg.read ? "=>" : "<=", | |
- sysreg.reg, regs->pc); | |
- gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n", | |
- hsr.bits & HSR_SYSREG_REGS_MASK); | |
- inject_undef_exception(regs, hsr); | |
- return; | |
- } | |
+ gdprintk(XENLOG_ERR, | |
+ "%s %d, %d, c%d, c%d, %d %s x%d @ 0x%" PRIregister "\n", | |
+ sysreg.read ? "mrs" : "msr", sysreg.op0, sysreg.op1, | |
+ sysreg.crn, sysreg.crm, sysreg.op2, | |
+ sysreg.read ? "=>" : "<=", sysreg.reg, regs->pc); | |
+ gdprintk(XENLOG_ERR, "unhandled 64-bit sysreg access %#x\n", | |
+ hsr.bits & HSR_SYSREG_REGS_MASK); | |
+ inject_undef_exception(regs, hsr); | |
+ return; | |
+ } | |
} | |
regs->pc += 4; | |
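
GENERATE_CASE() above expands to an entire case label plus emulation call, which is also why clang-format mis-indents it: the tool cannot see the labels inside the macro. A minimal sketch of the technique with invented register names:

#include <stdio.h>

#define REG_A 1
#define REG_B 2

static void emulate_reg(const char *name) { printf("emulating %s\n", name); }

#define GENERATE_CASE(reg)  \
    case REG_##reg:         \
        emulate_reg(#reg);  \
        break;

static void dispatch(int reg)
{
    switch ( reg )
    {
    GENERATE_CASE(A)
    GENERATE_CASE(B)
    default:
        printf("unhandled register %d\n", reg);
    }
}

int main(void)
{
    dispatch(REG_A); /* emulating A */
    dispatch(3);     /* unhandled register 3 */
    return 0;
}

The same blind spot explains the HSR_SYSREG_DBG_CASES hunk above, where the reformat pushes the trailing case-label colons onto the following lines.
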
diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c | |
index 891b4b66ff..1e44406aab 100644 | |
--- a/xen/arch/arm/bootfdt.c | |
+++ b/xen/arch/arm/bootfdt.c | |
@@ -27,8 +27,8 @@ static bool __init device_tree_node_matches(const void *fdt, int node, | |
/* Match both "match" and "match@..." patterns but not | |
"match-foo". */ | |
- return strncmp(name, match, match_len) == 0 | |
- && (name[match_len] == '@' || name[match_len] == '\0'); | |
+ return strncmp(name, match, match_len) == 0 && | |
+ (name[match_len] == '@' || name[match_len] == '\0'); | |
} | |
static bool __init device_tree_node_compatible(const void *fdt, int node, | |
@@ -44,7 +44,8 @@ static bool __init device_tree_node_compatible(const void *fdt, int node, | |
if ( prop == NULL ) | |
return false; | |
- while ( len > 0 ) { | |
+ while ( len > 0 ) | |
+ { | |
if ( !dt_compat_cmp(prop, match) ) | |
return true; | |
l = strlen(prop) + 1; | |
@@ -71,7 +72,7 @@ static u32 __init device_tree_get_u32(const void *fdt, int node, | |
if ( !prop || prop->len < sizeof(u32) ) | |
return dflt; | |
- return fdt32_to_cpu(*(uint32_t*)prop->data); | |
+ return fdt32_to_cpu(*(uint32_t *)prop->data); | |
} | |
/** | |
@@ -86,8 +87,7 @@ static u32 __init device_tree_get_u32(const void *fdt, int node, | |
* returns a value different from 0, that value is returned immediately. | |
*/ | |
int __init device_tree_for_each_node(const void *fdt, | |
- device_tree_node_func func, | |
- void *data) | |
+ device_tree_node_func func, void *data) | |
{ | |
int node; | |
int depth; | |
@@ -95,8 +95,7 @@ int __init device_tree_for_each_node(const void *fdt, | |
u32 size_cells[DEVICE_TREE_MAX_DEPTH]; | |
int ret; | |
- for ( node = 0, depth = 0; | |
- node >=0 && depth >= 0; | |
+ for ( node = 0, depth = 0; node >= 0 && depth >= 0; | |
node = fdt_next_node(fdt, node, &depth) ) | |
{ | |
const char *name = fdt_get_name(fdt, node, NULL); | |
@@ -109,13 +108,14 @@ int __init device_tree_for_each_node(const void *fdt, | |
continue; | |
} | |
- as = depth > 0 ? address_cells[depth-1] : DT_ROOT_NODE_ADDR_CELLS_DEFAULT; | |
- ss = depth > 0 ? size_cells[depth-1] : DT_ROOT_NODE_SIZE_CELLS_DEFAULT; | |
+ as = depth > 0 ? address_cells[depth - 1] | |
+ : DT_ROOT_NODE_ADDR_CELLS_DEFAULT; | |
+ ss = depth > 0 ? size_cells[depth - 1] | |
+ : DT_ROOT_NODE_SIZE_CELLS_DEFAULT; | |
- address_cells[depth] = device_tree_get_u32(fdt, node, | |
- "#address-cells", as); | |
- size_cells[depth] = device_tree_get_u32(fdt, node, | |
- "#size-cells", ss); | |
+ address_cells[depth] = | |
+ device_tree_get_u32(fdt, node, "#address-cells", as); | |
+ size_cells[depth] = device_tree_get_u32(fdt, node, "#size-cells", ss); | |
ret = func(fdt, node, name, depth, as, ss, data); | |
if ( ret != 0 ) | |
@@ -125,8 +125,8 @@ int __init device_tree_for_each_node(const void *fdt, | |
} | |
static void __init process_memory_node(const void *fdt, int node, | |
- const char *name, | |
- u32 address_cells, u32 size_cells) | |
+ const char *name, u32 address_cells, | |
+ u32 size_cells) | |
{ | |
const struct fdt_property *prop; | |
int i; | |
@@ -137,8 +137,7 @@ static void __init process_memory_node(const void *fdt, int node, | |
if ( address_cells < 1 || size_cells < 1 ) | |
{ | |
- printk("fdt: node `%s': invalid #address-cells or #size-cells", | |
- name); | |
+ printk("fdt: node `%s': invalid #address-cells or #size-cells", name); | |
return; | |
} | |
@@ -150,7 +149,7 @@ static void __init process_memory_node(const void *fdt, int node, | |
} | |
cell = (const __be32 *)prop->data; | |
- banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof (u32)); | |
+ banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof(u32)); | |
for ( i = 0; i < banks && bootinfo.mem.nr_banks < NR_MEM_BANKS; i++ ) | |
{ | |
@@ -164,8 +163,8 @@ static void __init process_memory_node(const void *fdt, int node, | |
} | |
static void __init process_multiboot_node(const void *fdt, int node, | |
- const char *name, | |
- u32 address_cells, u32 size_cells) | |
+ const char *name, u32 address_cells, | |
+ u32 size_cells) | |
{ | |
static int __initdata kind_guess = 0; | |
const struct fdt_property *prop; | |
@@ -182,7 +181,7 @@ static void __init process_multiboot_node(const void *fdt, int node, | |
ASSERT(parent_node >= 0); | |
/* Check that the node is under "/chosen" (first 7 chars of path) */ | |
- ret = fdt_get_path(fdt, node, path, sizeof (path)); | |
+ ret = fdt_get_path(fdt, node, path, sizeof(path)); | |
if ( ret != 0 || strncmp(path, "/chosen", 7) ) | |
return; | |
@@ -191,8 +190,7 @@ static void __init process_multiboot_node(const void *fdt, int node, | |
panic("node %s missing `reg' property\n", name); | |
if ( len < dt_cells_to_size(address_cells + size_cells) ) | |
- panic("fdt: node `%s': `reg` property length is too short\n", | |
- name); | |
+ panic("fdt: node `%s': `reg` property length is too short\n", name); | |
cell = (const __be32 *)prop->data; | |
device_tree_get_reg(&cell, address_cells, size_cells, &start, &size); | |
@@ -221,13 +219,18 @@ static void __init process_multiboot_node(const void *fdt, int node, | |
*/ | |
if ( kind == BOOTMOD_UNKNOWN ) | |
{ | |
- switch ( kind_guess++ ) | |
+ switch (kind_guess++) | |
{ | |
- case 0: kind = BOOTMOD_KERNEL; break; | |
- case 1: kind = BOOTMOD_RAMDISK; break; | |
- default: break; | |
+ case 0: | |
+ kind = BOOTMOD_KERNEL; | |
+ break; | |
+ case 1: | |
+ kind = BOOTMOD_RAMDISK; | |
+ break; | |
+ default: | |
+ break; | |
} | |
- if ( kind_guess > 1 && has_xsm_magic(start) ) | |
+ if ( kind_guess > 1 && has_xsm_magic(start) ) | |
kind = BOOTMOD_XSM; | |
} | |
@@ -237,13 +240,13 @@ static void __init process_multiboot_node(const void *fdt, int node, | |
prop = fdt_get_property(fdt, node, "bootargs", &len); | |
if ( !prop ) | |
return; | |
- add_boot_cmdline(fdt_get_name(fdt, parent_node, &len), prop->data, | |
- kind, start, domU); | |
+ add_boot_cmdline(fdt_get_name(fdt, parent_node, &len), prop->data, kind, | |
+ start, domU); | |
} | |
static void __init process_chosen_node(const void *fdt, int node, | |
- const char *name, | |
- u32 address_cells, u32 size_cells) | |
+ const char *name, u32 address_cells, | |
+ u32 size_cells) | |
{ | |
const struct fdt_property *prop; | |
paddr_t start, end; | |
@@ -277,25 +280,26 @@ static void __init process_chosen_node(const void *fdt, int node, | |
if ( start >= end ) | |
{ | |
- printk("linux,initrd limits invalid: %"PRIpaddr" >= %"PRIpaddr"\n", | |
- start, end); | |
+ printk("linux,initrd limits invalid: %" PRIpaddr " >= %" PRIpaddr "\n", | |
+ start, end); | |
return; | |
} | |
- printk("Initrd %"PRIpaddr"-%"PRIpaddr"\n", start, end); | |
+ printk("Initrd %" PRIpaddr "-%" PRIpaddr "\n", start, end); | |
- add_boot_module(BOOTMOD_RAMDISK, start, end-start, false); | |
+ add_boot_module(BOOTMOD_RAMDISK, start, end - start, false); | |
} | |
-static int __init early_scan_node(const void *fdt, | |
- int node, const char *name, int depth, | |
- u32 address_cells, u32 size_cells, | |
+static int __init early_scan_node(const void *fdt, int node, const char *name, | |
+ int depth, u32 address_cells, u32 size_cells, | |
void *data) | |
{ | |
if ( device_tree_node_matches(fdt, node, "memory") ) | |
process_memory_node(fdt, node, name, address_cells, size_cells); | |
- else if ( depth <= 3 && (device_tree_node_compatible(fdt, node, "xen,multiboot-module" ) || | |
- device_tree_node_compatible(fdt, node, "multiboot,module" ))) | |
+ else if ( depth <= 3 && | |
+ (device_tree_node_compatible(fdt, node, | |
+ "xen,multiboot-module") || | |
+ device_tree_node_compatible(fdt, node, "multiboot,module")) ) | |
process_multiboot_node(fdt, node, name, address_cells, size_cells); | |
else if ( depth == 1 && device_tree_node_matches(fdt, node, "chosen") ) | |
process_chosen_node(fdt, node, name, address_cells, size_cells); | |
@@ -311,16 +315,14 @@ static void __init early_print_info(void) | |
int i, nr_rsvd; | |
for ( i = 0; i < mi->nr_banks; i++ ) | |
- printk("RAM: %"PRIpaddr" - %"PRIpaddr"\n", | |
- mi->bank[i].start, | |
- mi->bank[i].start + mi->bank[i].size - 1); | |
+ printk("RAM: %" PRIpaddr " - %" PRIpaddr "\n", mi->bank[i].start, | |
+ mi->bank[i].start + mi->bank[i].size - 1); | |
printk("\n"); | |
- for ( i = 0 ; i < mods->nr_mods; i++ ) | |
- printk("MODULE[%d]: %"PRIpaddr" - %"PRIpaddr" %-12s\n", | |
- i, | |
- mods->module[i].start, | |
- mods->module[i].start + mods->module[i].size, | |
- boot_module_kind_as_string(mods->module[i].kind)); | |
+ for ( i = 0; i < mods->nr_mods; i++ ) | |
+ printk("MODULE[%d]: %" PRIpaddr " - %" PRIpaddr " %-12s\n", i, | |
+ mods->module[i].start, | |
+ mods->module[i].start + mods->module[i].size, | |
+ boot_module_kind_as_string(mods->module[i].kind)); | |
nr_rsvd = fdt_num_mem_rsv(device_tree_flattened); | |
for ( i = 0; i < nr_rsvd; i++ ) | |
@@ -330,14 +332,12 @@ static void __init early_print_info(void) | |
continue; | |
/* fdt_get_mem_rsv returns length */ | |
e += s; | |
- printk(" RESVD[%d]: %"PRIpaddr" - %"PRIpaddr"\n", | |
- i, s, e); | |
+ printk(" RESVD[%d]: %" PRIpaddr " - %" PRIpaddr "\n", i, s, e); | |
} | |
printk("\n"); | |
- for ( i = 0 ; i < cmds->nr_mods; i++ ) | |
- printk("CMDLINE[%"PRIpaddr"]:%s %s\n", cmds->cmdline[i].start, | |
- cmds->cmdline[i].dt_name, | |
- &cmds->cmdline[i].cmdline[0]); | |
+ for ( i = 0; i < cmds->nr_mods; i++ ) | |
+ printk("CMDLINE[%" PRIpaddr "]:%s %s\n", cmds->cmdline[i].start, | |
+ cmds->cmdline[i].dt_name, &cmds->cmdline[i].cmdline[0]); | |
printk("\n"); | |
} | |
@@ -378,8 +378,8 @@ const __init char *boot_fdt_cmdline(const void *fdt) | |
struct bootcmdline *dom0_cmdline = | |
boot_cmdline_find_by_kind(BOOTMOD_KERNEL); | |
- if (fdt_get_property(fdt, node, "xen,dom0-bootargs", NULL) || | |
- ( dom0_cmdline && dom0_cmdline->cmdline[0] ) ) | |
+ if ( fdt_get_property(fdt, node, "xen,dom0-bootargs", NULL) || | |
+ (dom0_cmdline && dom0_cmdline->cmdline[0]) ) | |
prop = fdt_get_property(fdt, node, "bootargs", NULL); | |
} | |
if ( prop == NULL ) | |
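
The bootfdt.c hunks above all orbit one decoding rule: an FDT "reg" property is a stream of big-endian 32-bit cells, #address-cells of address followed by #size-cells of size. A self-contained decoder for one (address, size) pair; the bswap stands in for fdt32_to_cpu() and is correct for a little-endian host:

#include <stdint.h>
#include <stdio.h>

static uint64_t read_cells(const uint32_t **cellp, unsigned int cells)
{
    uint64_t val = 0;

    while ( cells-- ) /* accumulate cells, most significant first */
        val = (val << 32) | __builtin_bswap32(*(*cellp)++);

    return val;
}

int main(void)
{
    /* reg = <0x0 0x40000000 0x0 0x80000000>: 2 GiB of RAM at 1 GiB, with
     * #address-cells = #size-cells = 2, stored big-endian as in a real FDT. */
    const uint32_t reg[] = { __builtin_bswap32(0x0),
                             __builtin_bswap32(0x40000000),
                             __builtin_bswap32(0x0),
                             __builtin_bswap32(0x80000000) };
    const uint32_t *cell = reg;
    uint64_t start = read_cells(&cell, 2);
    uint64_t size  = read_cells(&cell, 2);

    printf("RAM: %#llx - %#llx\n", (unsigned long long)start,
           (unsigned long long)(start + size - 1));
    return 0;
}
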
diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c | |
index 8904939aca..2d34cf94ef 100644 | |
--- a/xen/arch/arm/cpuerrata.c | |
+++ b/xen/arch/arm/cpuerrata.c | |
@@ -59,9 +59,9 @@ static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start, | |
* Vectors are part of the text that are mapped read-only. So re-map | |
* the vector table to be able to update vectors. | |
*/ | |
- dst_remapped = __vmap(&dst_mfn, | |
- 1UL << get_order_from_bytes(VECTOR_TABLE_SIZE), | |
- 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT); | |
+ dst_remapped = | |
+ __vmap(&dst_mfn, 1UL << get_order_from_bytes(VECTOR_TABLE_SIZE), 1, 1, | |
+ PAGE_HYPERVISOR, VMAP_DEFAULT); | |
if ( !dst_remapped ) | |
return false; | |
@@ -80,11 +80,9 @@ static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start, | |
return true; | |
} | |
-static bool __maybe_unused | |
-install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, | |
- const char *hyp_vec_start, | |
- const char *hyp_vec_end, | |
- const char *desc) | |
+static bool __maybe_unused install_bp_hardening_vec( | |
+ const struct arm_cpu_capabilities *entry, const char *hyp_vec_start, | |
+ const char *hyp_vec_end, const char *desc) | |
{ | |
static int last_slot = -1; | |
static DEFINE_SPINLOCK(bp_lock); | |
@@ -141,8 +139,9 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, | |
if ( ret ) | |
{ | |
/* Install the new vector table. */ | |
- WRITE_SYSREG((vaddr_t)(__bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE), | |
- VBAR_EL2); | |
+ WRITE_SYSREG( | |
+ (vaddr_t)(__bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE), | |
+ VBAR_EL2); | |
isb(); | |
} | |
@@ -176,7 +175,7 @@ static int enable_smccc_arch_workaround_1(void *data) | |
if ( (int)res.a0 < 0 ) | |
goto warn; | |
- return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start, | |
+ return !install_bp_hardening_vec(entry, __smccc_workaround_1_smc_start, | |
__smccc_workaround_1_smc_end, | |
"call ARM_SMCCC_ARCH_WORKAROUND_1"); | |
@@ -218,15 +217,14 @@ install_bp_hardening_vecs(const struct arm_cpu_capabilities *entry, | |
if ( !entry->matches(entry) ) | |
return; | |
- printk(XENLOG_INFO "CPU%u will %s on guest exit\n", | |
- smp_processor_id(), desc); | |
+ printk(XENLOG_INFO "CPU%u will %s on guest exit\n", smp_processor_id(), | |
+ desc); | |
this_cpu(bp_harden_vecs) = hyp_vecs; | |
} | |
static int enable_bp_inv_hardening(void *data) | |
{ | |
- install_bp_hardening_vecs(data, hyp_traps_vector_bp_inv, | |
- "execute BPIALL"); | |
+ install_bp_hardening_vecs(data, hyp_traps_vector_bp_inv, "execute BPIALL"); | |
return 0; | |
} | |
@@ -279,8 +277,8 @@ custom_param("spec-ctrl", parse_spec_ctrl); | |
/* Arm64 only for now as for Arm32 the workaround is currently handled in C. */ | |
#ifdef CONFIG_ARM_64 | |
void __init arm_enable_wa2_handling(const struct alt_instr *alt, | |
- const uint32_t *origptr, | |
- uint32_t *updptr, int nr_inst) | |
+ const uint32_t *origptr, uint32_t *updptr, | |
+ int nr_inst) | |
{ | |
BUG_ON(nr_inst != 1); | |
@@ -310,7 +308,7 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) | |
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID, | |
ARM_SMCCC_ARCH_WORKAROUND_2_FID, &res); | |
- switch ( (int)res.a0 ) | |
+ switch ((int)res.a0) | |
{ | |
case ARM_SMCCC_NOT_SUPPORTED: | |
ssbd_state = ARM_SSBD_UNKNOWN; | |
@@ -333,7 +331,7 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) | |
return false; | |
} | |
- switch ( ssbd_state ) | |
+ switch (ssbd_state) | |
{ | |
case ARM_SSBD_FORCE_DISABLE: | |
printk_once("%s disabled from command-line\n", entry->desc); | |
@@ -367,23 +365,20 @@ static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry) | |
} | |
#endif | |
-#define MIDR_RANGE(model, min, max) \ | |
- .matches = is_affected_midr_range, \ | |
- .midr_model = model, \ | |
- .midr_range_min = min, \ | |
- .midr_range_max = max | |
+#define MIDR_RANGE(model, min, max) \ | |
+ .matches = is_affected_midr_range, .midr_model = model, \ | |
+ .midr_range_min = min, .midr_range_max = max | |
-#define MIDR_ALL_VERSIONS(model) \ | |
- .matches = is_affected_midr_range, \ | |
- .midr_model = model, \ | |
- .midr_range_min = 0, \ | |
+#define MIDR_ALL_VERSIONS(model) \ | |
+ .matches = is_affected_midr_range, .midr_model = model, \ | |
+ .midr_range_min = 0, \ | |
.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK) | |
static bool __maybe_unused | |
is_affected_midr_range(const struct arm_cpu_capabilities *entry) | |
{ | |
- return MIDR_IS_CPU_MODEL_RANGE(current_cpu_data.midr.bits, entry->midr_model, | |
- entry->midr_range_min, | |
+ return MIDR_IS_CPU_MODEL_RANGE(current_cpu_data.midr.bits, | |
+ entry->midr_model, entry->midr_range_min, | |
entry->midr_range_max); | |
} | |
@@ -416,8 +411,7 @@ static const struct arm_cpu_capabilities arm_errata[] = { | |
/* Cortex-A57 r0p0 - r1p2 */ | |
.desc = "ARM erratum 832075", | |
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | |
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, | |
- (1 << MIDR_VARIANT_SHIFT) | 2), | |
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00, (1 << MIDR_VARIANT_SHIFT) | 2), | |
}, | |
#endif | |
#ifdef CONFIG_ARM64_ERRATUM_834220 | |
@@ -425,8 +419,7 @@ static const struct arm_cpu_capabilities arm_errata[] = { | |
/* Cortex-A57 r0p0 - r1p2 */ | |
.desc = "ARM erratum 834220", | |
.capability = ARM64_WORKAROUND_834220, | |
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, | |
- (1 << MIDR_VARIANT_SHIFT) | 2), | |
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00, (1 << MIDR_VARIANT_SHIFT) | 2), | |
}, | |
#endif | |
#ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR | |
@@ -495,22 +488,21 @@ void __init enable_errata_workarounds(void) | |
} | |
static int cpu_errata_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
- void *hcpu) | |
+ unsigned long action, void *hcpu) | |
{ | |
int rc = 0; | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_STARTING: | |
/* | |
* At CPU_STARTING phase no notifier shall return an error, because the | |
* system is designed with the assumption that starting a CPU cannot | |
- * fail at this point. If an error happens here it will cause Xen to hit | |
- * the BUG_ON() in notify_cpu_starting(). In future, either this | |
+ * fail at this point. If an error happens here it will cause Xen to | |
+ * hit the BUG_ON() in notify_cpu_starting(). In future, either this | |
* notifier/enabling capabilities should be fixed to always return | |
- * success/void or notify_cpu_starting() and other common code should be | |
- * fixed to expect an error at CPU_STARTING phase. | |
+ * success/void or notify_cpu_starting() and other common code should | |
+ * be fixed to expect an error at CPU_STARTING phase. | |
*/ | |
ASSERT(system_state != SYS_STATE_boot); | |
rc = enable_nonboot_cpu_caps(arm_errata); | |
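The MIDR_RANGE()/is_affected_midr_range() pairing reshuffled above boils down to one masked compare plus a range check on the packed variant/revision. A self-contained sketch, assuming the architectural MIDR_EL1 layout (variant in bits [23:20], revision in bits [3:0]); Xen's real model mask also isolates the implementer/part-number fields, which ~MIDR_RANGE_MASK approximates here:

    #include <stdbool.h>
    #include <stdint.h>

    #define MIDR_REVISION_MASK  0xfU
    #define MIDR_VARIANT_SHIFT  20
    #define MIDR_VARIANT_MASK   (0xfU << MIDR_VARIANT_SHIFT)
    #define MIDR_RANGE_MASK     (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

    /*
     * True when 'midr' names the same CPU model and its packed
     * variant/revision falls within [range_min, range_max]; e.g.
     * (1 << MIDR_VARIANT_SHIFT) | 2 selects everything up to r1p2,
     * matching the Cortex-A57 erratum entries in the diff.
     */
    static bool midr_in_range(uint32_t midr, uint32_t model,
                              uint32_t range_min, uint32_t range_max)
    {
        uint32_t rev = midr & MIDR_RANGE_MASK;

        return (midr & ~MIDR_RANGE_MASK) == model &&
               rev >= range_min && rev <= range_max;
    }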
diff --git a/xen/arch/arm/cpufeature.c b/xen/arch/arm/cpufeature.c | |
index 44126dbf07..ca297a4b21 100644 | |
--- a/xen/arch/arm/cpufeature.c | |
+++ b/xen/arch/arm/cpufeature.c | |
@@ -99,44 +99,44 @@ int enable_nonboot_cpu_caps(const struct arm_cpu_capabilities *caps) | |
void identify_cpu(struct cpuinfo_arm *c) | |
{ | |
- c->midr.bits = READ_SYSREG32(MIDR_EL1); | |
- c->mpidr.bits = READ_SYSREG(MPIDR_EL1); | |
+ c->midr.bits = READ_SYSREG32(MIDR_EL1); | |
+ c->mpidr.bits = READ_SYSREG(MPIDR_EL1); | |
#ifdef CONFIG_ARM_64 | |
- c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1); | |
- c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1); | |
+ c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1); | |
+ c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1); | |
- c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1); | |
- c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1); | |
+ c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1); | |
+ c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1); | |
- c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1); | |
- c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1); | |
+ c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1); | |
+ c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1); | |
- c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1); | |
- c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1); | |
+ c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1); | |
+ c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1); | |
- c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); | |
- c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); | |
+ c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); | |
+ c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); | |
#endif | |
- c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); | |
- c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1); | |
+ c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); | |
+ c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1); | |
- c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1); | |
+ c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1); | |
- c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1); | |
+ c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1); | |
- c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1); | |
- c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1); | |
- c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1); | |
- c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1); | |
+ c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1); | |
+ c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1); | |
+ c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1); | |
+ c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1); | |
- c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1); | |
- c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1); | |
- c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1); | |
- c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1); | |
- c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1); | |
- c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1); | |
+ c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1); | |
+ c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1); | |
+ c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1); | |
+ c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1); | |
+ c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1); | |
+ c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1); | |
} | |
/* | |
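enable_nonboot_cpu_caps() above consumes the same arm_errata[] table that cpuerrata.c populates with MIDR_RANGE(). The walk it performs is roughly the following sketch; the struct shape and the terminate-on-NULL-matches convention are assumptions inferred from how the table entries appear in this diff, not Xen's exact definitions:

    #include <stdbool.h>

    struct cpu_capability {
        const char *desc;
        unsigned int capability;
        bool (*matches)(const struct cpu_capability *entry);
        int (*enable)(void *data);
    };

    /*
     * Run the ->enable hook on the calling CPU for every entry whose
     * ->matches predicate accepts it; stop at the sentinel entry.
     */
    static int enable_caps(const struct cpu_capability *caps)
    {
        int rc = 0;

        for ( ; caps->matches; caps++ )
        {
            if ( !caps->matches(caps) || !caps->enable )
                continue;
            rc = caps->enable((void *)caps);
            if ( rc )
                break;
        }

        return rc;
    }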
diff --git a/xen/arch/arm/decode.c b/xen/arch/arm/decode.c | |
index 8b1e15d118..f67e0a51cd 100644 | |
--- a/xen/arch/arm/decode.c | |
+++ b/xen/arch/arm/decode.c | |
@@ -25,8 +25,8 @@ | |
#include "decode.h" | |
-static void update_dabt(struct hsr_dabt *dabt, int reg, | |
- uint8_t size, bool sign) | |
+static void update_dabt(struct hsr_dabt *dabt, int reg, uint8_t size, | |
+ bool sign) | |
{ | |
dabt->reg = reg; | |
dabt->size = size; | |
@@ -38,12 +38,12 @@ static int decode_thumb2(register_t pc, struct hsr_dabt *dabt, uint16_t hw1) | |
uint16_t hw2; | |
uint16_t rt; | |
- if ( raw_copy_from_guest(&hw2, (void *__user)(pc + 2), sizeof (hw2)) ) | |
+ if ( raw_copy_from_guest(&hw2, (void *__user)(pc + 2), sizeof(hw2)) ) | |
return -EFAULT; | |
rt = (hw2 >> 12) & 0xf; | |
- switch ( (hw1 >> 9) & 0xf ) | |
+ switch ((hw1 >> 9) & 0xf) | |
{ | |
case 12: | |
{ | |
@@ -87,10 +87,10 @@ static int decode_thumb(register_t pc, struct hsr_dabt *dabt) | |
{ | |
uint16_t instr; | |
- if ( raw_copy_from_guest(&instr, (void * __user)pc, sizeof (instr)) ) | |
+ if ( raw_copy_from_guest(&instr, (void *__user)pc, sizeof(instr)) ) | |
return -EFAULT; | |
- switch ( instr >> 12 ) | |
+ switch (instr >> 12) | |
{ | |
case 5: | |
{ | |
@@ -98,7 +98,7 @@ static int decode_thumb(register_t pc, struct hsr_dabt *dabt) | |
uint16_t opB = (instr >> 9) & 0x7; | |
int reg = instr & 7; | |
- switch ( opB & 0x3 ) | |
+ switch (opB & 0x3) | |
{ | |
case 0: /* Non-signed word */ | |
update_dabt(dabt, reg, 2, false); | |
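The decode_thumb() hunk above pulls the transfer register and access size out of 16-bit load/store encodings. A sketch of that field extraction for the register-offset group (top nibble 0b0101), keeping update_dabt()'s convention that size is log2 of the width in bytes; the signed forms (opB low bits == 3) are left out here:

    #include <stdbool.h>
    #include <stdint.h>

    struct dabt_fields {
        unsigned int reg;   /* transferred register Rt */
        uint8_t size;       /* log2 of the access width in bytes */
        bool sign;          /* sign-extend on load */
    };

    static bool decode_thumb_ldst_reg(uint16_t instr, struct dabt_fields *out)
    {
        if ( (instr >> 12) != 5 )   /* not a load/store register-offset insn */
            return false;

        out->reg = instr & 0x7;     /* Rt lives in bits [2:0] */
        out->sign = false;
        switch ( (instr >> 9) & 0x3 )   /* low bits of opB */
        {
        case 0: out->size = 2; break;   /* STR/LDR, 32-bit */
        case 1: out->size = 1; break;   /* STRH/LDRH, 16-bit */
        case 2: out->size = 0; break;   /* STRB/LDRB, 8-bit */
        default: return false;          /* LDRSB/LDRSH omitted in sketch */
        }
        return true;
    }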
diff --git a/xen/arch/arm/device.c b/xen/arch/arm/device.c | |
index 70cd6c1a19..d04e73b3f8 100644 | |
--- a/xen/arch/arm/device.c | |
+++ b/xen/arch/arm/device.c | |
@@ -33,7 +33,7 @@ int __init device_init(struct dt_device_node *dev, enum device_class class, | |
ASSERT(dev != NULL); | |
if ( !dt_device_is_available(dev) || dt_device_for_passthrough(dev) ) | |
- return -ENODEV; | |
+ return -ENODEV; | |
for ( desc = _sdevice; desc != _edevice; desc++ ) | |
{ | |
@@ -46,19 +46,19 @@ int __init device_init(struct dt_device_node *dev, enum device_class class, | |
return desc->init(dev, data); | |
} | |
- | |
} | |
return -EBADF; | |
} | |
-int __init acpi_device_init(enum device_class class, const void *data, int class_type) | |
+int __init acpi_device_init(enum device_class class, const void *data, | |
+ int class_type) | |
{ | |
const struct acpi_device_desc *desc; | |
for ( desc = _asdevice; desc != _aedevice; desc++ ) | |
{ | |
- if ( ( desc->class != class ) || ( desc->class_type != class_type ) ) | |
+ if ( (desc->class != class) || (desc->class_type != class_type) ) | |
continue; | |
ASSERT(desc->init != NULL); | |
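device_init() and acpi_device_init() both scan a descriptor table bounded by linker-provided symbols (_sdevice/_edevice and _asdevice/_aedevice respectively). A sketch of that registration pattern under illustrative names; the section name, macro shape, and error value are assumptions, not Xen's actual definitions:

    struct device_desc {
        const char *name;
        int class;
        int (*init)(void *dev, const void *data);
    };

    /* Drop each descriptor into a dedicated section; the linker script
     * provides the start/end symbols that bound the table. */
    #define DEVICE_REGISTER(_var, _name, _class)                    \
        static const struct device_desc _var                        \
            __attribute__((__section__(".dev.info"), __used__)) = { \
                .name = _name,                                      \
                .class = _class,
    #define DEVICE_REGISTER_END };

    extern const struct device_desc _sdevice[], _edevice[];

    static int init_matching(int class, void *dev, const void *data)
    {
        const struct device_desc *desc;

        for ( desc = _sdevice; desc != _edevice; desc++ )
            if ( desc->class == class && desc->init )
                return desc->init(dev, data);

        return -1; /* no driver matched; stands in for -EBADF above */
    }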
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c | |
index 941bbff4fe..9004a45cfa 100644 | |
--- a/xen/arch/arm/domain.c | |
+++ b/xen/arch/arm/domain.c | |
@@ -65,7 +65,7 @@ void idle_loop(void) | |
{ | |
unsigned int cpu = smp_processor_id(); | |
- for ( ; ; ) | |
+ for ( ;; ) | |
{ | |
if ( cpu_is_offline(cpu) ) | |
stop_cpu(); | |
@@ -158,7 +158,7 @@ static void ctxt_switch_from(struct vcpu *p) | |
#endif | |
if ( is_32bit_domain(p->domain) ) | |
- p->arch.ifsr = READ_SYSREG(IFSR32_EL2); | |
+ p->arch.ifsr = READ_SYSREG(IFSR32_EL2); | |
p->arch.afsr0 = READ_SYSREG(AFSR0_EL1); | |
p->arch.afsr1 = READ_SYSREG(AFSR1_EL1); | |
@@ -289,8 +289,8 @@ static void update_runstate_area(struct vcpu *v) | |
guest_handle = &v->runstate_guest.p->state_entry_time + 1; | |
guest_handle--; | |
v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE; | |
- __raw_copy_to_guest(guest_handle, | |
- (void *)(&v->runstate.state_entry_time + 1) - 1, 1); | |
+ __raw_copy_to_guest( | |
+ guest_handle, (void *)(&v->runstate.state_entry_time + 1) - 1, 1); | |
smp_wmb(); | |
} | |
@@ -300,8 +300,8 @@ static void update_runstate_area(struct vcpu *v) | |
{ | |
v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE; | |
smp_wmb(); | |
- __raw_copy_to_guest(guest_handle, | |
- (void *)(&v->runstate.state_entry_time + 1) - 1, 1); | |
+ __raw_copy_to_guest( | |
+ guest_handle, (void *)(&v->runstate.state_entry_time + 1) - 1, 1); | |
} | |
} | |
@@ -383,20 +383,29 @@ void sync_vcpu_execstate(struct vcpu *v) | |
/* Nothing to do -- no lazy switching */ | |
} | |
-#define next_arg(fmt, args) ({ \ | |
- unsigned long __arg; \ | |
- switch ( *(fmt)++ ) \ | |
- { \ | |
- case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \ | |
- case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \ | |
- case 'h': __arg = (unsigned long)va_arg(args, void *); break; \ | |
- default: __arg = 0; BUG(); \ | |
- } \ | |
- __arg; \ | |
-}) | |
- | |
-unsigned long hypercall_create_continuation( | |
- unsigned int op, const char *format, ...) | |
+#define next_arg(fmt, args) \ | |
+ ({ \ | |
+ unsigned long __arg; \ | |
+ switch (*(fmt)++) \ | |
+ { \ | |
+ case 'i': \ | |
+ __arg = (unsigned long)va_arg(args, unsigned int); \ | |
+ break; \ | |
+ case 'l': \ | |
+ __arg = (unsigned long)va_arg(args, unsigned long); \ | |
+ break; \ | |
+ case 'h': \ | |
+ __arg = (unsigned long)va_arg(args, void *); \ | |
+ break; \ | |
+ default: \ | |
+ __arg = 0; \ | |
+ BUG(); \ | |
+ } \ | |
+ __arg; \ | |
+ }) | |
+ | |
+unsigned long hypercall_create_continuation(unsigned int op, | |
+ const char *format, ...) | |
{ | |
struct mc_state *mcs = ¤t->mc_state; | |
struct cpu_user_regs *regs; | |
@@ -406,7 +415,7 @@ unsigned long hypercall_create_continuation( | |
va_list args; | |
/* All hypercalls take at least one argument */ | |
- BUG_ON( !p || *p == '\0' ); | |
+ BUG_ON(!p || *p == '\0'); | |
current->hcall_preempted = true; | |
@@ -433,14 +442,26 @@ unsigned long hypercall_create_continuation( | |
{ | |
arg = next_arg(p, args); | |
- switch ( i ) | |
+ switch (i) | |
{ | |
- case 0: regs->x0 = arg; break; | |
- case 1: regs->x1 = arg; break; | |
- case 2: regs->x2 = arg; break; | |
- case 3: regs->x3 = arg; break; | |
- case 4: regs->x4 = arg; break; | |
- case 5: regs->x5 = arg; break; | |
+ case 0: | |
+ regs->x0 = arg; | |
+ break; | |
+ case 1: | |
+ regs->x1 = arg; | |
+ break; | |
+ case 2: | |
+ regs->x2 = arg; | |
+ break; | |
+ case 3: | |
+ regs->x3 = arg; | |
+ break; | |
+ case 4: | |
+ regs->x4 = arg; | |
+ break; | |
+ case 5: | |
+ regs->x5 = arg; | |
+ break; | |
} | |
} | |
@@ -456,14 +477,26 @@ unsigned long hypercall_create_continuation( | |
{ | |
arg = next_arg(p, args); | |
- switch ( i ) | |
+ switch (i) | |
{ | |
- case 0: regs->r0 = arg; break; | |
- case 1: regs->r1 = arg; break; | |
- case 2: regs->r2 = arg; break; | |
- case 3: regs->r3 = arg; break; | |
- case 4: regs->r4 = arg; break; | |
- case 5: regs->r5 = arg; break; | |
+ case 0: | |
+ regs->r0 = arg; | |
+ break; | |
+ case 1: | |
+ regs->r1 = arg; | |
+ break; | |
+ case 2: | |
+ regs->r2 = arg; | |
+ break; | |
+ case 3: | |
+ regs->r3 = arg; | |
+ break; | |
+ case 4: | |
+ regs->r4 = arg; | |
+ break; | |
+ case 5: | |
+ regs->r5 = arg; | |
+ break; | |
} | |
} | |
@@ -509,7 +542,6 @@ void free_domain_struct(struct domain *d) | |
void dump_pageframe_info(struct domain *d) | |
{ | |
- | |
} | |
/* | |
@@ -517,9 +549,9 @@ void dump_pageframe_info(struct domain *d) | |
* page on ARM64. Cowardly increase the limit in this case. | |
*/ | |
#if defined(CONFIG_NEW_VGIC) && defined(CONFIG_ARM_64) | |
-#define MAX_PAGES_PER_VCPU 2 | |
+#define MAX_PAGES_PER_VCPU 2 | |
#else | |
-#define MAX_PAGES_PER_VCPU 1 | |
+#define MAX_PAGES_PER_VCPU 1 | |
#endif | |
struct vcpu *alloc_vcpu_struct(const struct domain *d) | |
@@ -548,15 +580,15 @@ int arch_vcpu_create(struct vcpu *v) | |
{ | |
int rc = 0; | |
- BUILD_BUG_ON( sizeof(struct cpu_info) > STACK_SIZE ); | |
+ BUILD_BUG_ON(sizeof(struct cpu_info) > STACK_SIZE); | |
- v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); | |
+ v->arch.stack = | |
+ alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); | |
if ( v->arch.stack == NULL ) | |
return -ENOMEM; | |
- v->arch.cpu_info = (struct cpu_info *)(v->arch.stack | |
- + STACK_SIZE | |
- - sizeof(struct cpu_info)); | |
+ v->arch.cpu_info = (struct cpu_info *)(v->arch.stack + STACK_SIZE - | |
+ sizeof(struct cpu_info)); | |
memset(v->arch.cpu_info, 0, sizeof(*v->arch.cpu_info)); | |
v->arch.saved_context.sp = (register_t)v->arch.cpu_info; | |
@@ -617,7 +649,7 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config) | |
/* Fill in the native GIC version, passed back to the toolstack. */ | |
if ( config->arch.gic_version == XEN_DOMCTL_CONFIG_GIC_NATIVE ) | |
{ | |
- switch ( gic_hw_version() ) | |
+ switch (gic_hw_version()) | |
{ | |
case GIC_V2: | |
config->arch.gic_version = XEN_DOMCTL_CONFIG_GIC_V2; | |
@@ -687,7 +719,7 @@ int arch_domain_create(struct domain *d, | |
clear_page(d->shared_info); | |
share_xen_page_with_guest(virt_to_page(d->shared_info), d, SHARE_rw); | |
- switch ( config->arch.gic_version ) | |
+ switch (config->arch.gic_version) | |
{ | |
case XEN_DOMCTL_CONFIG_GIC_V2: | |
d->arch.vgic.version = GIC_V2; | |
@@ -815,7 +847,6 @@ static int is_guest_pv32_psr(uint32_t psr) | |
} | |
} | |
- | |
#ifdef CONFIG_ARM_64 | |
static int is_guest_pv64_psr(uint32_t psr) | |
{ | |
@@ -843,8 +874,7 @@ static int is_guest_pv64_psr(uint32_t psr) | |
* toolstack (XEN_DOMCTL_setvcpucontext) or the guest | |
* (VCPUOP_initialise) and therefore must be properly validated. | |
*/ | |
-int arch_set_info_guest( | |
- struct vcpu *v, vcpu_guest_context_u c) | |
+int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c) | |
{ | |
struct vcpu_guest_context *ctxt = c.nat; | |
struct vcpu_guest_core_regs *regs = &c.nat->user_regs; | |
@@ -907,12 +937,12 @@ int arch_vcpu_reset(struct vcpu *v) | |
static int relinquish_memory(struct domain *d, struct page_list_head *list) | |
{ | |
struct page_info *page, *tmp; | |
- int ret = 0; | |
+ int ret = 0; | |
/* Use a recursive lock, as we may enter 'free_domheap_page'. */ | |
spin_lock_recursive(&d->page_alloc_lock); | |
- page_list_for_each_safe( page, tmp, list ) | |
+ page_list_for_each_safe(page, tmp, list) | |
{ | |
/* Grab a reference to the page so it won't disappear from under us. */ | |
if ( unlikely(!get_page(page, d)) ) | |
@@ -936,7 +966,7 @@ static int relinquish_memory(struct domain *d, struct page_list_head *list) | |
} | |
} | |
- out: | |
+out: | |
spin_unlock_recursive(&d->page_alloc_lock); | |
return ret; | |
} | |
@@ -945,7 +975,7 @@ int domain_relinquish_resources(struct domain *d) | |
{ | |
int ret = 0; | |
- switch ( d->arch.relmem ) | |
+ switch (d->arch.relmem) | |
{ | |
case RELMEM_not_started: | |
ret = iommu_release_dt_devices(d); | |
@@ -963,7 +993,7 @@ int domain_relinquish_resources(struct domain *d) | |
case RELMEM_tee: | |
ret = tee_relinquish_resources(d); | |
- if (ret ) | |
+ if ( ret ) | |
return ret; | |
d->arch.relmem = RELMEM_xen; | |
@@ -1008,16 +1038,16 @@ void arch_dump_domain_info(struct domain *d) | |
p2m_dump_info(d); | |
} | |
- | |
-long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) | |
+long do_arm_vcpu_op(int cmd, unsigned int vcpuid, | |
+ XEN_GUEST_HANDLE_PARAM(void) arg) | |
{ | |
- switch ( cmd ) | |
+ switch (cmd) | |
{ | |
- case VCPUOP_register_vcpu_info: | |
- case VCPUOP_register_runstate_memory_area: | |
- return do_vcpu_op(cmd, vcpuid, arg); | |
- default: | |
- return -EINVAL; | |
+ case VCPUOP_register_vcpu_info: | |
+ case VCPUOP_register_runstate_memory_area: | |
+ return do_vcpu_op(cmd, vcpuid, arg); | |
+ default: | |
+ return -EINVAL; | |
} | |
} | |
@@ -1034,8 +1064,8 @@ void arch_dump_vcpu_info(struct vcpu *v) | |
void vcpu_mark_events_pending(struct vcpu *v) | |
{ | |
- bool already_pending = guest_test_and_set_bit(v->domain, | |
- 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending)); | |
+ bool already_pending = guest_test_and_set_bit( | |
+ v->domain, 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending)); | |
if ( already_pending ) | |
return; | |
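The next_arg() macro reworked in the domain.c hunk above is a format-driven va_arg() dispatcher: one letter per hypercall argument selects the type to pull, and everything is widened to unsigned long for the register image. The same idea as a plain function, sketched with a pointer-to-va_list so it can be called once per argument (Xen BUG()s on an unknown letter; this sketch just yields 0):

    #include <stdarg.h>

    static unsigned long next_arg(const char **fmt, va_list *args)
    {
        switch ( *(*fmt)++ )
        {
        case 'i': return (unsigned long)va_arg(*args, unsigned int);
        case 'l': return (unsigned long)va_arg(*args, unsigned long);
        case 'h': return (unsigned long)va_arg(*args, void *);
        }
        return 0;
    }

    /* Pack up to six typed arguments into a uniform register image,
     * mirroring the x0..x5 / r0..r5 switches in the hunk above. */
    static void pack_regs(unsigned long regs[6], const char *fmt, ...)
    {
        va_list args;
        unsigned int i;

        va_start(args, fmt);
        for ( i = 0; i < 6 && fmt[0]; i++ )
            regs[i] = next_arg(&fmt, &args);
        va_end(args);
    }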
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c | |
index 4c8404155a..966490936b 100644 | |
--- a/xen/arch/arm/domain_build.c | |
+++ b/xen/arch/arm/domain_build.c | |
@@ -56,9 +56,11 @@ struct map_range_data | |
//#define DEBUG_11_ALLOCATION | |
#ifdef DEBUG_11_ALLOCATION | |
-# define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args) | |
+#define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args) | |
#else | |
-# define D11PRINT(fmt, args...) do {} while ( 0 ) | |
+#define D11PRINT(fmt, args...) \ | |
+ do { \ | |
+ } while ( 0 ) | |
#endif | |
/* | |
@@ -101,10 +103,8 @@ static unsigned int __init get_allocation_size(paddr_t size) | |
* Returns false if the memory would be below bank 0 or we have run | |
* out of banks. In this case it will free the pages. | |
*/ | |
-static bool __init insert_11_bank(struct domain *d, | |
- struct kernel_info *kinfo, | |
- struct page_info *pg, | |
- unsigned int order) | |
+static bool __init insert_11_bank(struct domain *d, struct kernel_info *kinfo, | |
+ struct page_info *pg, unsigned int order) | |
{ | |
int res, i; | |
mfn_t smfn; | |
@@ -114,15 +114,13 @@ static bool __init insert_11_bank(struct domain *d, | |
start = mfn_to_maddr(smfn); | |
size = pfn_to_paddr(1UL << order); | |
- D11PRINT("Allocated %#"PRIpaddr"-%#"PRIpaddr" (%ldMB/%ldMB, order %d)\n", | |
- start, start + size, | |
- 1UL << (order + PAGE_SHIFT - 20), | |
+ D11PRINT("Allocated %#" PRIpaddr "-%#" PRIpaddr | |
+ " (%ldMB/%ldMB, order %d)\n", | |
+ start, start + size, 1UL << (order + PAGE_SHIFT - 20), | |
/* Don't want format this as PRIpaddr (16 digit hex) */ | |
- (unsigned long)(kinfo->unassigned_mem >> 20), | |
- order); | |
+ (unsigned long)(kinfo->unassigned_mem >> 20), order); | |
- if ( kinfo->mem.nr_banks > 0 && | |
- size < MB(128) && | |
+ if ( kinfo->mem.nr_banks > 0 && size < MB(128) && | |
start + size < kinfo->mem.bank[0].start ) | |
{ | |
D11PRINT("Allocation below bank 0 is too small, not using\n"); | |
@@ -143,12 +141,12 @@ static bool __init insert_11_bank(struct domain *d, | |
return true; | |
} | |
- for( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
{ | |
struct membank *bank = &kinfo->mem.bank[i]; | |
/* If possible merge new memory into the start of the bank */ | |
- if ( bank->start == start+size ) | |
+ if ( bank->start == start + size ) | |
{ | |
bank->start = start; | |
bank->size += size; | |
@@ -170,8 +168,7 @@ static bool __init insert_11_bank(struct domain *d, | |
*/ | |
if ( start + size < bank->start && kinfo->mem.nr_banks < NR_MEM_BANKS ) | |
{ | |
- memmove(bank + 1, bank, | |
- sizeof(*bank) * (kinfo->mem.nr_banks - i)); | |
+ memmove(bank + 1, bank, sizeof(*bank) * (kinfo->mem.nr_banks - i)); | |
kinfo->mem.nr_banks++; | |
bank->start = start; | |
bank->size = size; | |
@@ -280,7 +277,7 @@ static void __init allocate_memory_11(struct domain *d, | |
*/ | |
while ( order >= min_low_order ) | |
{ | |
- for ( bits = order ; bits <= (lowmem ? 32 : PADDR_BITS); bits++ ) | |
+ for ( bits = order; bits <= (lowmem ? 32 : PADDR_BITS); bits++ ) | |
{ | |
pg = alloc_domheap_pages(d, order, MEMF_bits(bits)); | |
if ( pg != NULL ) | |
@@ -302,7 +299,7 @@ static void __init allocate_memory_11(struct domain *d, | |
printk(XENLOG_INFO "No bank has been allocated below 4GB.\n"); | |
lowmem = false; | |
- got_bank0: | |
+got_bank0: | |
/* | |
* If we failed to allocate bank0 under 4GB, continue allocating | |
@@ -314,9 +311,9 @@ static void __init allocate_memory_11(struct domain *d, | |
pg = alloc_domheap_pages(d, order, lowmem ? MEMF_bits(32) : 0); | |
if ( !pg ) | |
{ | |
- order --; | |
+ order--; | |
- if ( lowmem && order < min_low_order) | |
+ if ( lowmem && order < min_low_order ) | |
{ | |
D11PRINT("Failed at min_low_order, allow high allocations\n"); | |
order = get_allocation_size(kinfo->unassigned_mem); | |
@@ -363,10 +360,9 @@ static void __init allocate_memory_11(struct domain *d, | |
" %ldMB unallocated\n", | |
(unsigned long)kinfo->unassigned_mem >> 20); | |
- for( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
{ | |
- printk("BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n", | |
- i, | |
+ printk("BANK[%d] %#" PRIpaddr "-%#" PRIpaddr " (%ldMB)\n", i, | |
kinfo->mem.bank[i].start, | |
kinfo->mem.bank[i].start + kinfo->mem.bank[i].size, | |
/* Don't want format this as PRIpaddr (16 digit hex) */ | |
@@ -375,9 +371,8 @@ static void __init allocate_memory_11(struct domain *d, | |
} | |
static bool __init allocate_bank_memory(struct domain *d, | |
- struct kernel_info *kinfo, | |
- gfn_t sgfn, | |
- unsigned long tot_size) | |
+ struct kernel_info *kinfo, gfn_t sgfn, | |
+ unsigned long tot_size) | |
{ | |
int res; | |
struct page_info *pg; | |
@@ -453,12 +448,11 @@ static void __init allocate_memory(struct domain *d, struct kernel_info *kinfo) | |
if ( kinfo->unassigned_mem ) | |
goto fail; | |
- for( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
{ | |
- printk(XENLOG_INFO "%pd BANK[%d] %#"PRIpaddr"-%#"PRIpaddr" (%ldMB)\n", | |
- d, | |
- i, | |
- kinfo->mem.bank[i].start, | |
+ printk(XENLOG_INFO "%pd BANK[%d] %#" PRIpaddr "-%#" PRIpaddr | |
+ " (%ldMB)\n", | |
+ d, i, kinfo->mem.bank[i].start, | |
kinfo->mem.bank[i].start + kinfo->mem.bank[i].size, | |
/* Don't want format this as PRIpaddr (16 digit hex) */ | |
(unsigned long)(kinfo->mem.bank[i].size >> 20)); | |
@@ -484,7 +478,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
if ( kinfo->cmdline && kinfo->cmdline[0] ) | |
bootargs = &kinfo->cmdline[0]; | |
- dt_for_each_property_node (node, prop) | |
+ dt_for_each_property_node(node, prop) | |
{ | |
const void *prop_data = prop->value; | |
u32 prop_len = prop->length; | |
@@ -511,8 +505,9 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
dt_property_name_is_equal(prop, "linux,uefi-system-table") || | |
dt_property_name_is_equal(prop, "linux,uefi-mmap-start") || | |
dt_property_name_is_equal(prop, "linux,uefi-mmap-size") || | |
- dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-size") || | |
- dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-ver")) | |
+ dt_property_name_is_equal(prop, | |
+ "linux,uefi-mmap-desc-size") || | |
+ dt_property_name_is_equal(prop, "linux,uefi-mmap-desc-ver") ) | |
continue; | |
if ( dt_property_name_is_equal(prop, "xen,dom0-bootargs") ) | |
@@ -523,7 +518,7 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
} | |
if ( dt_property_name_is_equal(prop, "bootargs") ) | |
{ | |
- if ( !bootargs && !had_dom0_bootargs ) | |
+ if ( !bootargs && !had_dom0_bootargs ) | |
bootargs = prop->value; | |
continue; | |
} | |
@@ -553,8 +548,8 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
if ( dt_device_for_passthrough(node) ) | |
res = fdt_property_string(kinfo->fdt, "status", "disabled"); | |
else if ( status ) | |
- res = fdt_property(kinfo->fdt, "status", status->value, | |
- status->length); | |
+ res = | |
+ fdt_property(kinfo->fdt, "status", status->value, status->length); | |
if ( res ) | |
return res; | |
@@ -578,7 +573,8 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
if ( initrd && initrd->size ) | |
{ | |
u64 a = 0; | |
- res = fdt_property(kinfo->fdt, "linux,initrd-start", &a, sizeof(a)); | |
+ res = | |
+ fdt_property(kinfo->fdt, "linux,initrd-start", &a, sizeof(a)); | |
if ( res ) | |
return res; | |
@@ -598,18 +594,17 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo, | |
typedef __be32 gic_interrupt_t[3]; | |
-static void __init set_interrupt(gic_interrupt_t interrupt, | |
- unsigned int irq, | |
- unsigned int cpumask, | |
- unsigned int level) | |
+static void __init set_interrupt(gic_interrupt_t interrupt, unsigned int irq, | |
+ unsigned int cpumask, unsigned int level) | |
{ | |
__be32 *cells = interrupt; | |
bool is_ppi = !!(irq < 32); | |
BUG_ON(irq < 16); | |
- irq -= (is_ppi) ? 16: 32; /* PPIs start at 16, SPIs at 32 */ | |
+ irq -= (is_ppi) ? 16 : 32; /* PPIs start at 16, SPIs at 32 */ | |
- /* See linux Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */ | |
+ /* See linux | |
+ * Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt */ | |
dt_set_cell(&cells, 1, is_ppi); /* is a PPI? */ | |
dt_set_cell(&cells, 1, irq); | |
dt_set_cell(&cells, 1, (cpumask << 8) | level); | |
@@ -626,7 +621,7 @@ static int __init fdt_property_interrupts(void *fdt, gic_interrupt_t *intr, | |
{ | |
int res; | |
- res = fdt_property(fdt, "interrupts", intr, sizeof (intr[0]) * num_irq); | |
+ res = fdt_property(fdt, "interrupts", intr, sizeof(intr[0]) * num_irq); | |
if ( res ) | |
return res; | |
@@ -636,21 +631,20 @@ static int __init fdt_property_interrupts(void *fdt, gic_interrupt_t *intr, | |
return res; | |
} | |
-static int __init make_memory_node(const struct domain *d, | |
- void *fdt, | |
+static int __init make_memory_node(const struct domain *d, void *fdt, | |
int addrcells, int sizecells, | |
const struct kernel_info *kinfo) | |
{ | |
int res, i; | |
int reg_size = addrcells + sizecells; | |
- int nr_cells = reg_size*kinfo->mem.nr_banks; | |
+ int nr_cells = reg_size * kinfo->mem.nr_banks; | |
__be32 reg[NR_MEM_BANKS * 4 /* Worst case addrcells + sizecells */]; | |
__be32 *cells; | |
BUG_ON(nr_cells >= ARRAY_SIZE(reg)); | |
- dt_dprintk("Create memory node (reg size %d, nr cells %d)\n", | |
- reg_size, nr_cells); | |
+ dt_dprintk("Create memory node (reg size %d, nr cells %d)\n", reg_size, | |
+ nr_cells); | |
/* ePAPR 3.4 */ | |
res = fdt_begin_node(fdt, "memory"); | |
@@ -662,13 +656,13 @@ static int __init make_memory_node(const struct domain *d, | |
return res; | |
cells = ®[0]; | |
- for ( i = 0 ; i < kinfo->mem.nr_banks; i++ ) | |
+ for ( i = 0; i < kinfo->mem.nr_banks; i++ ) | |
{ | |
u64 start = kinfo->mem.bank[i].start; | |
u64 size = kinfo->mem.bank[i].size; | |
- dt_dprintk(" Bank %d: %#"PRIx64"->%#"PRIx64"\n", | |
- i, start, start + size); | |
+ dt_dprintk(" Bank %d: %#" PRIx64 "->%#" PRIx64 "\n", i, start, | |
+ start + size); | |
dt_child_set_range(&cells, addrcells, sizecells, start, size); | |
} | |
@@ -686,9 +680,9 @@ static int __init make_hypervisor_node(struct domain *d, | |
const struct kernel_info *kinfo, | |
int addrcells, int sizecells) | |
{ | |
- const char compat[] = | |
- "xen,xen-"__stringify(XEN_VERSION)"."__stringify(XEN_SUBVERSION)"\0" | |
- "xen,xen"; | |
+ const char compat[] = "xen,xen-" __stringify(XEN_VERSION) "." __stringify( | |
+ XEN_SUBVERSION) "\0" | |
+ "xen,xen"; | |
__be32 reg[4]; | |
gic_interrupt_t intr; | |
__be32 *cells; | |
@@ -701,8 +695,8 @@ static int __init make_hypervisor_node(struct domain *d, | |
* Sanity-check address sizes, since addresses and sizes which do | |
* not take up exactly 4 or 8 bytes are not supported. | |
*/ | |
- if ((addrcells != 1 && addrcells != 2) || | |
- (sizecells != 1 && sizecells != 2)) | |
+ if ( (addrcells != 1 && addrcells != 2) || | |
+ (sizecells != 1 && sizecells != 2) ) | |
panic("Cannot cope with this size\n"); | |
/* See linux Documentation/devicetree/bindings/arm/xen.txt */ | |
@@ -717,10 +711,10 @@ static int __init make_hypervisor_node(struct domain *d, | |
/* reg 0 is grant table space */ | |
cells = ®[0]; | |
- dt_child_set_range(&cells, addrcells, sizecells, | |
- kinfo->gnttab_start, kinfo->gnttab_size); | |
- res = fdt_property(fdt, "reg", reg, | |
- dt_cells_to_size(addrcells + sizecells)); | |
+ dt_child_set_range(&cells, addrcells, sizecells, kinfo->gnttab_start, | |
+ kinfo->gnttab_size); | |
+ res = | |
+ fdt_property(fdt, "reg", reg, dt_cells_to_size(addrcells + sizecells)); | |
if ( res ) | |
return res; | |
@@ -745,10 +739,11 @@ static int __init make_hypervisor_node(struct domain *d, | |
static int __init make_psci_node(void *fdt) | |
{ | |
int res; | |
- const char compat[] = | |
- "arm,psci-1.0""\0" | |
- "arm,psci-0.2""\0" | |
- "arm,psci"; | |
+ const char compat[] = "arm,psci-1.0" | |
+ "\0" | |
+ "arm,psci-0.2" | |
+ "\0" | |
+ "arm,psci"; | |
dt_dprintk("Create PSCI node\n"); | |
@@ -812,7 +807,7 @@ static int __init make_cpus_node(const struct domain *d, void *fdt) | |
{ | |
compatible = dt_get_property(npcpu, "compatible", &len); | |
clock_valid = dt_property_read_u32(npcpu, "clock-frequency", | |
- &clock_frequency); | |
+ &clock_frequency); | |
break; | |
} | |
} | |
@@ -847,10 +842,10 @@ static int __init make_cpus_node(const struct domain *d, void *fdt) | |
* is enough for the current max vcpu number. | |
*/ | |
mpidr_aff = vcpuid_to_vaffinity(cpu); | |
- dt_dprintk("Create cpu@%"PRIx64" (logical CPUID: %d) node\n", | |
+ dt_dprintk("Create cpu@%" PRIx64 " (logical CPUID: %d) node\n", | |
mpidr_aff, cpu); | |
- snprintf(buf, sizeof(buf), "cpu@%"PRIx64, mpidr_aff); | |
+ snprintf(buf, sizeof(buf), "cpu@%" PRIx64, mpidr_aff); | |
res = fdt_begin_node(fdt, buf); | |
if ( res ) | |
return res; | |
@@ -962,11 +957,10 @@ static int __init make_gic_node(const struct domain *d, void *fdt, | |
static int __init make_timer_node(const struct domain *d, void *fdt) | |
{ | |
- static const struct dt_device_match timer_ids[] __initconst = | |
- { | |
+ static const struct dt_device_match timer_ids[] __initconst = { | |
DT_MATCH_COMPATIBLE("arm,armv7-timer"), | |
DT_MATCH_COMPATIBLE("arm,armv8-timer"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
struct dt_device_node *dev; | |
u32 len; | |
@@ -1020,8 +1014,8 @@ static int __init make_timer_node(const struct domain *d, void *fdt) | |
if ( res ) | |
return res; | |
- clock_valid = dt_property_read_u32(dev, "clock-frequency", | |
- &clock_frequency); | |
+ clock_valid = | |
+ dt_property_read_u32(dev, "clock-frequency", &clock_frequency); | |
if ( clock_valid ) | |
{ | |
res = fdt_property_cell(fdt, "clock-frequency", clock_frequency); | |
@@ -1056,7 +1050,7 @@ int __init make_chosen_node(const struct kernel_info *kinfo) | |
bootargs = &kinfo->cmdline[0]; | |
res = fdt_property(fdt, "bootargs", bootargs, strlen(bootargs) + 1); | |
if ( res ) | |
- return res; | |
+ return res; | |
} | |
/* | |
@@ -1105,8 +1099,8 @@ int __init map_irq_to_domain(struct domain *d, unsigned int irq, | |
res = route_irq_to_guest(d, irq, irq, devname); | |
if ( res < 0 ) | |
{ | |
- printk(XENLOG_ERR "Unable to map IRQ%"PRId32" to dom%d\n", | |
- irq, d->domain_id); | |
+ printk(XENLOG_ERR "Unable to map IRQ%" PRId32 " to dom%d\n", irq, | |
+ d->domain_id); | |
return res; | |
} | |
} | |
@@ -1116,8 +1110,7 @@ int __init map_irq_to_domain(struct domain *d, unsigned int irq, | |
} | |
static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, | |
- const struct dt_irq *dt_irq, | |
- void *data) | |
+ const struct dt_irq *dt_irq, void *data) | |
{ | |
struct domain *d = data; | |
unsigned int irq = dt_irq->irq; | |
@@ -1126,7 +1119,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, | |
if ( irq < NR_LOCAL_IRQS ) | |
{ | |
- printk(XENLOG_ERR "%s: IRQ%"PRId32" is not a SPI\n", | |
+ printk(XENLOG_ERR "%s: IRQ%" PRId32 " is not a SPI\n", | |
dt_node_name(dev), irq); | |
return -EINVAL; | |
} | |
@@ -1135,8 +1128,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, | |
res = irq_set_spi_type(irq, dt_irq->type); | |
if ( res ) | |
{ | |
- printk(XENLOG_ERR | |
- "%s: Unable to setup IRQ%"PRId32" to dom%d\n", | |
+ printk(XENLOG_ERR "%s: Unable to setup IRQ%" PRId32 " to dom%d\n", | |
dt_node_name(dev), irq, d->domain_id); | |
return res; | |
} | |
@@ -1147,8 +1139,7 @@ static int __init map_dt_irq_to_domain(const struct dt_device_node *dev, | |
} | |
static int __init map_range_to_domain(const struct dt_device_node *dev, | |
- u64 addr, u64 len, | |
- void *data) | |
+ u64 addr, u64 len, void *data) | |
{ | |
struct map_range_data *mr_data = data; | |
struct domain *d = mr_data->d; | |
@@ -1160,32 +1151,27 @@ static int __init map_range_to_domain(const struct dt_device_node *dev, | |
if ( res ) | |
{ | |
printk(XENLOG_ERR "Unable to permit to dom%d access to" | |
- " 0x%"PRIx64" - 0x%"PRIx64"\n", | |
- d->domain_id, | |
- addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1); | |
+ " 0x%" PRIx64 " - 0x%" PRIx64 "\n", | |
+ d->domain_id, addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1); | |
return res; | |
} | |
if ( need_mapping ) | |
{ | |
- res = map_regions_p2mt(d, | |
- gaddr_to_gfn(addr), | |
- PFN_UP(len), | |
- maddr_to_mfn(addr), | |
- mr_data->p2mt); | |
+ res = map_regions_p2mt(d, gaddr_to_gfn(addr), PFN_UP(len), | |
+ maddr_to_mfn(addr), mr_data->p2mt); | |
if ( res < 0 ) | |
{ | |
- printk(XENLOG_ERR "Unable to map 0x%"PRIx64 | |
- " - 0x%"PRIx64" in domain %d\n", | |
- addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1, | |
- d->domain_id); | |
+ printk(XENLOG_ERR "Unable to map 0x%" PRIx64 " - 0x%" PRIx64 | |
+ " in domain %d\n", | |
+ addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1, d->domain_id); | |
return res; | |
} | |
} | |
- dt_dprintk(" - MMIO: %010"PRIx64" - %010"PRIx64" P2MType=%x\n", | |
- addr, addr + len, mr_data->p2mt); | |
+ dt_dprintk(" - MMIO: %010" PRIx64 " - %010" PRIx64 " P2MType=%x\n", addr, | |
+ addr + len, mr_data->p2mt); | |
return 0; | |
} | |
@@ -1199,7 +1185,7 @@ static int __init map_device_children(struct domain *d, | |
const struct dt_device_node *dev, | |
p2m_type_t p2mt) | |
{ | |
- struct map_range_data mr_data = { .d = d, .p2mt = p2mt }; | |
+ struct map_range_data mr_data = {.d = d, .p2mt = p2mt}; | |
int ret; | |
if ( dt_device_type_is_equal(dev, "pci") ) | |
@@ -1262,8 +1248,8 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, | |
res = dt_device_get_raw_irq(dev, i, &rirq); | |
if ( res ) | |
{ | |
- printk(XENLOG_ERR "Unable to retrieve irq %u for %s\n", | |
- i, dt_node_full_name(dev)); | |
+ printk(XENLOG_ERR "Unable to retrieve irq %u for %s\n", i, | |
+ dt_node_full_name(dev)); | |
return res; | |
} | |
@@ -1273,16 +1259,17 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, | |
*/ | |
if ( rirq.controller != dt_interrupt_controller ) | |
{ | |
- dt_dprintk("irq %u not connected to primary controller. Connected to %s\n", | |
- i, dt_node_full_name(rirq.controller)); | |
+ dt_dprintk( | |
+ "irq %u not connected to primary controller. Connected to %s\n", | |
+ i, dt_node_full_name(rirq.controller)); | |
continue; | |
} | |
res = platform_get_irq(dev, i); | |
if ( res < 0 ) | |
{ | |
- printk(XENLOG_ERR "Unable to get irq %u for %s\n", | |
- i, dt_node_full_name(dev)); | |
+ printk(XENLOG_ERR "Unable to get irq %u for %s\n", i, | |
+ dt_node_full_name(dev)); | |
return res; | |
} | |
@@ -1294,12 +1281,12 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, | |
/* Give permission and map MMIOs */ | |
for ( i = 0; i < naddr; i++ ) | |
{ | |
- struct map_range_data mr_data = { .d = d, .p2mt = p2mt }; | |
+ struct map_range_data mr_data = {.d = d, .p2mt = p2mt}; | |
res = dt_device_get_address(dev, i, &addr, &size); | |
if ( res ) | |
{ | |
- printk(XENLOG_ERR "Unable to retrieve address %u for %s\n", | |
- i, dt_node_full_name(dev)); | |
+ printk(XENLOG_ERR "Unable to retrieve address %u for %s\n", i, | |
+ dt_node_full_name(dev)); | |
return res; | |
} | |
@@ -1316,11 +1303,9 @@ static int __init handle_device(struct domain *d, struct dt_device_node *dev, | |
} | |
static int __init handle_node(struct domain *d, struct kernel_info *kinfo, | |
- struct dt_device_node *node, | |
- p2m_type_t p2mt) | |
+ struct dt_device_node *node, p2m_type_t p2mt) | |
{ | |
- static const struct dt_device_match skip_matches[] __initconst = | |
- { | |
+ static const struct dt_device_match skip_matches[] __initconst = { | |
DT_MATCH_COMPATIBLE("xen,xen"), | |
DT_MATCH_COMPATIBLE("xen,multiboot-module"), | |
DT_MATCH_COMPATIBLE("multiboot,module"), | |
@@ -1335,19 +1320,17 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, | |
DT_MATCH_TYPE("memory"), | |
/* The memory mapped timer is not supported by Xen. */ | |
DT_MATCH_COMPATIBLE("arm,armv7-timer-mem"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
- static const struct dt_device_match timer_matches[] __initconst = | |
- { | |
+ static const struct dt_device_match timer_matches[] __initconst = { | |
DT_MATCH_TIMER, | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
- static const struct dt_device_match reserved_matches[] __initconst = | |
- { | |
+ static const struct dt_device_match reserved_matches[] __initconst = { | |
DT_MATCH_PATH("/psci"), | |
DT_MATCH_PATH("/memory"), | |
DT_MATCH_PATH("/hypervisor"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
struct dt_device_node *child; | |
int res, i, nirq, irq_id; | |
@@ -1402,7 +1385,7 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, | |
*/ | |
nirq = dt_number_of_irq(node); | |
- for ( i = 0 ; i < nirq ; i++ ) | |
+ for ( i = 0; i < nirq; i++ ) | |
{ | |
irq_id = platform_get_irq(node, i); | |
@@ -1419,12 +1402,13 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, | |
* already exists with the same path. | |
*/ | |
if ( dt_match_node(reserved_matches, node) ) | |
- printk(XENLOG_WARNING | |
- "WARNING: Path %s is reserved, skip the node as we may re-use the path.\n", | |
- path); | |
+ printk( | |
+ XENLOG_WARNING | |
+ "WARNING: Path %s is reserved, skip the node as we may re-use the path.\n", | |
+ path); | |
res = handle_device(d, node, p2mt); | |
- if ( res) | |
+ if ( res ) | |
return res; | |
/* | |
@@ -1480,7 +1464,6 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo, | |
res = make_memory_node(d, kinfo->fdt, addrcells, sizecells, kinfo); | |
if ( res ) | |
return res; | |
- | |
} | |
res = fdt_end_node(kinfo->fdt); | |
@@ -1494,7 +1477,8 @@ static int __init make_gicv2_domU_node(const struct domain *d, void *fdt) | |
__be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2]; | |
__be32 *cells; | |
- res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICD_BASE)); | |
+ res = fdt_begin_node(fdt, | |
+ "interrupt-controller@" __stringify(GUEST_GICD_BASE)); | |
if ( res ) | |
return res; | |
@@ -1521,15 +1505,15 @@ static int __init make_gicv2_domU_node(const struct domain *d, void *fdt) | |
GUEST_GICC_BASE, GUEST_GICC_SIZE); | |
res = fdt_property(fdt, "reg", reg, sizeof(reg)); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_property_cell(fdt, "linux,phandle", GUEST_PHANDLE_GIC); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_property_cell(fdt, "phandle", GUEST_PHANDLE_GIC); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_end_node(fdt); | |
@@ -1543,7 +1527,8 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) | |
__be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2]; | |
__be32 *cells; | |
- res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICV3_GICD_BASE)); | |
+ res = fdt_begin_node( | |
+ fdt, "interrupt-controller@" __stringify(GUEST_GICV3_GICD_BASE)); | |
if ( res ) | |
return res; | |
@@ -1570,15 +1555,15 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) | |
GUEST_GICV3_GICR0_BASE, GUEST_GICV3_GICR0_SIZE); | |
res = fdt_property(fdt, "reg", reg, sizeof(reg)); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_property_cell(fdt, "linux,phandle", GUEST_PHANDLE_GIC); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_property_cell(fdt, "phandle", GUEST_PHANDLE_GIC); | |
- if (res) | |
+ if ( res ) | |
return res; | |
res = fdt_end_node(fdt); | |
@@ -1588,7 +1573,7 @@ static int __init make_gicv3_domU_node(const struct domain *d, void *fdt) | |
static int __init make_gic_domU_node(const struct domain *d, void *fdt) | |
{ | |
- switch ( d->arch.vgic.version ) | |
+ switch (d->arch.vgic.version) | |
{ | |
case GIC_V3: | |
return make_gicv3_domU_node(d, fdt); | |
@@ -1621,17 +1606,18 @@ static int __init make_timer_domU_node(const struct domain *d, void *fdt) | |
return res; | |
} | |
- set_interrupt(intrs[0], GUEST_TIMER_PHYS_S_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); | |
- set_interrupt(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); | |
+ set_interrupt(intrs[0], GUEST_TIMER_PHYS_S_PPI, 0xf, | |
+ DT_IRQ_TYPE_LEVEL_LOW); | |
+ set_interrupt(intrs[1], GUEST_TIMER_PHYS_NS_PPI, 0xf, | |
+ DT_IRQ_TYPE_LEVEL_LOW); | |
set_interrupt(intrs[2], GUEST_TIMER_VIRT_PPI, 0xf, DT_IRQ_TYPE_LEVEL_LOW); | |
- res = fdt_property(fdt, "interrupts", intrs, sizeof (intrs[0]) * 3); | |
+ res = fdt_property(fdt, "interrupts", intrs, sizeof(intrs[0]) * 3); | |
if ( res ) | |
return res; | |
- res = fdt_property_cell(fdt, "interrupt-parent", | |
- GUEST_PHANDLE_GIC); | |
- if (res) | |
+ res = fdt_property_cell(fdt, "interrupt-parent", GUEST_PHANDLE_GIC); | |
+ if ( res ) | |
return res; | |
res = fdt_end_node(fdt); | |
@@ -1647,7 +1633,7 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) | |
__be32 reg[GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS]; | |
__be32 *cells; | |
- res = fdt_begin_node(fdt, "sbsa-uart@"__stringify(GUEST_PL011_BASE)); | |
+ res = fdt_begin_node(fdt, "sbsa-uart@" __stringify(GUEST_PL011_BASE)); | |
if ( res ) | |
return res; | |
@@ -1656,9 +1642,8 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) | |
return res; | |
cells = ®[0]; | |
- dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, | |
- GUEST_ROOT_SIZE_CELLS, GUEST_PL011_BASE, | |
- GUEST_PL011_SIZE); | |
+ dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS, | |
+ GUEST_PL011_BASE, GUEST_PL011_SIZE); | |
if ( res ) | |
return res; | |
res = fdt_property(fdt, "reg", reg, sizeof(reg)); | |
@@ -1667,12 +1652,11 @@ static int __init make_vpl011_uart_node(const struct domain *d, void *fdt) | |
set_interrupt(intr, GUEST_VPL011_SPI, 0xf, DT_IRQ_TYPE_LEVEL_HIGH); | |
- res = fdt_property(fdt, "interrupts", intr, sizeof (intr)); | |
+ res = fdt_property(fdt, "interrupts", intr, sizeof(intr)); | |
if ( res ) | |
return res; | |
- res = fdt_property_cell(fdt, "interrupt-parent", | |
- GUEST_PHANDLE_GIC); | |
+ res = fdt_property_cell(fdt, "interrupt-parent", GUEST_PHANDLE_GIC); | |
if ( res ) | |
return res; | |
@@ -1768,14 +1752,15 @@ static int __init prepare_dtb_domU(struct domain *d, struct kernel_info *kinfo) | |
return 0; | |
- err: | |
+err: | |
printk("Device tree generation failed (%d).\n", ret); | |
xfree(kinfo->fdt); | |
return -EINVAL; | |
} | |
-static int __init prepare_dtb_hwdom(struct domain *d, struct kernel_info *kinfo) | |
+static int __init prepare_dtb_hwdom(struct domain *d, | |
+ struct kernel_info *kinfo) | |
{ | |
const p2m_type_t default_p2mt = p2m_mmio_direct_c; | |
const void *fdt; | |
@@ -1807,7 +1792,7 @@ static int __init prepare_dtb_hwdom(struct domain *d, struct kernel_info *kinfo) | |
return 0; | |
- err: | |
+err: | |
printk("Device tree generation failed (%d).\n", ret); | |
xfree(kinfo->fdt); | |
return -EINVAL; | |
@@ -1817,15 +1802,15 @@ static void __init dtb_load(struct kernel_info *kinfo) | |
{ | |
unsigned long left; | |
- printk("Loading dom0 DTB to 0x%"PRIpaddr"-0x%"PRIpaddr"\n", | |
+ printk("Loading dom0 DTB to 0x%" PRIpaddr "-0x%" PRIpaddr "\n", | |
kinfo->dtb_paddr, kinfo->dtb_paddr + fdt_totalsize(kinfo->fdt)); | |
- left = copy_to_guest_phys_flush_dcache(kinfo->d, kinfo->dtb_paddr, | |
- kinfo->fdt, | |
- fdt_totalsize(kinfo->fdt)); | |
+ left = copy_to_guest_phys_flush_dcache( | |
+ kinfo->d, kinfo->dtb_paddr, kinfo->fdt, fdt_totalsize(kinfo->fdt)); | |
if ( left != 0 ) | |
- panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)\n", left); | |
+ panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)\n", | |
+ left); | |
xfree(kinfo->fdt); | |
} | |
@@ -1846,7 +1831,8 @@ static void __init initrd_load(struct kernel_info *kinfo) | |
paddr = mod->start; | |
len = mod->size; | |
- printk("Loading dom0 initrd from %"PRIpaddr" to 0x%"PRIpaddr"-0x%"PRIpaddr"\n", | |
+ printk("Loading dom0 initrd from %" PRIpaddr " to 0x%" PRIpaddr | |
+ "-0x%" PRIpaddr "\n", | |
paddr, load_addr, load_addr + len); | |
/* Fix up linux,initrd-start and linux,initrd-end in /chosen */ | |
@@ -1856,15 +1842,15 @@ static void __init initrd_load(struct kernel_info *kinfo) | |
cellp = (__be32 *)val; | |
dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr); | |
- res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-start", | |
- val, sizeof(val)); | |
+ res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-start", val, | |
+ sizeof(val)); | |
if ( res ) | |
panic("Cannot fix up \"linux,initrd-start\" property\n"); | |
cellp = (__be32 *)val; | |
dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr + len); | |
- res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-end", | |
- val, sizeof(val)); | |
+ res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-end", val, | |
+ sizeof(val)); | |
if ( res ) | |
panic("Cannot fix up \"linux,initrd-end\" property\n"); | |
@@ -1872,8 +1858,7 @@ static void __init initrd_load(struct kernel_info *kinfo) | |
if ( !initrd ) | |
panic("Unable to map the hwdom initrd\n"); | |
- res = copy_to_guest_phys_flush_dcache(kinfo->d, load_addr, | |
- initrd, len); | |
+ res = copy_to_guest_phys_flush_dcache(kinfo->d, load_addr, initrd, len); | |
if ( res != 0 ) | |
panic("Unable to copy the initrd in the hwdom memory\n"); | |
} | |
@@ -1932,7 +1917,7 @@ static void __init find_gnttab_region(struct domain *d, | |
BUG_ON((kinfo->gnttab_start + kinfo->gnttab_size) > GB(4)); | |
#endif | |
- printk("Grant table range: %#"PRIpaddr"-%#"PRIpaddr"\n", | |
+ printk("Grant table range: %#" PRIpaddr "-%#" PRIpaddr "\n", | |
kinfo->gnttab_start, kinfo->gnttab_start + kinfo->gnttab_size); | |
} | |
@@ -1980,12 +1965,12 @@ static int __init construct_domain(struct domain *d, struct kernel_info *kinfo) | |
* Kernel startup entry point. | |
* --------------------------- | |
* | |
- * This is normally called from the decompressor code. The requirements | |
- * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, | |
- * r1 = machine nr, r2 = atags or dtb pointer. | |
+ * This is normally called from the decompressor code. The | |
+ *requirements are: MMU = off, D-cache = off, I-cache = dont care, r0 = | |
+ *0, r1 = machine nr, r2 = atags or dtb pointer. | |
*... | |
*/ | |
- regs->r0 = 0; /* SBZ */ | |
+ regs->r0 = 0; /* SBZ */ | |
regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */ | |
regs->r2 = kinfo->dtb_paddr; | |
} | |
@@ -2037,7 +2022,8 @@ static int __init construct_domU(struct domain *d, | |
} | |
kinfo.unassigned_mem = (paddr_t)mem * SZ_1K; | |
- printk("*** LOADING DOMU cpus=%u memory=%"PRIx64"KB ***\n", d->max_vcpus, mem); | |
+ printk("*** LOADING DOMU cpus=%u memory=%" PRIx64 "KB ***\n", d->max_vcpus, | |
+ mem); | |
kinfo.vpl011 = dt_property_read_bool(node, "vpl011"); | |
@@ -2127,7 +2113,8 @@ int __init construct_dom0(struct domain *d) | |
if ( dom0_mem <= 0 ) | |
{ | |
- warning_add("PLEASE SPECIFY dom0_mem PARAMETER - USING 512M FOR NOW\n"); | |
+ warning_add( | |
+ "PLEASE SPECIFY dom0_mem PARAMETER - USING 512M FOR NOW\n"); | |
dom0_mem = MB(512); | |
} | |
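Most of the domain_build.c churn above lands on calls into libfdt's sequential-write API, where a node is emitted as begin node, properties, end node, with every call checked. A compact sketch of that pattern using the PSCI node from the diff; the compatible stringlist is the one visible in make_psci_node(), while the "method" property value is an assumption for illustration:

    #include <libfdt.h>

    static int make_psci_node_sketch(void *fdt)
    {
        /* NUL-separated stringlist, as the flattened tree expects. */
        static const char compat[] = "arm,psci-1.0\0arm,psci-0.2\0arm,psci";
        int res;

        res = fdt_begin_node(fdt, "psci");
        if ( res )
            return res;

        res = fdt_property(fdt, "compatible", compat, sizeof(compat));
        if ( res )
            return res;

        res = fdt_property_string(fdt, "method", "hvc");
        if ( res )
            return res;

        return fdt_end_node(fdt);
    }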
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c | |
index 9da88b8c64..aac06d937a 100644 | |
--- a/xen/arch/arm/domctl.c | |
+++ b/xen/arch/arm/domctl.c | |
@@ -22,7 +22,7 @@ void arch_get_domain_info(const struct domain *d, | |
info->flags |= XEN_DOMINF_hap; | |
} | |
-static int handle_vuart_init(struct domain *d, | |
+static int handle_vuart_init(struct domain *d, | |
struct xen_domctl_vuart_op *vuart_op) | |
{ | |
int rc; | |
@@ -48,7 +48,7 @@ static int handle_vuart_init(struct domain *d, | |
long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, | |
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) | |
{ | |
- switch ( domctl->cmd ) | |
+ switch (domctl->cmd) | |
{ | |
case XEN_DOMCTL_cacheflush: | |
{ | |
@@ -56,7 +56,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, | |
gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns); | |
int rc; | |
- if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) ) | |
+ if ( domctl->u.cacheflush.nr_pfns > (1U << MAX_ORDER) ) | |
return -EINVAL; | |
if ( gfn_x(e) < gfn_x(s) ) | |
@@ -161,7 +161,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, | |
if ( vuart_op->pad[i] ) | |
return -EINVAL; | |
- switch( vuart_op->cmd ) | |
+ switch (vuart_op->cmd) | |
{ | |
case XEN_DOMCTL_VUART_OP_INIT: | |
rc = handle_vuart_init(d, vuart_op); | |
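Earlier in this domctl.c diff, XEN_DOMCTL_cacheflush bounds a guest-supplied range in two steps: the page count is capped, and a wrapped range is rejected because the computed end compares below the start. That check in isolation, with an illustrative MAX_ORDER rather than Xen's actual value:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ORDER 20 /* illustrative bound, not Xen's definition */

    static bool cacheflush_range_ok(uint64_t start_gfn, uint64_t nr_pfns)
    {
        uint64_t end_gfn = start_gfn + nr_pfns;

        if ( nr_pfns > (UINT64_C(1) << MAX_ORDER) )
            return false;

        return end_gfn >= start_gfn; /* reject unsigned wrap-around */
    }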
diff --git a/xen/arch/arm/early_printk.c b/xen/arch/arm/early_printk.c | |
index 97466a12b1..113b5b4298 100644 | |
--- a/xen/arch/arm/early_printk.c | |
+++ b/xen/arch/arm/early_printk.c | |
@@ -19,8 +19,9 @@ void early_flush(void); | |
void early_puts(const char *s) | |
{ | |
- while (*s != '\0') { | |
- if (*s == '\n') | |
+ while ( *s != '\0' ) | |
+ { | |
+ if ( *s == '\n' ) | |
early_putch('\r'); | |
early_putch(*s); | |
s++; | |
diff --git a/xen/arch/arm/efi/efi-boot.h b/xen/arch/arm/efi/efi-boot.h | |
index ca655ff003..b9e8f67a02 100644 | |
--- a/xen/arch/arm/efi/efi-boot.h | |
+++ b/xen/arch/arm/efi/efi-boot.h | |
@@ -11,14 +11,20 @@ | |
void noreturn efi_xen_start(void *fdt_ptr, uint32_t fdt_size); | |
void __flush_dcache_area(const void *vaddr, unsigned long size); | |
-#define DEVICE_TREE_GUID \ | |
-{0xb1b621d5, 0xf19c, 0x41a5, {0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0}} | |
+#define DEVICE_TREE_GUID \ | |
+ { \ | |
+ 0xb1b621d5, 0xf19c, 0x41a5, \ | |
+ { \ | |
+ 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 \ | |
+ } \ | |
+ } | |
static struct file __initdata dtbfile; | |
static void __initdata *fdt; | |
static void __initdata *memmap; | |
-static int __init setup_chosen_node(void *fdt, int *addr_cells, int *size_cells) | |
+static int __init setup_chosen_node(void *fdt, int *addr_cells, | |
+ int *size_cells) | |
{ | |
int node; | |
const struct fdt_property *prop; | |
@@ -72,7 +78,7 @@ static int __init setup_chosen_node(void *fdt, int *addr_cells, int *size_cells) | |
return -1; | |
} | |
else if ( fdt32_to_cpu(prop->len) ) | |
- return -1; /* Non-empty ranges property */ | |
+ return -1; /* Non-empty ranges property */ | |
return node; | |
} | |
@@ -101,7 +107,8 @@ static int __init fdt_set_reg(void *fdt, int node, int addr_cells, | |
dt_set_cell(&cellp, addr_cells, addr); | |
dt_set_cell(&cellp, size_cells, len); | |
- return(fdt_setprop(fdt, node, "reg", val, sizeof(*cellp) * (cellp - val))); | |
+ return ( | |
+ fdt_setprop(fdt, node, "reg", val, sizeof(*cellp) * (cellp - val))); | |
} | |
static void __init *lookup_fdt_config_table(EFI_SYSTEM_TABLE *sys_table) | |
@@ -140,9 +147,8 @@ static bool __init meminfo_add_bank(struct meminfo *mem, | |
return true; | |
} | |
-static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR *map, | |
- UINTN mmap_size, | |
- UINTN desc_size) | |
+static EFI_STATUS __init efi_process_memory_map_bootinfo( | |
+ EFI_MEMORY_DESCRIPTOR *map, UINTN mmap_size, UINTN desc_size) | |
{ | |
int Index; | |
EFI_MEMORY_DESCRIPTOR *desc_ptr = map; | |
@@ -150,14 +156,13 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR * | |
for ( Index = 0; Index < (mmap_size / desc_size); Index++ ) | |
{ | |
if ( desc_ptr->Type == EfiConventionalMemory || | |
- (!map_bs && | |
- (desc_ptr->Type == EfiBootServicesCode || | |
- desc_ptr->Type == EfiBootServicesData)) ) | |
+ (!map_bs && (desc_ptr->Type == EfiBootServicesCode || | |
+ desc_ptr->Type == EfiBootServicesData)) ) | |
{ | |
if ( !meminfo_add_bank(&bootinfo.mem, desc_ptr) ) | |
{ | |
- PrintStr(L"Warning: All " __stringify(NR_MEM_BANKS) | |
- " bootinfo mem banks exhausted.\r\n"); | |
+ PrintStr(L"Warning: All " __stringify( | |
+ NR_MEM_BANKS) " bootinfo mem banks exhausted.\r\n"); | |
break; | |
} | |
} | |
@@ -166,8 +171,8 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR * | |
{ | |
if ( !meminfo_add_bank(&bootinfo.acpi, desc_ptr) ) | |
{ | |
- PrintStr(L"Error: All " __stringify(NR_MEM_BANKS) | |
- " acpi meminfo mem banks exhausted.\r\n"); | |
+ PrintStr(L"Error: All " __stringify( | |
+ NR_MEM_BANKS) " acpi meminfo mem banks exhausted.\r\n"); | |
return EFI_LOAD_ERROR; | |
} | |
} | |
@@ -183,12 +188,10 @@ static EFI_STATUS __init efi_process_memory_map_bootinfo(EFI_MEMORY_DESCRIPTOR * | |
* of the System table address, the address of the final EFI memory map, | |
* and memory map information. | |
*/ | |
-EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table, | |
- void *fdt, | |
- EFI_MEMORY_DESCRIPTOR *memory_map, | |
- UINTN map_size, | |
- UINTN desc_size, | |
- UINT32 desc_ver) | |
+EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table, void *fdt, | |
+ EFI_MEMORY_DESCRIPTOR *memory_map, | |
+ UINTN map_size, UINTN desc_size, | |
+ UINT32 desc_ver) | |
{ | |
int node; | |
int status; | |
@@ -202,7 +205,7 @@ EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table, | |
* memory description provided to Xen. | |
*/ | |
prev = 0; | |
- for (;;) | |
+ for ( ;; ) | |
{ | |
const char *type; | |
int len; | |
@@ -221,13 +224,13 @@ EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table, | |
prev = node; | |
} | |
- /* | |
- * Delete all memory reserve map entries. When booting via UEFI, | |
- * kernel will use the UEFI memory map to find reserved regions. | |
- */ | |
- num_rsv = fdt_num_mem_rsv(fdt); | |
- while ( num_rsv-- > 0 ) | |
- fdt_del_mem_rsv(fdt, num_rsv); | |
+ /* | |
+ * Delete all memory reserve map entries. When booting via UEFI, | |
+ * kernel will use the UEFI memory map to find reserved regions. | |
+ */ | |
+ num_rsv = fdt_num_mem_rsv(fdt); | |
+ while ( num_rsv-- > 0 ) | |
+ fdt_del_mem_rsv(fdt, num_rsv); | |
/* Add FDT entries for EFI runtime services in chosen node. */ | |
node = fdt_subnode_offset(fdt, 0, "chosen"); | |
@@ -242,32 +245,32 @@ EFI_STATUS __init fdt_add_uefi_nodes(EFI_SYSTEM_TABLE *sys_table, | |
} | |
fdt_val64 = cpu_to_fdt64((u64)(uintptr_t)sys_table); | |
- status = fdt_setprop(fdt, node, "linux,uefi-system-table", | |
- &fdt_val64, sizeof(fdt_val64)); | |
+ status = fdt_setprop(fdt, node, "linux,uefi-system-table", &fdt_val64, | |
+ sizeof(fdt_val64)); | |
if ( status ) | |
goto fdt_set_fail; | |
fdt_val64 = cpu_to_fdt64((u64)(uintptr_t)memory_map); | |
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", | |
- &fdt_val64, sizeof(fdt_val64)); | |
+ status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", &fdt_val64, | |
+ sizeof(fdt_val64)); | |
if ( status ) | |
goto fdt_set_fail; | |
fdt_val32 = cpu_to_fdt32(map_size); | |
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", | |
- &fdt_val32, sizeof(fdt_val32)); | |
+ status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", &fdt_val32, | |
+ sizeof(fdt_val32)); | |
if ( status ) | |
goto fdt_set_fail; | |
fdt_val32 = cpu_to_fdt32(desc_size); | |
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", | |
- &fdt_val32, sizeof(fdt_val32)); | |
+ status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", &fdt_val32, | |
+ sizeof(fdt_val32)); | |
if ( status ) | |
goto fdt_set_fail; | |
fdt_val32 = cpu_to_fdt32(desc_ver); | |
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", | |
- &fdt_val32, sizeof(fdt_val32)); | |
+ status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", &fdt_val32, | |
+ sizeof(fdt_val32)); | |
if ( status ) | |
goto fdt_set_fail; | |
@@ -300,8 +303,8 @@ static void __init *fdt_increase_size(struct file *fdtfile, int add_size) | |
fdt_size = 0; | |
pages = PFN_UP(fdt_size + add_size); | |
- status = efi_bs->AllocatePages(AllocateAnyPages, EfiLoaderData, | |
- pages, &fdt_addr); | |
+ status = efi_bs->AllocatePages(AllocateAnyPages, EfiLoaderData, pages, | |
+ &fdt_addr); | |
if ( status != EFI_SUCCESS ) | |
return NULL; | |
@@ -346,8 +349,7 @@ static void __init efi_arch_relocate_image(unsigned long delta) | |
} | |
static void __init efi_arch_process_memory_map(EFI_SYSTEM_TABLE *SystemTable, | |
- void *map, | |
- UINTN map_size, | |
+ void *map, UINTN map_size, | |
UINTN desc_size, | |
UINT32 desc_ver) | |
{ | |
@@ -372,7 +374,8 @@ static void __init efi_arch_post_exit_boot(void) | |
efi_xen_start(fdt, fdt_totalsize(fdt)); | |
} | |
-static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, char *section) | |
+static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, | |
+ char *section) | |
{ | |
union string name; | |
@@ -392,7 +395,8 @@ static void __init efi_arch_cfg_file_early(EFI_FILE_HANDLE dir_handle, char *sec | |
blexit(L"Unable to create new FDT"); | |
} | |
-static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle, char *section) | |
+static void __init efi_arch_cfg_file_late(EFI_FILE_HANDLE dir_handle, | |
+ char *section) | |
{ | |
} | |
@@ -430,7 +434,8 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name, | |
if ( chosen < 0 ) | |
blexit(L"Unable to find chosen node"); | |
- status = efi_bs->AllocatePool(EfiBootServicesData, EFI_PAGE_SIZE, (void **)&buf); | |
+ status = efi_bs->AllocatePool(EfiBootServicesData, EFI_PAGE_SIZE, | |
+ (void **)&buf); | |
if ( EFI_ERROR(status) ) | |
PrintErrMesg(L"Unable to allocate string buffer", status); | |
@@ -443,15 +448,15 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name, | |
name.s = "xen"; | |
prop_len = 0; | |
- prop_len += snprintf(buf + prop_len, | |
- EFI_PAGE_SIZE - prop_len, "%s", name.s); | |
+ prop_len += | |
+ snprintf(buf + prop_len, EFI_PAGE_SIZE - prop_len, "%s", name.s); | |
if ( prop_len >= EFI_PAGE_SIZE ) | |
blexit(L"FDT string overflow"); | |
if ( cfgfile_options ) | |
{ | |
- prop_len += snprintf(buf + prop_len, | |
- EFI_PAGE_SIZE - prop_len, " %s", cfgfile_options); | |
+ prop_len += snprintf(buf + prop_len, EFI_PAGE_SIZE - prop_len, " %s", | |
+ cfgfile_options); | |
if ( prop_len >= EFI_PAGE_SIZE ) | |
blexit(L"FDT string overflow"); | |
} | |
@@ -466,8 +471,8 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name, | |
if ( name.s ) | |
{ | |
- prop_len += snprintf(buf + prop_len, | |
- EFI_PAGE_SIZE - prop_len, " %s", name.s); | |
+ prop_len += | |
+ snprintf(buf + prop_len, EFI_PAGE_SIZE - prop_len, " %s", name.s); | |
if ( prop_len >= EFI_PAGE_SIZE ) | |
blexit(L"FDT string overflow"); | |
} | |
@@ -478,8 +483,8 @@ static void __init efi_arch_handle_cmdline(CHAR16 *image_name, | |
efi_bs->FreePool(buf); | |
} | |
-static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name, | |
- char *options) | |
+static void __init efi_arch_handle_module(struct file *file, | |
+ const CHAR16 *name, char *options) | |
{ | |
int node; | |
int chosen; | |
@@ -501,7 +506,7 @@ static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name, | |
sizeof(ramdisk_compat)) < 0 ) | |
blexit(L"Unable to set compatible property."); | |
if ( fdt_set_reg(fdt, node, addr_len, size_len, ramdisk.addr, | |
- ramdisk.size) < 0 ) | |
+ ramdisk.size) < 0 ) | |
blexit(L"Unable to set reg property."); | |
} | |
else if ( file == &xsm ) | |
@@ -513,8 +518,8 @@ static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name, | |
if ( fdt_setprop(fdt, node, "compatible", xsm_compat, | |
sizeof(xsm_compat)) < 0 ) | |
blexit(L"Unable to set compatible property."); | |
- if ( fdt_set_reg(fdt, node, addr_len, size_len, xsm.addr, | |
- xsm.size) < 0 ) | |
+ if ( fdt_set_reg(fdt, node, addr_len, size_len, xsm.addr, xsm.size) < | |
+ 0 ) | |
blexit(L"Unable to set reg property."); | |
} | |
else if ( file == &kernel ) | |
@@ -526,7 +531,8 @@ static void __init efi_arch_handle_module(struct file *file, const CHAR16 *name, | |
if ( fdt_setprop(fdt, node, "compatible", kernel_compat, | |
sizeof(kernel_compat)) < 0 ) | |
blexit(L"Unable to set compatible property."); | |
- if ( options && fdt_setprop_string(fdt, node, "bootargs", options) < 0 ) | |
+ if ( options && | |
+ fdt_setprop_string(fdt, node, "bootargs", options) < 0 ) | |
blexit(L"Unable to set bootargs property."); | |
if ( fdt_set_reg(fdt, node, addr_len, size_len, kernel.addr, | |
kernel.size) < 0 ) | |
@@ -571,8 +577,10 @@ static bool __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable) | |
fdt = lookup_fdt_config_table(SystemTable); | |
dtbfile.ptr = fdt; | |
- dtbfile.size = 0; /* Config table memory can't be freed, so set size to 0 */ | |
- if ( !fdt || fdt_node_offset_by_compatible(fdt, 0, "multiboot,module") < 0 ) | |
+ dtbfile.size = | |
+ 0; /* Config table memory can't be freed, so set size to 0 */ | |
+ if ( !fdt || | |
+ fdt_node_offset_by_compatible(fdt, 0, "multiboot,module") < 0 ) | |
{ | |
/* | |
* We either have no FDT, or one without modules, so we must have a | |
@@ -591,9 +599,9 @@ static void __init efi_arch_console_init(UINTN cols, UINTN rows) | |
{ | |
} | |
-static void __init efi_arch_video_init(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, | |
- UINTN info_size, | |
- EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *mode_info) | |
+static void __init | |
+efi_arch_video_init(EFI_GRAPHICS_OUTPUT_PROTOCOL *gop, UINTN info_size, | |
+ EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *mode_info) | |
{ | |
} | |
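Aside, a sketch rather than part of the patch: the fdt_add_uefi_nodes() hunks above leave the UEFI handoff data in /chosen as linux,uefi-* properties. Reading one back against libfdt looks roughly like this, with error handling elided: | |
#include <libfdt.h> | |
#include <stdint.h> | |
/* Sketch: fetch the memory-map address recorded by fdt_add_uefi_nodes(). */ | |
uint64_t uefi_mmap_start(const void *fdt) | |
{ | |
    int chosen = fdt_subnode_offset(fdt, 0, "chosen"); | |
    const fdt64_t *prop = | |
        fdt_getprop(fdt, chosen, "linux,uefi-mmap-start", NULL); | |
    return prop ? fdt64_to_cpu(*prop) : 0; | |
} | |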
diff --git a/xen/arch/arm/efi/efi-dom0.c b/xen/arch/arm/efi/efi-dom0.c | |
index 1c356540f7..46f35052d6 100644 | |
--- a/xen/arch/arm/efi/efi-dom0.c | |
+++ b/xen/arch/arm/efi/efi-dom0.c | |
@@ -61,12 +61,12 @@ void __init acpi_create_efi_system_table(struct domain *d, | |
EFI_CONFIGURATION_TABLE *efi_conf_tbl; | |
EFI_SYSTEM_TABLE *efi_sys_tbl; | |
- table_addr = d->arch.efi_acpi_gpa | |
- + acpi_get_table_offset(tbl_add, TBL_EFIT); | |
- table_size = sizeof(EFI_SYSTEM_TABLE) + sizeof(EFI_CONFIGURATION_TABLE) | |
- + sizeof(xen_efi_fw_vendor); | |
- base_ptr = d->arch.efi_acpi_table | |
- + acpi_get_table_offset(tbl_add, TBL_EFIT); | |
+ table_addr = | |
+ d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_EFIT); | |
+ table_size = sizeof(EFI_SYSTEM_TABLE) + sizeof(EFI_CONFIGURATION_TABLE) + | |
+ sizeof(xen_efi_fw_vendor); | |
+ base_ptr = | |
+ d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_EFIT); | |
efi_sys_tbl = (EFI_SYSTEM_TABLE *)base_ptr; | |
efi_sys_tbl->Hdr.Signature = EFI_SYSTEM_TABLE_SIGNATURE; | |
@@ -84,11 +84,11 @@ void __init acpi_create_efi_system_table(struct domain *d, | |
efi_conf_tbl = (EFI_CONFIGURATION_TABLE *)(base_ptr + offset); | |
efi_conf_tbl->VendorGuid = (EFI_GUID)ACPI_20_TABLE_GUID; | |
efi_conf_tbl->VendorTable = (VOID *)tbl_add[TBL_RSDP].start; | |
- efi_sys_tbl->ConfigurationTable = (EFI_CONFIGURATION_TABLE *)(table_addr | |
- + offset); | |
+ efi_sys_tbl->ConfigurationTable = | |
+ (EFI_CONFIGURATION_TABLE *)(table_addr + offset); | |
xz_crc32_init(); | |
- efi_sys_tbl->Hdr.CRC32 = xz_crc32((uint8_t *)efi_sys_tbl, | |
- efi_sys_tbl->Hdr.HeaderSize, 0); | |
+ efi_sys_tbl->Hdr.CRC32 = | |
+ xz_crc32((uint8_t *)efi_sys_tbl, efi_sys_tbl->Hdr.HeaderSize, 0); | |
tbl_add[TBL_EFIT].start = table_addr; | |
tbl_add[TBL_EFIT].size = table_size; | |
@@ -114,8 +114,8 @@ void __init acpi_create_efi_mmap_table(struct domain *d, | |
unsigned int i; | |
u8 *base_ptr; | |
- base_ptr = d->arch.efi_acpi_table | |
- + acpi_get_table_offset(tbl_add, TBL_MMAP); | |
+ base_ptr = | |
+ d->arch.efi_acpi_table + acpi_get_table_offset(tbl_add, TBL_MMAP); | |
desc = (EFI_MEMORY_DESCRIPTOR *)base_ptr; | |
for ( i = 0; i < mem->nr_banks; i++, desc++ ) | |
@@ -130,10 +130,10 @@ void __init acpi_create_efi_mmap_table(struct domain *d, | |
fill_efi_memory_descriptor(desc, EfiACPIReclaimMemory, | |
d->arch.efi_acpi_gpa, d->arch.efi_acpi_len); | |
- tbl_add[TBL_MMAP].start = d->arch.efi_acpi_gpa | |
- + acpi_get_table_offset(tbl_add, TBL_MMAP); | |
- tbl_add[TBL_MMAP].size = sizeof(EFI_MEMORY_DESCRIPTOR) | |
- * (mem->nr_banks + bootinfo.acpi.nr_banks + 1); | |
+ tbl_add[TBL_MMAP].start = | |
+ d->arch.efi_acpi_gpa + acpi_get_table_offset(tbl_add, TBL_MMAP); | |
+ tbl_add[TBL_MMAP].size = sizeof(EFI_MEMORY_DESCRIPTOR) * | |
+ (mem->nr_banks + bootinfo.acpi.nr_banks + 1); | |
} | |
/* Create /hypervisor/uefi node for efi properties. */ | |
@@ -150,13 +150,12 @@ int __init acpi_make_efi_nodes(void *fdt, struct membank tbl_add[]) | |
if ( res ) | |
return res; | |
- res = fdt_property_u64(fdt, "xen,uefi-mmap-start", | |
- tbl_add[TBL_MMAP].start); | |
+ res = | |
+ fdt_property_u64(fdt, "xen,uefi-mmap-start", tbl_add[TBL_MMAP].start); | |
if ( res ) | |
return res; | |
- res = fdt_property_u32(fdt, "xen,uefi-mmap-size", | |
- tbl_add[TBL_MMAP].size); | |
+ res = fdt_property_u32(fdt, "xen,uefi-mmap-size", tbl_add[TBL_MMAP].size); | |
if ( res ) | |
return res; | |
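Aside, illustrative: acpi_create_efi_system_table() above ends by running xz_crc32() over the header. The UEFI convention it relies on is that an EFI_TABLE_HEADER checksum covers HeaderSize bytes with the CRC32 field itself zero. The rule in isolation, with crc32_of() as an assumed stand-in for xz_crc32(..., 0): | |
#include <stdint.h> | |
uint32_t crc32_of(const void *buf, uint32_t len); /* assumed helper */ | |
struct table_header /* shape of EFI_TABLE_HEADER */ | |
{ | |
    uint64_t Signature; | |
    uint32_t Revision; | |
    uint32_t HeaderSize; | |
    uint32_t CRC32; | |
    uint32_t Reserved; | |
}; | |
void set_header_crc(struct table_header *hdr) | |
{ | |
    hdr->CRC32 = 0; /* field must be zero while summing */ | |
    hdr->CRC32 = crc32_of(hdr, hdr->HeaderSize); | |
} | |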
diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c | |
index 256988c665..44f1e37919 100644 | |
--- a/xen/arch/arm/gic-v2.c | |
+++ b/xen/arch/arm/gic-v2.c | |
@@ -45,70 +45,72 @@ | |
* LR register definitions are GIC v2 specific. | |
* Moved these definitions from header file to here | |
*/ | |
-#define GICH_V2_LR_VIRTUAL_MASK 0x3ff | |
-#define GICH_V2_LR_VIRTUAL_SHIFT 0 | |
-#define GICH_V2_LR_PHYSICAL_MASK 0x3ff | |
-#define GICH_V2_LR_PHYSICAL_SHIFT 10 | |
-#define GICH_V2_LR_STATE_MASK 0x3 | |
-#define GICH_V2_LR_STATE_SHIFT 28 | |
-#define GICH_V2_LR_PENDING (1U << 28) | |
-#define GICH_V2_LR_ACTIVE (1U << 29) | |
-#define GICH_V2_LR_PRIORITY_SHIFT 23 | |
-#define GICH_V2_LR_PRIORITY_MASK 0x1f | |
-#define GICH_V2_LR_HW_SHIFT 31 | |
-#define GICH_V2_LR_HW_MASK 0x1 | |
-#define GICH_V2_LR_GRP_SHIFT 30 | |
-#define GICH_V2_LR_GRP_MASK 0x1 | |
+#define GICH_V2_LR_VIRTUAL_MASK 0x3ff | |
+#define GICH_V2_LR_VIRTUAL_SHIFT 0 | |
+#define GICH_V2_LR_PHYSICAL_MASK 0x3ff | |
+#define GICH_V2_LR_PHYSICAL_SHIFT 10 | |
+#define GICH_V2_LR_STATE_MASK 0x3 | |
+#define GICH_V2_LR_STATE_SHIFT 28 | |
+#define GICH_V2_LR_PENDING (1U << 28) | |
+#define GICH_V2_LR_ACTIVE (1U << 29) | |
+#define GICH_V2_LR_PRIORITY_SHIFT 23 | |
+#define GICH_V2_LR_PRIORITY_MASK 0x1f | |
+#define GICH_V2_LR_HW_SHIFT 31 | |
+#define GICH_V2_LR_HW_MASK 0x1 | |
+#define GICH_V2_LR_GRP_SHIFT 30 | |
+#define GICH_V2_LR_GRP_MASK 0x1 | |
#define GICH_V2_LR_MAINTENANCE_IRQ (1U << 19) | |
-#define GICH_V2_LR_GRP1 (1U << 30) | |
-#define GICH_V2_LR_HW (1U << GICH_V2_LR_HW_SHIFT) | |
-#define GICH_V2_LR_CPUID_SHIFT 10 | |
-#define GICH_V2_LR_CPUID_MASK 0x7 | |
-#define GICH_V2_VTR_NRLRGS 0x3f | |
+#define GICH_V2_LR_GRP1 (1U << 30) | |
+#define GICH_V2_LR_HW (1U << GICH_V2_LR_HW_SHIFT) | |
+#define GICH_V2_LR_CPUID_SHIFT 10 | |
+#define GICH_V2_LR_CPUID_MASK 0x7 | |
+#define GICH_V2_VTR_NRLRGS 0x3f | |
-#define GICH_V2_VMCR_PRIORITY_MASK 0x1f | |
-#define GICH_V2_VMCR_PRIORITY_SHIFT 27 | |
+#define GICH_V2_VMCR_PRIORITY_MASK 0x1f | |
+#define GICH_V2_VMCR_PRIORITY_SHIFT 27 | |
/* GICv2m extension register definitions. */ | |
/* | |
-* MSI_TYPER: | |
-* [31:26] Reserved | |
-* [25:16] lowest SPI assigned to MSI | |
-* [15:10] Reserved | |
-* [9:0] Number of SPIs assigned to MSI | |
-*/ | |
-#define V2M_MSI_TYPER 0x008 | |
-#define V2M_MSI_TYPER_BASE_SHIFT 16 | |
-#define V2M_MSI_TYPER_BASE_MASK 0x3FF | |
-#define V2M_MSI_TYPER_NUM_MASK 0x3FF | |
-#define V2M_MSI_SETSPI_NS 0x040 | |
-#define V2M_MIN_SPI 32 | |
-#define V2M_MAX_SPI 1019 | |
-#define V2M_MSI_IIDR 0xFCC | |
- | |
-#define V2M_MSI_TYPER_BASE_SPI(x) \ | |
- (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) | |
- | |
-#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) | |
- | |
-struct v2m_data { | |
+ * MSI_TYPER: | |
+ * [31:26] Reserved | |
+ * [25:16] lowest SPI assigned to MSI | |
+ * [15:10] Reserved | |
+ * [9:0] Number of SPIs assigned to MSI | |
+ */ | |
+#define V2M_MSI_TYPER 0x008 | |
+#define V2M_MSI_TYPER_BASE_SHIFT 16 | |
+#define V2M_MSI_TYPER_BASE_MASK 0x3FF | |
+#define V2M_MSI_TYPER_NUM_MASK 0x3FF | |
+#define V2M_MSI_SETSPI_NS 0x040 | |
+#define V2M_MIN_SPI 32 | |
+#define V2M_MAX_SPI 1019 | |
+#define V2M_MSI_IIDR 0xFCC | |
+ | |
+#define V2M_MSI_TYPER_BASE_SPI(x) \ | |
+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) | |
+ | |
+#define V2M_MSI_TYPER_NUM_SPI(x) ((x)&V2M_MSI_TYPER_NUM_MASK) | |
+ | |
+struct v2m_data | |
+{ | |
struct list_head entry; | |
/* Pointer to the DT node representing the v2m frame */ | |
const struct dt_device_node *dt_node; | |
- paddr_t addr; /* Register frame base */ | |
- paddr_t size; /* Register frame size */ | |
+ paddr_t addr; /* Register frame base */ | |
+ paddr_t size; /* Register frame size */ | |
u32 spi_start; /* The SPI number that MSIs start */ | |
- u32 nr_spis; /* The number of SPIs for MSIs */ | |
+ u32 nr_spis; /* The number of SPIs for MSIs */ | |
}; | |
/* v2m extension register frame information list */ | |
static LIST_HEAD(gicv2m_info); | |
/* Global state */ | |
-static struct { | |
- void __iomem * map_dbase; /* IO mapped Address of distributor registers */ | |
- void __iomem * map_cbase; /* IO mapped Address of CPU interface registers */ | |
- void __iomem * map_hbase; /* IO Address of virtual interface registers */ | |
+static struct | |
+{ | |
+ void __iomem *map_dbase; /* IO mapped Address of distributor registers */ | |
+ void __iomem *map_cbase; /* IO mapped Address of CPU interface registers */ | |
+ void __iomem *map_hbase; /* IO Address of virtual interface registers */ | |
spinlock_t lock; | |
} gicv2; | |
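Aside, a reading aid for the GICH_V2_LR_* block above and not part of the patch: a list register is just the vIRQ, priority and state fields packed into one 32-bit word, which is what gicv2_update_lr() does further down in this file. A minimal sketch using the same masks and shifts: | |
#include <stdint.h> | |
/* Pack a GICv2 LR: 10-bit vIRQ at bit 0, 5-bit priority at bit 23, | |
 * pending state at bit 28, per the defines above. */ | |
uint32_t pack_lr(uint32_t virq, uint8_t prio, int pending) | |
{ | |
    uint32_t lr = (virq & 0x3ff) << 0; | |
    lr |= (uint32_t)(prio & 0x1f) << 23; | |
    if ( pending ) | |
        lr |= 1U << 28; | |
    return lr; | |
} | |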
@@ -165,7 +167,7 @@ static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask) | |
cpumask_t possible_mask; | |
cpumask_and(&possible_mask, cpumask, &cpu_possible_map); | |
- for_each_cpu( cpu, &possible_mask ) | |
+ for_each_cpu (cpu, &possible_mask) | |
{ | |
ASSERT(cpu < NR_GIC_CPU_IF); | |
mask |= per_cpu(gic_cpu_id, cpu); | |
@@ -210,8 +212,7 @@ static void gicv2_dump_state(const struct vcpu *v) | |
if ( v == current ) | |
{ | |
for ( i = 0; i < gicv2_info.nr_lrs; i++ ) | |
- printk(" HW_LR[%d]=%x\n", i, | |
- readl_gich(GICH_LR + i * 4)); | |
+ printk(" HW_LR[%d]=%x\n", i, readl_gich(GICH_LR + i * 4)); | |
} | |
else | |
{ | |
@@ -307,17 +308,16 @@ static void gicv2_set_irq_type(struct irq_desc *desc, unsigned int type) | |
writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4); | |
actual = readl_gicd(GICD_ICFGR + (irq / 16) * 4); | |
- if ( ( cfg & edgebit ) ^ ( actual & edgebit ) ) | |
+ if ( (cfg & edgebit) ^ (actual & edgebit) ) | |
{ | |
- printk(XENLOG_WARNING "GICv2: WARNING: " | |
+ printk(XENLOG_WARNING | |
+ "GICv2: WARNING: " | |
"CPU%d: Failed to configure IRQ%u as %s-triggered. " | |
"H/w forces to %s-triggered.\n", | |
- smp_processor_id(), desc->irq, | |
- cfg & edgebit ? "Edge" : "Level", | |
+ smp_processor_id(), desc->irq, cfg & edgebit ? "Edge" : "Level", | |
actual & edgebit ? "Edge" : "Level"); | |
- desc->arch.type = actual & edgebit ? | |
- IRQ_TYPE_EDGE_RISING : | |
- IRQ_TYPE_LEVEL_HIGH; | |
+ desc->arch.type = | |
+ actual & edgebit ? IRQ_TYPE_EDGE_RISING : IRQ_TYPE_LEVEL_HIGH; | |
} | |
spin_unlock(&gicv2.lock); | |
@@ -358,10 +358,9 @@ static void __init gicv2_dist_init(void) | |
gicv2_info.nr_lines = nr_lines; | |
gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5); | |
- printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n", | |
- nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s", | |
- (type & GICD_TYPE_SEC) ? ", secure" : "", | |
- readl_gicd(GICD_IIDR)); | |
+ printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n", nr_lines, gic_cpus, | |
+ (gic_cpus == 1) ? "" : "s", | |
+ (type & GICD_TYPE_SEC) ? ", secure" : "", readl_gicd(GICD_IIDR)); | |
/* Default all global IRQs to level, active low */ | |
for ( i = 32; i < nr_lines; i += 16 ) | |
@@ -373,8 +372,8 @@ static void __init gicv2_dist_init(void) | |
/* Default priority for global interrupts */ | |
for ( i = 32; i < nr_lines; i += 4 ) | |
- writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | | |
- GIC_PRI_IRQ << 8 | GIC_PRI_IRQ, | |
+ writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | | |
+ GIC_PRI_IRQ, | |
GICD_IPRIORITYR + (i / 4) * 4); | |
/* Disable all global interrupts */ | |
@@ -403,14 +402,14 @@ static void gicv2_cpu_init(void) | |
/* Set SGI priorities */ | |
for ( i = 0; i < 16; i += 4 ) | |
- writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | | |
- GIC_PRI_IPI << 8 | GIC_PRI_IPI, | |
+ writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | GIC_PRI_IPI << 8 | | |
+ GIC_PRI_IPI, | |
GICD_IPRIORITYR + (i / 4) * 4); | |
/* Set PPI priorities */ | |
for ( i = 16; i < 32; i += 4 ) | |
- writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | | |
- GIC_PRI_IRQ << 8 | GIC_PRI_IRQ, | |
+ writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | | |
+ GIC_PRI_IRQ, | |
GICD_IPRIORITYR + (i / 4) * 4); | |
/* Local settings: interface controller */ | |
@@ -419,7 +418,7 @@ static void gicv2_cpu_init(void) | |
/* Finest granularity of priority */ | |
writel_gicc(0x0, GICC_BPR); | |
/* Turn on delivery */ | |
- writel_gicc(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC_CTLR); | |
+ writel_gicc(GICC_CTL_ENABLE | GICC_CTL_EOI, GICC_CTLR); | |
} | |
static void gicv2_cpu_disable(void) | |
@@ -433,7 +432,7 @@ static void gicv2_hyp_init(void) | |
uint8_t nr_lrs; | |
vtr = readl_gich(GICH_VTR); | |
- nr_lrs = (vtr & GICH_V2_VTR_NRLRGS) + 1; | |
+ nr_lrs = (vtr & GICH_V2_VTR_NRLRGS) + 1; | |
gicv2_info.nr_lrs = nr_lrs; | |
} | |
@@ -466,7 +465,7 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode, | |
*/ | |
dmb(ishst); | |
- switch ( irqmode ) | |
+ switch (irqmode) | |
{ | |
case SGI_TARGET_OTHERS: | |
writel_gicd(GICD_SGI_TARGET_OTHERS | sgi, GICD_SGIR); | |
@@ -477,8 +476,8 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode, | |
case SGI_TARGET_LIST: | |
cpumask_and(&online_mask, cpu_mask, &cpu_online_map); | |
mask = gicv2_cpu_mask(&online_mask); | |
- writel_gicd(GICD_SGI_TARGET_LIST | | |
- (mask << GICD_SGI_TARGET_SHIFT) | sgi, | |
+ writel_gicd(GICD_SGI_TARGET_LIST | (mask << GICD_SGI_TARGET_SHIFT) | | |
+ sgi, | |
GICD_SGIR); | |
break; | |
default: | |
@@ -503,13 +502,13 @@ static void gicv2_update_lr(int lr, unsigned int virq, uint8_t priority, | |
BUG_ON(lr >= gicv2_info.nr_lrs); | |
BUG_ON(lr < 0); | |
- lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) | | |
+ lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) | | |
((GIC_PRI_TO_GUEST(priority) & GICH_V2_LR_PRIORITY_MASK) | |
- << GICH_V2_LR_PRIORITY_SHIFT) | | |
+ << GICH_V2_LR_PRIORITY_SHIFT) | | |
((virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT)); | |
if ( hw_irq != INVALID_IRQ ) | |
- lr_reg |= GICH_V2_LR_HW | ((hw_irq & GICH_V2_LR_PHYSICAL_MASK ) | |
+ lr_reg |= GICH_V2_LR_HW | ((hw_irq & GICH_V2_LR_PHYSICAL_MASK) | |
<< GICH_V2_LR_PHYSICAL_SHIFT); | |
writel_gich(lr_reg, GICH_LR + lr * 4); | |
@@ -524,9 +523,10 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg) | |
{ | |
uint32_t lrv; | |
- lrv = readl_gich(GICH_LR + lr * 4); | |
+ lrv = readl_gich(GICH_LR + lr * 4); | |
lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK; | |
- lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK; | |
+ lr_reg->priority = | |
+ (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK; | |
lr_reg->pending = lrv & GICH_V2_LR_PENDING; | |
lr_reg->active = lrv & GICH_V2_LR_ACTIVE; | |
lr_reg->hw_status = lrv & GICH_V2_LR_HW; | |
@@ -543,8 +543,8 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg) | |
* This is only valid for SGI, but it does not matter to always | |
* read it as it should be 0 by default. | |
*/ | |
- lr_reg->virt.source = (lrv >> GICH_V2_LR_CPUID_SHIFT) | |
- & GICH_V2_LR_CPUID_MASK; | |
+ lr_reg->virt.source = | |
+ (lrv >> GICH_V2_LR_CPUID_SHIFT) & GICH_V2_LR_CPUID_MASK; | |
} | |
} | |
@@ -552,9 +552,10 @@ static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg) | |
{ | |
uint32_t lrv = 0; | |
- lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT) | | |
- ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK) | |
- << GICH_V2_LR_PRIORITY_SHIFT) ); | |
+ lrv = (((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) | |
+ << GICH_V2_LR_VIRTUAL_SHIFT) | | |
+ ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK) | |
+ << GICH_V2_LR_PRIORITY_SHIFT)); | |
if ( lr_reg->active ) | |
lrv |= GICH_V2_LR_ACTIVE; | |
@@ -596,13 +597,13 @@ static void gicv2_hcr_status(uint32_t flag, bool status) | |
static unsigned int gicv2_read_vmcr_priority(void) | |
{ | |
- return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT) | |
- & GICH_V2_VMCR_PRIORITY_MASK); | |
+ return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT) & | |
+ GICH_V2_VMCR_PRIORITY_MASK); | |
} | |
static unsigned int gicv2_read_apr(int apr_reg) | |
{ | |
- return readl_gich(GICH_APR); | |
+ return readl_gich(GICH_APR); | |
} | |
static bool gicv2_read_pending_state(struct irq_desc *irqd) | |
@@ -669,7 +670,8 @@ static void gicv2_guest_irq_end(struct irq_desc *desc) | |
/* Deactivation happens in maintenance interrupt / via GICV */ | |
} | |
-static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask) | |
+static void gicv2_irq_set_affinity(struct irq_desc *desc, | |
+ const cpumask_t *cpu_mask) | |
{ | |
unsigned int mask; | |
@@ -690,12 +692,13 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) | |
const struct v2m_data *v2m_data; | |
/* For the moment, we'll assign all v2m frames to the hardware domain. */ | |
- list_for_each_entry( v2m_data, &gicv2m_info, entry ) | |
+ list_for_each_entry (v2m_data, &gicv2m_info, entry) | |
{ | |
int ret; | |
u32 spi; | |
- printk("GICv2: Mapping v2m frame to d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n", | |
+ printk("GICv2: Mapping v2m frame to d%d: addr=0x%" PRIpaddr | |
+ " size=0x%" PRIpaddr " spi_base=%u num_spis=%u\n", | |
d->domain_id, v2m_data->addr, v2m_data->size, | |
v2m_data->spi_start, v2m_data->nr_spis); | |
@@ -724,7 +727,8 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) | |
if ( ret ) | |
{ | |
printk(XENLOG_ERR | |
- "GICv2: Failed to set v2m MSI SPI[%d] type.\n", spi); | |
+ "GICv2: Failed to set v2m MSI SPI[%d] type.\n", | |
+ spi); | |
return ret; | |
} | |
@@ -758,8 +762,7 @@ static int gicv2_map_hwdown_extra_mappings(struct domain *d) | |
* https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt | |
*/ | |
static int gicv2m_make_dt_node(const struct domain *d, | |
- const struct dt_device_node *gic, | |
- void *fdt) | |
+ const struct dt_device_node *gic, void *fdt) | |
{ | |
u32 len; | |
int res; | |
@@ -783,11 +786,12 @@ static int gicv2m_make_dt_node(const struct domain *d, | |
if ( res ) | |
return res; | |
- list_for_each_entry( v2m_data, &gicv2m_info, entry ) | |
+ list_for_each_entry (v2m_data, &gicv2m_info, entry) | |
{ | |
v2m = v2m_data->dt_node; | |
- printk("GICv2: Creating v2m DT node for d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n", | |
+ printk("GICv2: Creating v2m DT node for d%d: addr=0x%" PRIpaddr | |
+ " size=0x%" PRIpaddr " spi_base=%u num_spis=%u\n", | |
d->domain_id, v2m_data->addr, v2m_data->size, | |
v2m_data->spi_start, v2m_data->nr_spis); | |
@@ -862,7 +866,8 @@ static int gicv2_make_hwdom_dt_node(const struct domain *d, | |
compatible = dt_get_property(gic, "compatible", &len); | |
if ( !compatible ) | |
{ | |
- dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n"); | |
+ dprintk(XENLOG_ERR, | |
+ "Can't find compatible property for the gic node\n"); | |
return -FDT_ERR_XEN(ENOENT); | |
} | |
@@ -896,24 +901,24 @@ static int gicv2_make_hwdom_dt_node(const struct domain *d, | |
/* XXX different for level vs edge */ | |
static hw_irq_controller gicv2_host_irq_type = { | |
- .typename = "gic-v2", | |
- .startup = gicv2_irq_startup, | |
- .shutdown = gicv2_irq_shutdown, | |
- .enable = gicv2_irq_enable, | |
- .disable = gicv2_irq_disable, | |
- .ack = gicv2_irq_ack, | |
- .end = gicv2_host_irq_end, | |
+ .typename = "gic-v2", | |
+ .startup = gicv2_irq_startup, | |
+ .shutdown = gicv2_irq_shutdown, | |
+ .enable = gicv2_irq_enable, | |
+ .disable = gicv2_irq_disable, | |
+ .ack = gicv2_irq_ack, | |
+ .end = gicv2_host_irq_end, | |
.set_affinity = gicv2_irq_set_affinity, | |
}; | |
static hw_irq_controller gicv2_guest_irq_type = { | |
- .typename = "gic-v2", | |
- .startup = gicv2_irq_startup, | |
- .shutdown = gicv2_irq_shutdown, | |
- .enable = gicv2_irq_enable, | |
- .disable = gicv2_irq_disable, | |
- .ack = gicv2_irq_ack, | |
- .end = gicv2_guest_irq_end, | |
+ .typename = "gic-v2", | |
+ .startup = gicv2_irq_startup, | |
+ .shutdown = gicv2_irq_shutdown, | |
+ .enable = gicv2_irq_enable, | |
+ .disable = gicv2_irq_disable, | |
+ .ack = gicv2_irq_ack, | |
+ .end = gicv2_guest_irq_end, | |
.set_affinity = gicv2_irq_set_affinity, | |
}; | |
@@ -964,9 +969,9 @@ static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size, | |
if ( spi_start < V2M_MIN_SPI ) | |
panic("GICv2: Invalid v2m base SPI:%u\n", spi_start); | |
- if ( ( nr_spis == 0 ) || ( spi_start + nr_spis > V2M_MAX_SPI ) ) | |
- panic("GICv2: Number of v2m SPIs (%u) exceeds maximum (%u)\n", | |
- nr_spis, V2M_MAX_SPI - V2M_MIN_SPI + 1); | |
+ if ( (nr_spis == 0) || (spi_start + nr_spis > V2M_MAX_SPI) ) | |
+ panic("GICv2: Number of v2m SPIs (%u) exceeds maximum (%u)\n", nr_spis, | |
+ V2M_MAX_SPI - V2M_MIN_SPI + 1); | |
/* Allocate an entry to record new v2m frame information. */ | |
v2m_data = xzalloc_bytes(sizeof(struct v2m_data)); | |
@@ -981,12 +986,12 @@ static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size, | |
v2m_data->dt_node = v2m; | |
printk("GICv2m extension register frame:\n" | |
- " gic_v2m_addr=%"PRIpaddr"\n" | |
- " gic_v2m_size=%"PRIpaddr"\n" | |
+ " gic_v2m_addr=%" PRIpaddr "\n" | |
+ " gic_v2m_size=%" PRIpaddr "\n" | |
" gic_v2m_spi_base=%u\n" | |
" gic_v2m_num_spis=%u\n", | |
- v2m_data->addr, v2m_data->size, | |
- v2m_data->spi_start, v2m_data->nr_spis); | |
+ v2m_data->addr, v2m_data->size, v2m_data->spi_start, | |
+ v2m_data->nr_spis); | |
list_add_tail(&v2m_data->entry, &gicv2m_info); | |
} | |
@@ -1017,8 +1022,9 @@ static void gicv2_extension_dt_init(const struct dt_device_node *node) | |
*/ | |
if ( dt_property_read_u32(v2m, "arm,msi-base-spi", &spi_start) && | |
dt_property_read_u32(v2m, "arm,msi-num-spis", &nr_spis) ) | |
- printk("GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n", | |
- spi_start, nr_spis); | |
+ printk( | |
+ "GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n", | |
+ spi_start, nr_spis); | |
/* Add this v2m frame information to list. */ | |
gicv2_add_v2m_frame_to_list(addr, size, spi_start, nr_spis, v2m); | |
@@ -1064,13 +1070,16 @@ static void __init gicv2_dt_init(void) | |
if ( csize < SZ_8K ) | |
{ | |
printk(XENLOG_WARNING "GICv2: WARNING: " | |
- "The GICC size is too small: %#"PRIx64" expected %#x\n", | |
+ "The GICC size is too small: %#" PRIx64 | |
+ " expected %#x\n", | |
csize, SZ_8K); | |
if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) ) | |
{ | |
- printk(XENLOG_WARNING "GICv2: enable platform quirk: 64K stride\n"); | |
+ printk(XENLOG_WARNING | |
+ "GICv2: enable platform quirk: 64K stride\n"); | |
vsize = csize = SZ_128K; | |
- } else | |
+ } | |
+ else | |
csize = SZ_8K; | |
} | |
@@ -1079,8 +1088,9 @@ static void __init gicv2_dt_init(void) | |
* same size. | |
*/ | |
if ( csize != vsize ) | |
- panic("GICv2: Sizes of GICC (%#"PRIpaddr") and GICV (%#"PRIpaddr") don't match\n", | |
- csize, vsize); | |
+ panic("GICv2: Sizes of GICC (%#" PRIpaddr ") and GICV (%#" PRIpaddr | |
+ ") don't match\n", | |
+ csize, vsize); | |
/* | |
* Check whether this GIC implements the v2m extension. If so, | |
@@ -1134,8 +1144,8 @@ static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) | |
return -EINVAL; | |
} | |
- host_gicc = container_of(header, struct acpi_madt_generic_interrupt, | |
- header); | |
+ host_gicc = | |
+ container_of(header, struct acpi_madt_generic_interrupt, header); | |
size = sizeof(struct acpi_madt_generic_interrupt); | |
/* Add Generic Interrupt */ | |
for ( i = 0; i < d->max_vcpus; i++ ) | |
@@ -1157,13 +1167,12 @@ static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) | |
return table_len; | |
} | |
-static int __init | |
-gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
static int cpu_base_assigned = 0; | |
struct acpi_madt_generic_interrupt *processor = | |
- container_of(header, struct acpi_madt_generic_interrupt, header); | |
+ container_of(header, struct acpi_madt_generic_interrupt, header); | |
if ( BAD_MADT_ENTRY(processor, end) ) | |
return -EINVAL; | |
@@ -1186,10 +1195,10 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
} | |
else | |
{ | |
- if ( cbase != processor->base_address | |
- || hbase != processor->gich_base_address | |
- || vbase != processor->gicv_base_address | |
- || gicv2_info.maintenance_irq != processor->vgic_interrupt ) | |
+ if ( cbase != processor->base_address || | |
+ hbase != processor->gich_base_address || | |
+ vbase != processor->gicv_base_address || | |
+ gicv2_info.maintenance_irq != processor->vgic_interrupt ) | |
{ | |
printk("GICv2: GICC entries are not the same in MADT table\n"); | |
return -EINVAL; | |
@@ -1199,12 +1208,11 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_madt_distributor( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
struct acpi_madt_generic_distributor *dist = | |
- container_of(header, struct acpi_madt_generic_distributor, header); | |
+ container_of(header, struct acpi_madt_generic_distributor, header); | |
if ( BAD_MADT_ENTRY(dist, end) ) | |
return -EINVAL; | |
@@ -1247,7 +1255,9 @@ static void __init gicv2_acpi_init(void) | |
panic("GICv2: No valid GICD entries exist\n"); | |
} | |
#else | |
-static void __init gicv2_acpi_init(void) { } | |
+static void __init gicv2_acpi_init(void) | |
+{ | |
+} | |
static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset) | |
{ | |
return 0; | |
@@ -1264,13 +1274,12 @@ static int __init gicv2_init(void) | |
gicv2_acpi_init(); | |
printk("GICv2 initialization:\n" | |
- " gic_dist_addr=%"PRIpaddr"\n" | |
- " gic_cpu_addr=%"PRIpaddr"\n" | |
- " gic_hyp_addr=%"PRIpaddr"\n" | |
- " gic_vcpu_addr=%"PRIpaddr"\n" | |
- " gic_maintenance_irq=%u\n", | |
- dbase, cbase, hbase, vbase, | |
- gicv2_info.maintenance_irq); | |
+ " gic_dist_addr=%" PRIpaddr "\n" | |
+ " gic_cpu_addr=%" PRIpaddr "\n" | |
+ " gic_hyp_addr=%" PRIpaddr "\n" | |
+ " gic_vcpu_addr=%" PRIpaddr "\n" | |
+ " gic_maintenance_irq=%u\n", | |
+ dbase, cbase, hbase, vbase, gicv2_info.maintenance_irq); | |
if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) || | |
(hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) ) | |
@@ -1298,11 +1307,12 @@ static int __init gicv2_init(void) | |
gicv2.map_cbase += aliased_offset; | |
printk(XENLOG_WARNING | |
- "GICv2: Adjusting CPU interface base to %#"PRIx64"\n", | |
+ "GICv2: Adjusting CPU interface base to %#" PRIx64 "\n", | |
cbase + aliased_offset); | |
- } else if ( csize == SZ_128K ) | |
- printk(XENLOG_WARNING | |
- "GICv2: GICC size=%#"PRIx64" but not aliased\n", | |
+ } | |
+ else if ( csize == SZ_128K ) | |
+ printk(XENLOG_WARNING "GICv2: GICC size=%#" PRIx64 | |
+ " but not aliased\n", | |
csize); | |
gicv2.map_hbase = ioremap_nocache(hbase, PAGE_SIZE); | |
@@ -1331,37 +1341,37 @@ static void gicv2_do_LPI(unsigned int lpi) | |
} | |
const static struct gic_hw_operations gicv2_ops = { | |
- .info = &gicv2_info, | |
- .init = gicv2_init, | |
- .secondary_init = gicv2_secondary_cpu_init, | |
- .save_state = gicv2_save_state, | |
- .restore_state = gicv2_restore_state, | |
- .dump_state = gicv2_dump_state, | |
- .gic_host_irq_type = &gicv2_host_irq_type, | |
- .gic_guest_irq_type = &gicv2_guest_irq_type, | |
- .eoi_irq = gicv2_eoi_irq, | |
- .deactivate_irq = gicv2_dir_irq, | |
- .read_irq = gicv2_read_irq, | |
- .set_active_state = gicv2_set_active_state, | |
- .set_pending_state = gicv2_set_pending_state, | |
- .set_irq_type = gicv2_set_irq_type, | |
- .set_irq_priority = gicv2_set_irq_priority, | |
- .send_SGI = gicv2_send_SGI, | |
- .disable_interface = gicv2_disable_interface, | |
- .update_lr = gicv2_update_lr, | |
- .update_hcr_status = gicv2_hcr_status, | |
- .clear_lr = gicv2_clear_lr, | |
- .read_lr = gicv2_read_lr, | |
- .write_lr = gicv2_write_lr, | |
- .read_vmcr_priority = gicv2_read_vmcr_priority, | |
- .read_apr = gicv2_read_apr, | |
- .read_pending_state = gicv2_read_pending_state, | |
- .make_hwdom_dt_node = gicv2_make_hwdom_dt_node, | |
- .make_hwdom_madt = gicv2_make_hwdom_madt, | |
+ .info = &gicv2_info, | |
+ .init = gicv2_init, | |
+ .secondary_init = gicv2_secondary_cpu_init, | |
+ .save_state = gicv2_save_state, | |
+ .restore_state = gicv2_restore_state, | |
+ .dump_state = gicv2_dump_state, | |
+ .gic_host_irq_type = &gicv2_host_irq_type, | |
+ .gic_guest_irq_type = &gicv2_guest_irq_type, | |
+ .eoi_irq = gicv2_eoi_irq, | |
+ .deactivate_irq = gicv2_dir_irq, | |
+ .read_irq = gicv2_read_irq, | |
+ .set_active_state = gicv2_set_active_state, | |
+ .set_pending_state = gicv2_set_pending_state, | |
+ .set_irq_type = gicv2_set_irq_type, | |
+ .set_irq_priority = gicv2_set_irq_priority, | |
+ .send_SGI = gicv2_send_SGI, | |
+ .disable_interface = gicv2_disable_interface, | |
+ .update_lr = gicv2_update_lr, | |
+ .update_hcr_status = gicv2_hcr_status, | |
+ .clear_lr = gicv2_clear_lr, | |
+ .read_lr = gicv2_read_lr, | |
+ .write_lr = gicv2_write_lr, | |
+ .read_vmcr_priority = gicv2_read_vmcr_priority, | |
+ .read_apr = gicv2_read_apr, | |
+ .read_pending_state = gicv2_read_pending_state, | |
+ .make_hwdom_dt_node = gicv2_make_hwdom_dt_node, | |
+ .make_hwdom_madt = gicv2_make_hwdom_madt, | |
.get_hwdom_extra_madt_size = gicv2_get_hwdom_extra_madt_size, | |
.map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings, | |
- .iomem_deny_access = gicv2_iomem_deny_access, | |
- .do_LPI = gicv2_do_LPI, | |
+ .iomem_deny_access = gicv2_iomem_deny_access, | |
+ .do_LPI = gicv2_do_LPI, | |
}; | |
/* Set up the GIC */ | |
@@ -1376,20 +1386,18 @@ static int __init gicv2_dt_preinit(struct dt_device_node *node, | |
return 0; | |
} | |
-static const struct dt_device_match gicv2_dt_match[] __initconst = | |
-{ | |
+static const struct dt_device_match gicv2_dt_match[] __initconst = { | |
DT_MATCH_GIC_V2, | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC) | |
- .dt_match = gicv2_dt_match, | |
- .init = gicv2_dt_preinit, | |
-DT_DEVICE_END | |
+DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC).dt_match = gicv2_dt_match, | |
+ .init = gicv2_dt_preinit, | |
+ DT_DEVICE_END | |
#ifdef CONFIG_ACPI | |
-/* Set up the GIC */ | |
-static int __init gicv2_acpi_preinit(const void *data) | |
+ /* Set up the GIC */ | |
+ static int __init gicv2_acpi_preinit(const void *data) | |
{ | |
gicv2_info.hw_version = GIC_V2; | |
register_gic_ops(&gicv2_ops); | |
@@ -1397,16 +1405,16 @@ static int __init gicv2_acpi_preinit(const void *data) | |
return 0; | |
} | |
-ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC) | |
- .class_type = ACPI_MADT_GIC_VERSION_V2, | |
- .init = gicv2_acpi_preinit, | |
-ACPI_DEVICE_END | |
+ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC).class_type = | |
+ ACPI_MADT_GIC_VERSION_V2, | |
+ .init = gicv2_acpi_preinit, | |
+ ACPI_DEVICE_END | |
#endif | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
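Aside on the DT_DEVICE_START and ACPI_DEVICE_START hunks above: clang-format cannot tell that these macro pairs bracket a designated-initializer list, so it glues .dt_match and .class_type onto the invocation and then mis-indents everything up to the trailing emacs block. The token stream is unchanged, so the result still compiles identically. One plausible mitigation, an assumption rather than anything in this patch, is to declare the pairs as block macros: | |
/* | |
 * Hypothetical .clang-format fragment (not part of the patch) telling | |
 * the formatter that these macros open and close an indented block: | |
 * | |
 *   MacroBlockBegin: "^DT_DEVICE_START|^ACPI_DEVICE_START" | |
 *   MacroBlockEnd:   "^DT_DEVICE_END|^ACPI_DEVICE_END" | |
 * | |
 * With that hint the initializer lines keep the layout shown on the | |
 * '-' side of the hunk. | |
 */ | |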
diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c | |
index 9558bad96a..09810d65c8 100644 | |
--- a/xen/arch/arm/gic-v3-its.c | |
+++ b/xen/arch/arm/gic-v3-its.c | |
@@ -33,7 +33,7 @@ | |
#include <asm/io.h> | |
#include <asm/page.h> | |
-#define ITS_CMD_QUEUE_SZ SZ_1M | |
+#define ITS_CMD_QUEUE_SZ SZ_1M | |
/* | |
* No lock here, as this list gets only populated upon boot while scanning | |
@@ -49,16 +49,17 @@ LIST_HEAD(host_its_list); | |
* property of MSIs in general and we can easily get to the base address | |
* of the ITS and look that up. | |
*/ | |
-struct its_device { | |
+struct its_device | |
+{ | |
struct rb_node rbnode; | |
struct host_its *hw_its; | |
void *itt_addr; | |
- paddr_t guest_doorbell; /* Identifies the virtual ITS */ | |
+ paddr_t guest_doorbell; /* Identifies the virtual ITS */ | |
uint32_t host_devid; | |
uint32_t guest_devid; | |
- uint32_t eventids; /* Number of event IDs (MSIs) */ | |
- uint32_t *host_lpi_blocks; /* Which LPIs are used on the host */ | |
- struct pending_irq *pend_irqs; /* One struct per event */ | |
+ uint32_t eventids; /* Number of event IDs (MSIs) */ | |
+ uint32_t *host_lpi_blocks; /* Which LPIs are used on the host */ | |
+ struct pending_irq *pend_irqs; /* One struct per event */ | |
}; | |
bool gicv3_its_host_has_its(void) | |
@@ -66,7 +67,7 @@ bool gicv3_its_host_has_its(void) | |
return !list_empty(&host_its_list); | |
} | |
-#define BUFPTR_MASK GENMASK(19, 5) | |
+#define BUFPTR_MASK GENMASK(19, 5) | |
static int its_send_command(struct host_its *hw_its, const void *its_cmd) | |
{ | |
/* | |
@@ -182,9 +183,8 @@ static int its_send_cmd_sync(struct host_its *its, unsigned int cpu) | |
return its_send_command(its, cmd); | |
} | |
-static int its_send_cmd_mapti(struct host_its *its, | |
- uint32_t deviceid, uint32_t eventid, | |
- uint32_t pintid, uint16_t icid) | |
+static int its_send_cmd_mapti(struct host_its *its, uint32_t deviceid, | |
+ uint32_t eventid, uint32_t pintid, uint16_t icid) | |
{ | |
uint64_t cmd[4]; | |
@@ -234,8 +234,8 @@ static int its_send_cmd_mapd(struct host_its *its, uint32_t deviceid, | |
return its_send_command(its, cmd); | |
} | |
-static int its_send_cmd_inv(struct host_its *its, | |
- uint32_t deviceid, uint32_t eventid) | |
+static int its_send_cmd_inv(struct host_its *its, uint32_t deviceid, | |
+ uint32_t eventid) | |
{ | |
uint64_t cmd[4]; | |
@@ -253,7 +253,7 @@ int gicv3_its_setup_collection(unsigned int cpu) | |
struct host_its *its; | |
int ret; | |
- list_for_each_entry(its, &host_its_list, entry) | |
+ list_for_each_entry (its, &host_its_list, entry) | |
{ | |
ret = its_send_cmd_mapc(its, cpu, cpu); | |
if ( ret ) | |
@@ -271,11 +271,11 @@ int gicv3_its_setup_collection(unsigned int cpu) | |
return 0; | |
} | |
-#define BASER_ATTR_MASK \ | |
- ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT) | \ | |
- (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) | \ | |
- (0x7UL << GITS_BASER_INNER_CACHEABILITY_SHIFT)) | |
-#define BASER_RO_MASK (GENMASK(58, 56) | GENMASK(52, 48)) | |
+#define BASER_ATTR_MASK \ | |
+ ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT) | \ | |
+ (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) | \ | |
+ (0x7UL << GITS_BASER_INNER_CACHEABILITY_SHIFT)) | |
+#define BASER_RO_MASK (GENMASK(58, 56) | GENMASK(52, 48)) | |
/* Check that the physical address can be encoded in the PROPBASER register. */ | |
static bool check_baser_phys_addr(void *vaddr, unsigned int page_bits) | |
@@ -302,7 +302,7 @@ static void *its_map_cbaser(struct host_its *its) | |
uint64_t reg; | |
void *buffer; | |
- reg = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
+ reg = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
reg |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; | |
reg |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; | |
@@ -342,18 +342,18 @@ static void *its_map_cbaser(struct host_its *its) | |
} | |
/* The ITS BASE registers work with page sizes of 4K, 16K or 64K. */ | |
-#define BASER_PAGE_BITS(sz) ((sz) * 2 + 12) | |
+#define BASER_PAGE_BITS(sz) ((sz)*2 + 12) | |
static int its_map_baser(void __iomem *basereg, uint64_t regc, | |
unsigned int nr_items) | |
{ | |
uint64_t attr, reg; | |
unsigned int entry_size = GITS_BASER_ENTRY_SIZE(regc); | |
- unsigned int pagesz = 2; /* try 64K pages first, then go down. */ | |
+ unsigned int pagesz = 2; /* try 64K pages first, then go down. */ | |
unsigned int table_size; | |
void *buffer; | |
- attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
+ attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; | |
attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; | |
@@ -363,8 +363,8 @@ static int its_map_baser(void __iomem *basereg, uint64_t regc, | |
* attributes), retrying if necessary. | |
*/ | |
retry: | |
- table_size = ROUNDUP(nr_items * entry_size, | |
- BIT(BASER_PAGE_BITS(pagesz), UL)); | |
+ table_size = | |
+ ROUNDUP(nr_items * entry_size, BIT(BASER_PAGE_BITS(pagesz), UL)); | |
/* The BASE registers support at most 256 pages. */ | |
table_size = min(table_size, 256U << BASER_PAGE_BITS(pagesz)); | |
@@ -378,13 +378,13 @@ retry: | |
return -ERANGE; | |
} | |
- reg = attr; | |
+ reg = attr; | |
reg |= (pagesz << GITS_BASER_PAGE_SIZE_SHIFT); | |
reg |= (table_size >> BASER_PAGE_BITS(pagesz)) - 1; | |
reg |= regc & BASER_RO_MASK; | |
reg |= GITS_VALID_BIT; | |
- reg |= encode_baser_phys_addr(virt_to_maddr(buffer), | |
- BASER_PAGE_BITS(pagesz)); | |
+ reg |= | |
+ encode_baser_phys_addr(virt_to_maddr(buffer), BASER_PAGE_BITS(pagesz)); | |
writeq_relaxed(reg, basereg); | |
regc = readq_relaxed(basereg); | |
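Aside, illustrative: the BASER_PAGE_BITS() change above folds the arithmetic to (sz)*2 + 12. The ITS BASE registers encode their supported page sizes as 0, 1 and 2, so the macro yields 12, 14 and 16 bits for 4K, 16K and 64K pages. A quick check: | |
#include <assert.h> | |
#define BASER_PAGE_BITS(sz) ((sz) * 2 + 12) | |
int main(void) | |
{ | |
    assert(BASER_PAGE_BITS(0) == 12); /* 4K pages */ | |
    assert(BASER_PAGE_BITS(1) == 14); /* 16K pages */ | |
    assert(BASER_PAGE_BITS(2) == 16); /* 64K pages */ | |
    return 0; | |
} | |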
@@ -479,7 +479,7 @@ static int gicv3_its_init_single_its(struct host_its *hw_its) | |
reg = readq_relaxed(basereg); | |
type = (reg & GITS_BASER_TYPE_MASK) >> GITS_BASER_TYPE_SHIFT; | |
- switch ( type ) | |
+ switch (type) | |
{ | |
case GITS_BASER_TYPE_NONE: | |
continue; | |
@@ -553,7 +553,7 @@ static struct host_its *gicv3_its_find_by_doorbell(paddr_t doorbell_address) | |
{ | |
struct host_its *hw_its; | |
- list_for_each_entry(hw_its, &host_its_list, entry) | |
+ list_for_each_entry (hw_its, &host_its_list, entry) | |
{ | |
if ( hw_its->addr + ITS_DOORBELL_OFFSET == doorbell_address ) | |
return hw_its; | |
@@ -562,8 +562,8 @@ static struct host_its *gicv3_its_find_by_doorbell(paddr_t doorbell_address) | |
return NULL; | |
} | |
-static int compare_its_guest_devices(struct its_device *dev, | |
- paddr_t vdoorbell, uint32_t vdevid) | |
+static int compare_its_guest_devices(struct its_device *dev, paddr_t vdoorbell, | |
+ uint32_t vdevid) | |
{ | |
if ( dev->guest_doorbell < vdoorbell ) | |
return -1; | |
@@ -585,9 +585,9 @@ static int compare_its_guest_devices(struct its_device *dev, | |
* The mapping connects a device @devid and event @eventid pair to LPI @lpi, | |
* increasing both @eventid and @lpi to cover the number of requested LPIs. | |
*/ | |
-static int gicv3_its_map_host_events(struct host_its *its, | |
- uint32_t devid, uint32_t eventid, | |
- uint32_t lpi, uint32_t nr_events) | |
+static int gicv3_its_map_host_events(struct host_its *its, uint32_t devid, | |
+ uint32_t eventid, uint32_t lpi, | |
+ uint32_t nr_events) | |
{ | |
uint32_t i; | |
int ret; | |
@@ -620,16 +620,16 @@ static int gicv3_its_map_host_events(struct host_its *its, | |
* This does not check if this particular hardware device is already mapped | |
* at another domain, it is expected that this would be done by the caller. | |
*/ | |
-int gicv3_its_map_guest_device(struct domain *d, | |
- paddr_t host_doorbell, uint32_t host_devid, | |
- paddr_t guest_doorbell, uint32_t guest_devid, | |
- uint64_t nr_events, bool valid) | |
+int gicv3_its_map_guest_device(struct domain *d, paddr_t host_doorbell, | |
+ uint32_t host_devid, paddr_t guest_doorbell, | |
+ uint32_t guest_devid, uint64_t nr_events, | |
+ bool valid) | |
{ | |
void *itt_addr = NULL; | |
struct host_its *hw_its; | |
struct its_device *dev = NULL; | |
struct rb_node **new = &d->arch.vgic.its_devices.rb_node, *parent = NULL; | |
- int i, ret = -ENOENT; /* "i" must be signed to check for >= 0 below. */ | |
+ int i, ret = -ENOENT; /* "i" must be signed to check for >= 0 below. */ | |
hw_its = gicv3_its_find_by_doorbell(host_doorbell); | |
if ( !hw_its ) | |
@@ -672,8 +672,10 @@ int gicv3_its_map_guest_device(struct domain *d, | |
if ( valid ) | |
{ | |
- printk(XENLOG_G_WARNING "d%d tried to remap guest ITS device 0x%x to host device 0x%x\n", | |
- d->domain_id, guest_devid, host_devid); | |
+ printk( | |
+ XENLOG_G_WARNING | |
+ "d%d tried to remap guest ITS device 0x%x to host device 0x%x\n", | |
+ d->domain_id, guest_devid, host_devid); | |
return -EBUSY; | |
} | |
@@ -798,7 +800,7 @@ static struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell, | |
ASSERT(spin_is_locked(&d->arch.vgic.its_devices_lock)); | |
- while (node) | |
+ while ( node ) | |
{ | |
int cmp; | |
@@ -817,11 +819,9 @@ static struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell, | |
return NULL; | |
} | |
-static struct pending_irq *get_event_pending_irq(struct domain *d, | |
- paddr_t vdoorbell_address, | |
- uint32_t vdevid, | |
- uint32_t eventid, | |
- uint32_t *host_lpi) | |
+static struct pending_irq * | |
+get_event_pending_irq(struct domain *d, paddr_t vdoorbell_address, | |
+ uint32_t vdevid, uint32_t eventid, uint32_t *host_lpi) | |
{ | |
struct its_device *dev; | |
struct pending_irq *pirq = NULL; | |
@@ -898,7 +898,7 @@ int gicv3_its_deny_access(const struct domain *d) | |
unsigned long mfn, nr; | |
const struct host_its *its_data; | |
- list_for_each_entry( its_data, &host_its_list, entry ) | |
+ list_for_each_entry (its_data, &host_its_list, entry) | |
{ | |
mfn = paddr_to_pfn(its_data->addr); | |
nr = PFN_UP(its_data->size); | |
@@ -919,8 +919,7 @@ int gicv3_its_deny_access(const struct domain *d) | |
* as the host. | |
*/ | |
int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, | |
- const struct dt_device_node *gic, | |
- void *fdt) | |
+ const struct dt_device_node *gic, void *fdt) | |
{ | |
uint32_t len; | |
int res; | |
@@ -943,7 +942,7 @@ int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, | |
if ( res ) | |
return res; | |
- list_for_each_entry(its_data, &host_its_list, entry) | |
+ list_for_each_entry (its_data, &host_its_list, entry) | |
{ | |
its = its_data->dt_node; | |
@@ -1059,8 +1058,8 @@ unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, void *base_ptr) | |
for ( i = 0; i < vgic_v3_its_count(d); i++ ) | |
{ | |
- fw_its = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, | |
- i); | |
+ fw_its = | |
+ acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, i); | |
memcpy(hwdom_its, fw_its, sizeof(struct acpi_madt_generic_translator)); | |
hwdom_its++; | |
} | |
@@ -1086,7 +1085,7 @@ int gicv3_its_init(void) | |
else | |
gicv3_its_acpi_init(); | |
- list_for_each_entry(hw_its, &host_its_list, entry) | |
+ list_for_each_entry (hw_its, &host_its_list, entry) | |
{ | |
ret = gicv3_its_init_single_its(hw_its); | |
if ( ret ) | |
@@ -1096,7 +1095,6 @@ int gicv3_its_init(void) | |
return 0; | |
} | |
- | |
/* | |
* Local variables: | |
* mode: C | |
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c | |
index 78b9521b21..eaf4663491 100644 | |
--- a/xen/arch/arm/gic-v3-lpi.c | |
+++ b/xen/arch/arm/gic-v3-lpi.c | |
@@ -45,17 +45,19 @@ | |
*/ | |
union host_lpi { | |
uint64_t data; | |
- struct { | |
+ struct | |
+ { | |
uint32_t virt_lpi; | |
uint16_t dom_id; | |
uint16_t pad; | |
}; | |
}; | |
-#define LPI_PROPTABLE_NEEDS_FLUSHING (1U << 0) | |
+#define LPI_PROPTABLE_NEEDS_FLUSHING (1U << 0) | |
/* Global state */ | |
-static struct { | |
+static struct | |
+{ | |
/* The global LPI property table, shared by all redistributors. */ | |
uint8_t *lpi_property; | |
/* | |
@@ -79,16 +81,17 @@ static struct { | |
unsigned int flags; | |
} lpi_data; | |
-struct lpi_redist_data { | |
- paddr_t redist_addr; | |
- unsigned int redist_id; | |
- void *pending_table; | |
+struct lpi_redist_data | |
+{ | |
+ paddr_t redist_addr; | |
+ unsigned int redist_id; | |
+ void *pending_table; | |
}; | |
static DEFINE_PER_CPU(struct lpi_redist_data, lpi_redist); | |
-#define MAX_NR_HOST_LPIS (lpi_data.max_host_lpi_ids - LPI_OFFSET) | |
-#define HOST_LPIS_PER_PAGE (PAGE_SIZE / sizeof(union host_lpi)) | |
+#define MAX_NR_HOST_LPIS (lpi_data.max_host_lpi_ids - LPI_OFFSET) | |
+#define HOST_LPIS_PER_PAGE (PAGE_SIZE / sizeof(union host_lpi)) | |
static union host_lpi *gic_get_host_lpi(uint32_t plpi) | |
{ | |
@@ -152,7 +155,7 @@ void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq) | |
vcpu_id = ACCESS_ONCE(p->lpi_vcpu_id); | |
if ( vcpu_id >= d->max_vcpus ) | |
- return; | |
+ return; | |
vgic_inject_irq(d, d->vcpu[vcpu_id], virq, true); | |
} | |
@@ -225,7 +228,8 @@ void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id, | |
host_lpi -= LPI_OFFSET; | |
- hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE]; | |
+ hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE] | |
+ [host_lpi % HOST_LPIS_PER_PAGE]; | |
hlpi.virt_lpi = virt_lpi; | |
hlpi.dom_id = domain_id; | |
@@ -241,8 +245,9 @@ static int gicv3_lpi_allocate_pendtable(uint64_t *reg) | |
if ( this_cpu(lpi_redist).pending_table ) | |
return -EBUSY; | |
- val = GIC_BASER_CACHE_RaWaWb << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; | |
- val |= GIC_BASER_CACHE_SameAsInner << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT; | |
+ val = GIC_BASER_CACHE_RaWaWb << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; | |
+ val |= GIC_BASER_CACHE_SameAsInner | |
+ << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT; | |
val |= GIC_BASER_InnerShareable << GICR_PENDBASER_SHAREABILITY_SHIFT; | |
/* | |
@@ -279,12 +284,13 @@ static int gicv3_lpi_allocate_pendtable(uint64_t *reg) | |
* Tell a redistributor about the (shared) property table, allocating one | |
* if not already done. | |
*/ | |
-static int gicv3_lpi_set_proptable(void __iomem * rdist_base) | |
+static int gicv3_lpi_set_proptable(void __iomem *rdist_base) | |
{ | |
uint64_t reg; | |
- reg = GIC_BASER_CACHE_RaWaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT; | |
- reg |= GIC_BASER_CACHE_SameAsInner << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT; | |
+ reg = GIC_BASER_CACHE_RaWaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT; | |
+ reg |= GIC_BASER_CACHE_SameAsInner | |
+ << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT; | |
reg |= GIC_BASER_InnerShareable << GICR_PROPBASER_SHAREABILITY_SHIFT; | |
/* | |
@@ -336,7 +342,7 @@ static int gicv3_lpi_set_proptable(void __iomem * rdist_base) | |
return 0; | |
} | |
-int gicv3_lpi_init_rdist(void __iomem * rdist_base) | |
+int gicv3_lpi_init_rdist(void __iomem *rdist_base) | |
{ | |
uint32_t reg; | |
uint64_t table_reg; | |
@@ -361,7 +367,8 @@ int gicv3_lpi_init_rdist(void __iomem * rdist_base) | |
if ( !(table_reg & GICR_PENDBASER_SHAREABILITY_MASK) ) | |
{ | |
table_reg &= ~GICR_PENDBASER_INNER_CACHEABILITY_MASK; | |
- table_reg |= GIC_BASER_CACHE_nC << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; | |
+ table_reg |= GIC_BASER_CACHE_nC | |
+ << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT; | |
writeq_relaxed(table_reg, rdist_base + GICR_PENDBASER); | |
} | |
@@ -389,7 +396,9 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) | |
* Tell the user about it, the actual number is reported below. | |
*/ | |
if ( max_lpi_bits < 14 || max_lpi_bits > 32 ) | |
- printk(XENLOG_WARNING "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n"); | |
+ printk( | |
+ XENLOG_WARNING | |
+ "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n"); | |
max_lpi_bits = max(max_lpi_bits, 14U); | |
lpi_data.max_host_lpi_ids = BIT(min(host_lpi_bits, max_lpi_bits), UL); | |
@@ -400,7 +409,8 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) | |
* It's very unlikely that we need more than 24 bits worth of LPIs. | |
*/ | |
if ( lpi_data.max_host_lpi_ids > BIT(24, UL) ) | |
- warning_add("Using high number of LPIs, limit memory usage with max_lpi_bits\n"); | |
+ warning_add( | |
+ "Using high number of LPIs, limit memory usage with max_lpi_bits\n"); | |
spin_lock_init(&lpi_data.host_lpis_lock); | |
lpi_data.next_free_lpi = 0; | |
@@ -422,8 +432,7 @@ static int find_unused_host_lpi(uint32_t start, uint32_t *index) | |
ASSERT(spin_is_locked(&lpi_data.host_lpis_lock)); | |
- for ( chunk = start; | |
- chunk < MAX_NR_HOST_LPIS / HOST_LPIS_PER_PAGE; | |
+ for ( chunk = start; chunk < MAX_NR_HOST_LPIS / HOST_LPIS_PER_PAGE; | |
chunk++ ) | |
{ | |
/* If we hit an unallocated chunk, use entry 0 in that one. */ | |
@@ -464,7 +473,7 @@ int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi) | |
chunk = find_unused_host_lpi(lpi_data.next_free_lpi / HOST_LPIS_PER_PAGE, | |
&lpi_idx); | |
- if ( chunk == - 1 ) /* rescan for a hole from the beginning */ | |
+ if ( chunk == -1 ) /* rescan for a hole from the beginning */ | |
{ | |
lpi_idx = 0; | |
chunk = find_unused_host_lpi(0, &lpi_idx); | |
@@ -543,7 +552,7 @@ int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi) | |
void gicv3_free_host_lpi_block(uint32_t first_lpi) | |
{ | |
- union host_lpi *hlpi, empty_lpi = { .dom_id = DOMID_INVALID }; | |
+ union host_lpi *hlpi, empty_lpi = {.dom_id = DOMID_INVALID}; | |
int i; | |
/* This should only be called with the beginning of a block. */ | |
@@ -551,7 +560,7 @@ void gicv3_free_host_lpi_block(uint32_t first_lpi) | |
hlpi = gic_get_host_lpi(first_lpi); | |
if ( !hlpi ) | |
- return; /* Nothing to free here. */ | |
+ return; /* Nothing to free here. */ | |
spin_lock(&lpi_data.host_lpis_lock); | |
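Aside, a sketch with illustrative constants: the host-LPI bookkeeping reformatted above is a two-level array indexed by chunk and slot after subtracting LPI_OFFSET, the first architectural LPI ID (8192). Each entry is the 64-bit union host_lpi, so a 4K page holds 512 of them: | |
#include <stddef.h> | |
#include <stdint.h> | |
#define LPI_OFFSET         8192 /* first LPI ID */ | |
#define PAGE_SZ            4096 | |
#define HOST_LPIS_PER_PAGE (PAGE_SZ / sizeof(uint64_t)) | |
/* Resolve an LPI to (chunk, slot), mirroring the [..][..] lookup above. */ | |
void lpi_index(uint32_t plpi, size_t *chunk, size_t *slot) | |
{ | |
    uint32_t idx = plpi - LPI_OFFSET; | |
    *chunk = idx / HOST_LPIS_PER_PAGE; | |
    *slot = idx % HOST_LPIS_PER_PAGE; | |
} | |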
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c | |
index 0f6cbf6224..6a5d657c63 100644 | |
--- a/xen/arch/arm/gic-v3.c | |
+++ b/xen/arch/arm/gic-v3.c | |
@@ -45,10 +45,11 @@ | |
#include <asm/sysregs.h> | |
/* Global state */ | |
-static struct { | |
- void __iomem *map_dbase; /* Mapped address of distributor registers */ | |
+static struct | |
+{ | |
+ void __iomem *map_dbase; /* Mapped address of distributor registers */ | |
struct rdist_region *rdist_regions; | |
- uint32_t rdist_stride; | |
+ uint32_t rdist_stride; | |
unsigned int rdist_count; /* Number of rdist regions count */ | |
unsigned int nr_priorities; | |
spinlock_t lock; | |
@@ -57,11 +58,11 @@ static struct { | |
static struct gic_info gicv3_info; | |
/* per-cpu re-distributor base */ | |
-static DEFINE_PER_CPU(void __iomem*, rbase); | |
+static DEFINE_PER_CPU(void __iomem *, rbase); | |
-#define GICD (gicv3.map_dbase) | |
-#define GICD_RDIST_BASE (this_cpu(rbase)) | |
-#define GICD_RDIST_SGI_BASE (GICD_RDIST_BASE + SZ_64K) | |
+#define GICD (gicv3.map_dbase) | |
+#define GICD_RDIST_BASE (this_cpu(rbase)) | |
+#define GICD_RDIST_SGI_BASE (GICD_RDIST_BASE + SZ_64K) | |
/* | |
* Saves all 16(Max) LR registers. Though number of LRs implemented | |
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(void __iomem*, rbase); | |
static inline void gicv3_save_lrs(struct vcpu *v) | |
{ | |
/* Fall through for all the cases */ | |
- switch ( gicv3_info.nr_lrs ) | |
+ switch (gicv3_info.nr_lrs) | |
{ | |
case 16: | |
v->arch.gic.v3.lr[15] = READ_SYSREG(ICH_LR15_EL2); | |
@@ -103,10 +104,10 @@ static inline void gicv3_save_lrs(struct vcpu *v) | |
case 2: | |
v->arch.gic.v3.lr[1] = READ_SYSREG(ICH_LR1_EL2); | |
case 1: | |
- v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2); | |
- break; | |
+ v->arch.gic.v3.lr[0] = READ_SYSREG(ICH_LR0_EL2); | |
+ break; | |
default: | |
- BUG(); | |
+ BUG(); | |
} | |
} | |
@@ -117,7 +118,7 @@ static inline void gicv3_save_lrs(struct vcpu *v) | |
static inline void gicv3_restore_lrs(const struct vcpu *v) | |
{ | |
/* Fall through for all the cases */ | |
- switch ( gicv3_info.nr_lrs ) | |
+ switch (gicv3_info.nr_lrs) | |
{ | |
case 16: | |
WRITE_SYSREG(v->arch.gic.v3.lr[15], ICH_LR15_EL2); | |
@@ -153,30 +154,46 @@ static inline void gicv3_restore_lrs(const struct vcpu *v) | |
WRITE_SYSREG(v->arch.gic.v3.lr[0], ICH_LR0_EL2); | |
break; | |
default: | |
- BUG(); | |
+ BUG(); | |
} | |
} | |
static uint64_t gicv3_ich_read_lr(int lr) | |
{ | |
- switch ( lr ) | |
+ switch (lr) | |
{ | |
- case 0: return READ_SYSREG(ICH_LR0_EL2); | |
- case 1: return READ_SYSREG(ICH_LR1_EL2); | |
- case 2: return READ_SYSREG(ICH_LR2_EL2); | |
- case 3: return READ_SYSREG(ICH_LR3_EL2); | |
- case 4: return READ_SYSREG(ICH_LR4_EL2); | |
- case 5: return READ_SYSREG(ICH_LR5_EL2); | |
- case 6: return READ_SYSREG(ICH_LR6_EL2); | |
- case 7: return READ_SYSREG(ICH_LR7_EL2); | |
- case 8: return READ_SYSREG(ICH_LR8_EL2); | |
- case 9: return READ_SYSREG(ICH_LR9_EL2); | |
- case 10: return READ_SYSREG(ICH_LR10_EL2); | |
- case 11: return READ_SYSREG(ICH_LR11_EL2); | |
- case 12: return READ_SYSREG(ICH_LR12_EL2); | |
- case 13: return READ_SYSREG(ICH_LR13_EL2); | |
- case 14: return READ_SYSREG(ICH_LR14_EL2); | |
- case 15: return READ_SYSREG(ICH_LR15_EL2); | |
+ case 0: | |
+ return READ_SYSREG(ICH_LR0_EL2); | |
+ case 1: | |
+ return READ_SYSREG(ICH_LR1_EL2); | |
+ case 2: | |
+ return READ_SYSREG(ICH_LR2_EL2); | |
+ case 3: | |
+ return READ_SYSREG(ICH_LR3_EL2); | |
+ case 4: | |
+ return READ_SYSREG(ICH_LR4_EL2); | |
+ case 5: | |
+ return READ_SYSREG(ICH_LR5_EL2); | |
+ case 6: | |
+ return READ_SYSREG(ICH_LR6_EL2); | |
+ case 7: | |
+ return READ_SYSREG(ICH_LR7_EL2); | |
+ case 8: | |
+ return READ_SYSREG(ICH_LR8_EL2); | |
+ case 9: | |
+ return READ_SYSREG(ICH_LR9_EL2); | |
+ case 10: | |
+ return READ_SYSREG(ICH_LR10_EL2); | |
+ case 11: | |
+ return READ_SYSREG(ICH_LR11_EL2); | |
+ case 12: | |
+ return READ_SYSREG(ICH_LR12_EL2); | |
+ case 13: | |
+ return READ_SYSREG(ICH_LR13_EL2); | |
+ case 14: | |
+ return READ_SYSREG(ICH_LR14_EL2); | |
+ case 15: | |
+ return READ_SYSREG(ICH_LR15_EL2); | |
default: | |
BUG(); | |
} | |
@@ -184,7 +201,7 @@ static uint64_t gicv3_ich_read_lr(int lr) | |
static void gicv3_ich_write_lr(int lr, uint64_t val) | |
{ | |
- switch ( lr ) | |
+ switch (lr) | |
{ | |
case 0: | |
WRITE_SYSREG(val, ICH_LR0_EL2); | |
@@ -292,9 +309,9 @@ static void gicv3_redist_wait_for_rwp(void) | |
static void gicv3_wait_for_rwp(int irq) | |
{ | |
if ( irq < NR_LOCAL_IRQS ) | |
- gicv3_redist_wait_for_rwp(); | |
+ gicv3_redist_wait_for_rwp(); | |
else | |
- gicv3_dist_wait_for_rwp(); | |
+ gicv3_dist_wait_for_rwp(); | |
} | |
static unsigned int gicv3_get_cpu_from_mask(const cpumask_t *cpumask) | |
@@ -312,7 +329,7 @@ static void restore_aprn_regs(const union gic_state_data *d) | |
{ | |
/* Write APRn register based on number of priorities | |
platform has implemented */ | |
- switch ( gicv3.nr_priorities ) | |
+ switch (gicv3.nr_priorities) | |
{ | |
case 7: | |
WRITE_SYSREG32(d->v3.apr0[2], ICH_AP0R2_EL2); | |
@@ -335,7 +352,7 @@ static void save_aprn_regs(union gic_state_data *d) | |
{ | |
/* Read APRn register based on number of priorities | |
platform has implemented */ | |
- switch ( gicv3.nr_priorities ) | |
+ switch (gicv3.nr_priorities) | |
{ | |
case 7: | |
d->v3.apr0[2] = READ_SYSREG32(ICH_AP0R2_EL2); | |
@@ -360,7 +377,6 @@ static void save_aprn_regs(union gic_state_data *d) | |
*/ | |
static void gicv3_save_state(struct vcpu *v) | |
{ | |
- | |
/* No need for spinlocks here because interrupts are disabled around | |
* this call and it only accesses struct vcpu fields that cannot be | |
* accessed simultaneously by another pCPU. | |
@@ -426,7 +442,8 @@ static void gicv3_dump_state(const struct vcpu *v) | |
} | |
} | |
-static void gicv3_poke_irq(struct irq_desc *irqd, u32 offset, bool wait_for_rwp) | |
+static void gicv3_poke_irq(struct irq_desc *irqd, u32 offset, | |
+ bool wait_for_rwp) | |
{ | |
u32 mask = 1U << (irqd->irq % 32); | |
void __iomem *base; | |
@@ -447,7 +464,7 @@ static bool gicv3_peek_irq(struct irq_desc *irqd, u32 offset) | |
void __iomem *base; | |
unsigned int irq = irqd->irq; | |
- if ( irq >= NR_GIC_LOCAL_IRQS) | |
+ if ( irq >= NR_GIC_LOCAL_IRQS ) | |
base = GICD + (irq / 32) * 4; | |
else | |
base = GICD_RDIST_SGI_BASE; | |
@@ -525,11 +542,11 @@ static void gicv3_set_pending_state(struct irq_desc *irqd, bool pending) | |
static inline uint64_t gicv3_mpidr_to_affinity(int cpu) | |
{ | |
- uint64_t mpidr = cpu_logical_map(cpu); | |
- return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 0)); | |
+ uint64_t mpidr = cpu_logical_map(cpu); | |
+ return (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | | |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | | |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | | |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0)); | |
} | |
static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) | |
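gicv3_mpidr_to_affinity() above repacks the four 8-bit MPIDR affinity fields (Aff0-Aff2 at | |
bits [23:0], Aff3 at bits [39:32]) into the contiguous layout GICD_IROUTER expects; note | |
the packed value drops MPIDR bits [31:24], pulling Aff3 down next to Aff2. A self-contained | |
sketch of the same packing, with the affinity-extraction macro re-derived from the | |
architectural layout rather than taken from Xen's headers: | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    /* Assumed from the ARMv8 MPIDR_EL1 layout: Aff3 lives at bits [39:32]. */ | |
    #define AFF_LEVEL(mpidr, l) \ | |
        (((mpidr) >> ((l) == 3 ? 32 : (l) * 8)) & 0xffULL) | |
    static uint64_t mpidr_to_irouter(uint64_t mpidr) | |
    { | |
        return AFF_LEVEL(mpidr, 3) << 32 | AFF_LEVEL(mpidr, 2) << 16 | | |
               AFF_LEVEL(mpidr, 1) << 8  | AFF_LEVEL(mpidr, 0); | |
    } | |
    int main(void) | |
    { | |
        /* Aff3=0x01, Aff2=0x02, Aff1=0x03, Aff0=0x04 -> 0x100020304 */ | |
        printf("%#llx\n", (unsigned long long)mpidr_to_irouter(0x0100020304ULL)); | |
        return 0; | |
    } | |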
@@ -543,7 +560,7 @@ static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) | |
spin_lock(&gicv3.lock); | |
- if ( irq >= NR_GIC_LOCAL_IRQS) | |
+ if ( irq >= NR_GIC_LOCAL_IRQS ) | |
base = GICD + GICD_ICFGR + (irq / 16) * 4; | |
else | |
base = GICD_RDIST_SGI_BASE + GICR_ICFGR1; | |
@@ -559,17 +576,16 @@ static void gicv3_set_irq_type(struct irq_desc *desc, unsigned int type) | |
writel_relaxed(cfg, base); | |
actual = readl_relaxed(base); | |
- if ( ( cfg & edgebit ) ^ ( actual & edgebit ) ) | |
+ if ( (cfg & edgebit) ^ (actual & edgebit) ) | |
{ | |
- printk(XENLOG_WARNING "GICv3: WARNING: " | |
+ printk(XENLOG_WARNING | |
+ "GICv3: WARNING: " | |
"CPU%d: Failed to configure IRQ%u as %s-triggered. " | |
"H/w forces to %s-triggered.\n", | |
- smp_processor_id(), desc->irq, | |
- cfg & edgebit ? "Edge" : "Level", | |
+ smp_processor_id(), desc->irq, cfg & edgebit ? "Edge" : "Level", | |
actual & edgebit ? "Edge" : "Level"); | |
- desc->arch.type = actual & edgebit ? | |
- IRQ_TYPE_EDGE_RISING : | |
- IRQ_TYPE_LEVEL_HIGH; | |
+ desc->arch.type = | |
+ actual & edgebit ? IRQ_TYPE_EDGE_RISING : IRQ_TYPE_LEVEL_HIGH; | |
} | |
spin_unlock(&gicv3.lock); | |
} | |
@@ -611,8 +627,8 @@ static void __init gicv3_dist_init(void) | |
nr_lines = min(1020U, nr_lines); | |
gicv3_info.nr_lines = nr_lines; | |
- printk("GICv3: %d lines, (IID %8.8x).\n", | |
- nr_lines, readl_relaxed(GICD + GICD_IIDR)); | |
+ printk("GICv3: %d lines, (IID %8.8x).\n", nr_lines, | |
+ readl_relaxed(GICD + GICD_IIDR)); | |
/* Default all global IRQs to level, active low */ | |
for ( i = NR_GIC_LOCAL_IRQS; i < nr_lines; i += 16 ) | |
@@ -621,8 +637,8 @@ static void __init gicv3_dist_init(void) | |
/* Default priority for global interrupts */ | |
for ( i = NR_GIC_LOCAL_IRQS; i < nr_lines; i += 4 ) | |
{ | |
- priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | | |
- GIC_PRI_IRQ << 8 | GIC_PRI_IRQ); | |
+ priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | | |
+ GIC_PRI_IRQ); | |
writel_relaxed(priority, GICD + GICD_IPRIORITYR + (i / 4) * 4); | |
} | |
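GICD_IPRIORITYR is an array of byte-wide priority fields, four per 32-bit register, which | |
is why the loop above steps by 4 and programs IRQs i..i+3 with a single store of the | |
replicated priority byte. A sketch of the replication (equivalent to multiplying the byte | |
by 0x01010101): | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    static uint32_t replicate_priority(uint8_t pri) | |
    { | |
        return (uint32_t)pri << 24 | (uint32_t)pri << 16 | | |
               (uint32_t)pri << 8  | pri;   /* same as pri * 0x01010101u */ | |
    } | |
    int main(void) | |
    { | |
        printf("%#x\n", replicate_priority(0xa0));   /* -> 0xa0a0a0a0 */ | |
        return 0; | |
    } | |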
@@ -643,8 +659,9 @@ static void __init gicv3_dist_init(void) | |
gicv3_dist_wait_for_rwp(); | |
/* Turn on the distributor */ | |
- writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | | |
- GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, GICD + GICD_CTLR); | |
+ writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | | |
+ GICD_CTLR_ENABLE_G1, | |
+ GICD + GICD_CTLR); | |
/* Route all global IRQs to this CPU */ | |
affinity = gicv3_mpidr_to_affinity(smp_processor_id()); | |
@@ -715,16 +732,16 @@ static int __init gicv3_populate_rdist(void) | |
* If we ever get a cluster of more than 16 CPUs, just scream. | |
*/ | |
if ( (mpidr & 0xff) >= 16 ) | |
- dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n"); | |
+ dprintk(XENLOG_WARNING, "GICv3:Cluster with more than 16's cpus\n"); | |
/* | |
* Convert affinity to a 32bit value that can be matched to GICR_TYPER | |
* bits [63:32] | |
*/ | |
- aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | | |
- MPIDR_AFFINITY_LEVEL(mpidr, 0)); | |
+ aff = | |
+ (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | | |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | | |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); | |
for ( i = 0; i < gicv3.rdist_count; i++ ) | |
{ | |
@@ -734,7 +751,7 @@ static int __init gicv3_populate_rdist(void) | |
if ( reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4 ) | |
{ | |
dprintk(XENLOG_ERR, | |
- "GICv3: No redistributor present @%"PRIpaddr"\n", | |
+ "GICv3: No redistributor present @%" PRIpaddr "\n", | |
gicv3.rdist_regions[i].base); | |
break; | |
} | |
@@ -753,9 +770,9 @@ static int __init gicv3_populate_rdist(void) | |
int ret; | |
/* | |
- * The ITS refers to redistributors either by their physical | |
- * address or by their ID. Which one to use is an ITS | |
- * choice. So determine those two values here (which we | |
+ * The ITS refers to redistributors either by their | |
+ * physical address or by their ID. Which one to use is an | |
+ * ITS choice. So determine those two values here (which we | |
* can do only here in GICv3 code) and tell the | |
* ITS code about it, so it can use them later to be able | |
* to address those redistributors accordingly. | |
@@ -777,7 +794,7 @@ static int __init gicv3_populate_rdist(void) | |
} | |
printk("GICv3: CPU%d: Found redistributor in region %d @%p\n", | |
- smp_processor_id(), i, ptr); | |
+ smp_processor_id(), i, ptr); | |
return 0; | |
} | |
@@ -796,7 +813,8 @@ static int __init gicv3_populate_rdist(void) | |
} while ( !(typer & GICR_TYPER_LAST) ); | |
} | |
- dprintk(XENLOG_ERR, "GICv3: CPU%d: mpidr 0x%"PRIregister" has no re-distributor!\n", | |
+ dprintk(XENLOG_ERR, | |
+ "GICv3: CPU%d: mpidr 0x%" PRIregister " has no re-distributor!\n", | |
smp_processor_id(), cpu_logical_map(smp_processor_id())); | |
return -ENODEV; | |
@@ -827,15 +845,15 @@ static int gicv3_cpu_init(void) | |
/* Set priority on PPI and SGI interrupts */ | |
priority = (GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | GIC_PRI_IPI << 8 | | |
GIC_PRI_IPI); | |
- for (i = 0; i < NR_GIC_SGI; i += 4) | |
+ for ( i = 0; i < NR_GIC_SGI; i += 4 ) | |
writel_relaxed(priority, | |
- GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); | |
+ GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); | |
priority = (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | | |
GIC_PRI_IRQ); | |
- for (i = NR_GIC_SGI; i < NR_GIC_LOCAL_IRQS; i += 4) | |
+ for ( i = NR_GIC_SGI; i < NR_GIC_LOCAL_IRQS; i += 4 ) | |
writel_relaxed(priority, | |
- GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); | |
+ GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + (i / 4) * 4); | |
/* | |
* The activate state is unknown at boot, so make sure all | |
@@ -885,9 +903,9 @@ static void gicv3_hyp_init(void) | |
uint32_t vtr; | |
vtr = READ_SYSREG32(ICH_VTR_EL2); | |
- gicv3_info.nr_lrs = (vtr & ICH_VTR_NRLRGS) + 1; | |
- gicv3.nr_priorities = ((vtr >> ICH_VTR_PRIBITS_SHIFT) & | |
- ICH_VTR_PRIBITS_MASK) + 1; | |
+ gicv3_info.nr_lrs = (vtr & ICH_VTR_NRLRGS) + 1; | |
+ gicv3.nr_priorities = | |
+ ((vtr >> ICH_VTR_PRIBITS_SHIFT) & ICH_VTR_PRIBITS_MASK) + 1; | |
if ( !((gicv3.nr_priorities > 4) && (gicv3.nr_priorities < 8)) ) | |
panic("GICv3: Invalid number of priority bits\n"); | |
@@ -949,7 +967,8 @@ static u16 gicv3_compute_target_list(int *base_cpu, const struct cpumask *mask, | |
} | |
mpidr = cpu_logical_map(cpu); | |
- if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) ) { | |
+ if ( cluster_id != (mpidr & ~MPIDR_AFF0_MASK) ) | |
+ { | |
cpu--; | |
goto out; | |
} | |
@@ -965,7 +984,7 @@ static void gicv3_send_sgi_list(enum gic_sgi sgi, const cpumask_t *cpumask) | |
int cpu = 0; | |
uint64_t val; | |
- for_each_cpu(cpu, cpumask) | |
+ for_each_cpu (cpu, cpumask) | |
{ | |
/* Mask lower 8 bits. It represent cpu in affinity level 0 */ | |
uint64_t cluster_id = cpu_logical_map(cpu) & ~MPIDR_AFF0_MASK; | |
@@ -978,11 +997,9 @@ static void gicv3_send_sgi_list(enum gic_sgi sgi, const cpumask_t *cpumask) | |
* Prepare affinity path of the cluster for which SGI is generated | |
* along with SGI number | |
*/ | |
- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | | |
- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | | |
- sgi << 24 | | |
- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | | |
- tlist); | |
+ val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | | |
+ MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | sgi << 24 | | |
+ MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | tlist); | |
WRITE_SYSREG64(val, ICC_SGI1R_EL1); | |
} | |
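The value composed above for ICC_SGI1R_EL1 interleaves the cluster path with the SGI | |
number: TargetList in bits [15:0], Aff1 at [23:16], INTID at [27:24], Aff2 at [39:32] and | |
Aff3 at [55:48]. A self-contained sketch of the packing, with those field offsets assumed | |
from the architecture rather than taken from Xen's ICH_SGI_* constants: | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    static uint64_t sgi1r_value(uint8_t aff3, uint8_t aff2, uint8_t aff1, | |
                                uint8_t sgi, uint16_t target_list) | |
    { | |
        return (uint64_t)aff3 << 48 | (uint64_t)aff2 << 32 | | |
               (uint64_t)(sgi & 0xf) << 24 | (uint64_t)aff1 << 16 | | |
               target_list; | |
    } | |
    int main(void) | |
    { | |
        /* Aff3=0, Aff2=1, Aff1=2, SGI 2, CPUs 0-1 -> 0x102020003 */ | |
        printf("%#llx\n", (unsigned long long)sgi1r_value(0, 1, 2, 2, 0x3)); | |
        return 0; | |
    } | |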
@@ -999,11 +1016,11 @@ static void gicv3_send_sgi(enum gic_sgi sgi, enum gic_sgi_mode mode, | |
*/ | |
dsb(st); | |
- switch ( mode ) | |
+ switch (mode) | |
{ | |
case SGI_TARGET_OTHERS: | |
WRITE_SYSREG64(ICH_SGI_TARGET_OTHERS << ICH_SGI_IRQMODE_SHIFT | | |
- (uint64_t)sgi << ICH_SGI_IRQ_SHIFT, | |
+ (uint64_t)sgi << ICH_SGI_IRQ_SHIFT, | |
ICC_SGI1R_EL1); | |
isb(); | |
break; | |
@@ -1037,7 +1054,7 @@ static void gicv3_update_lr(int lr, unsigned int virq, uint8_t priority, | |
BUG_ON(lr >= gicv3_info.nr_lrs); | |
BUG_ON(lr < 0); | |
- val = (((uint64_t)state & 0x3) << ICH_LR_STATE_SHIFT); | |
+ val = (((uint64_t)state & 0x3) << ICH_LR_STATE_SHIFT); | |
/* | |
* When the guest is GICv3, all guest IRQs are Group 1, as Group0 | |
@@ -1049,9 +1066,9 @@ static void gicv3_update_lr(int lr, unsigned int virq, uint8_t priority, | |
val |= (uint64_t)priority << ICH_LR_PRIORITY_SHIFT; | |
val |= ((uint64_t)virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT; | |
- if ( hw_irq != INVALID_IRQ ) | |
- val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK) | |
- << ICH_LR_PHYSICAL_SHIFT); | |
+ if ( hw_irq != INVALID_IRQ ) | |
+ val |= ICH_LR_HW | (((uint64_t)hw_irq & ICH_LR_PHYSICAL_MASK) | |
+ << ICH_LR_PHYSICAL_SHIFT); | |
gicv3_ich_write_lr(lr, val); | |
} | |
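gicv3_update_lr() assembles a list-register value field by field: state at bits [63:62], | |
the HW bit at [61], priority at [55:48], the physical INTID at [44:32] (only when a | |
hardware IRQ is attached) and the virtual INTID in the low bits. A sketch with those | |
offsets assumed from the GICv3 spec; the Group1 bit, which the code above sets based on | |
the vGIC version, is omitted for brevity: | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    #define LR_HW        (1ULL << 61)   /* assumed bit position */ | |
    #define INVALID_IRQ  0xffffffffU | |
    static uint64_t make_lr(unsigned int state, uint8_t prio, uint32_t virq, | |
                            uint32_t hw_irq) | |
    { | |
        uint64_t val = ((uint64_t)state & 0x3) << 62 | | |
                       (uint64_t)prio << 48 | virq; | |
        if ( hw_irq != INVALID_IRQ ) | |
            val |= LR_HW | ((uint64_t)hw_irq & 0x1fff) << 32; | |
        return val; | |
    } | |
    int main(void) | |
    { | |
        /* pending (state 1), priority 0xa0, vIRQ 27 backed by hw IRQ 27 */ | |
        printf("%#llx\n", (unsigned long long)make_lr(1, 0xa0, 27, 27)); | |
        return 0; | |
    } | |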
@@ -1069,13 +1086,14 @@ static void gicv3_read_lr(int lr, struct gic_lr *lr_reg) | |
lr_reg->virq = (lrv >> ICH_LR_VIRTUAL_SHIFT) & ICH_LR_VIRTUAL_MASK; | |
- lr_reg->priority = (lrv >> ICH_LR_PRIORITY_SHIFT) & ICH_LR_PRIORITY_MASK; | |
- lr_reg->pending = lrv & ICH_LR_STATE_PENDING; | |
- lr_reg->active = lrv & ICH_LR_STATE_ACTIVE; | |
+ lr_reg->priority = (lrv >> ICH_LR_PRIORITY_SHIFT) & ICH_LR_PRIORITY_MASK; | |
+ lr_reg->pending = lrv & ICH_LR_STATE_PENDING; | |
+ lr_reg->active = lrv & ICH_LR_STATE_ACTIVE; | |
lr_reg->hw_status = lrv & ICH_LR_HW; | |
if ( lr_reg->hw_status ) | |
- lr_reg->hw.pirq = (lrv >> ICH_LR_PHYSICAL_SHIFT) & ICH_LR_PHYSICAL_MASK; | |
+ lr_reg->hw.pirq = | |
+ (lrv >> ICH_LR_PHYSICAL_SHIFT) & ICH_LR_PHYSICAL_MASK; | |
else | |
{ | |
lr_reg->virt.eoi = (lrv & ICH_LR_MAINTENANCE_IRQ); | |
@@ -1086,8 +1104,8 @@ static void gicv3_read_lr(int lr, struct gic_lr *lr_reg) | |
* This is only valid for SGI, but it does not matter to always | |
* read it as it should be 0 by default. | |
*/ | |
- lr_reg->virt.source = (lrv >> ICH_LR_CPUID_SHIFT) | |
- & ICH_LR_CPUID_MASK; | |
+ lr_reg->virt.source = | |
+ (lrv >> ICH_LR_CPUID_SHIFT) & ICH_LR_CPUID_MASK; | |
} | |
} | |
} | |
@@ -1097,9 +1115,9 @@ static void gicv3_write_lr(int lr_reg, const struct gic_lr *lr) | |
uint64_t lrv = 0; | |
const enum gic_version vgic_version = current->domain->arch.vgic.version; | |
- | |
- lrv = ( ((u64)(lr->virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT) | | |
- ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) << ICH_LR_PRIORITY_SHIFT) ); | |
+ lrv = (((u64)(lr->virq & ICH_LR_VIRTUAL_MASK) << ICH_LR_VIRTUAL_SHIFT) | | |
+ ((u64)(lr->priority & ICH_LR_PRIORITY_MASK) | |
+ << ICH_LR_PRIORITY_SHIFT)); | |
if ( lr->active ) | |
lrv |= ICH_LR_STATE_ACTIVE; | |
@@ -1152,14 +1170,14 @@ static void gicv3_hcr_status(uint32_t flag, bool status) | |
static unsigned int gicv3_read_vmcr_priority(void) | |
{ | |
- return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) & | |
+ return ((READ_SYSREG32(ICH_VMCR_EL2) >> ICH_VMCR_PRIORITY_SHIFT) & | |
ICH_VMCR_PRIORITY_MASK); | |
} | |
/* Only support reading GRP1 APRn registers */ | |
static unsigned int gicv3_read_apr(int apr_reg) | |
{ | |
- switch ( apr_reg ) | |
+ switch (apr_reg) | |
{ | |
case 0: | |
ASSERT(gicv3.nr_priorities > 4 && gicv3.nr_priorities < 8); | |
@@ -1239,7 +1257,8 @@ static void gicv3_guest_irq_end(struct irq_desc *desc) | |
/* Deactivation happens in maintenance interrupt / via GICV */ | |
} | |
-static void gicv3_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask) | |
+static void gicv3_irq_set_affinity(struct irq_desc *desc, | |
+ const cpumask_t *mask) | |
{ | |
unsigned int cpu; | |
uint64_t affinity; | |
@@ -1270,7 +1289,8 @@ static int gicv3_make_hwdom_dt_node(const struct domain *d, | |
compatible = dt_get_property(gic, "compatible", &len); | |
if ( !compatible ) | |
{ | |
- dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n"); | |
+ dprintk(XENLOG_ERR, | |
+ "Can't find compatible property for the gic node\n"); | |
return -FDT_ERR_XEN(ENOENT); | |
} | |
@@ -1307,24 +1327,24 @@ static int gicv3_make_hwdom_dt_node(const struct domain *d, | |
} | |
static const hw_irq_controller gicv3_host_irq_type = { | |
- .typename = "gic-v3", | |
- .startup = gicv3_irq_startup, | |
- .shutdown = gicv3_irq_shutdown, | |
- .enable = gicv3_irq_enable, | |
- .disable = gicv3_irq_disable, | |
- .ack = gicv3_irq_ack, | |
- .end = gicv3_host_irq_end, | |
+ .typename = "gic-v3", | |
+ .startup = gicv3_irq_startup, | |
+ .shutdown = gicv3_irq_shutdown, | |
+ .enable = gicv3_irq_enable, | |
+ .disable = gicv3_irq_disable, | |
+ .ack = gicv3_irq_ack, | |
+ .end = gicv3_host_irq_end, | |
.set_affinity = gicv3_irq_set_affinity, | |
}; | |
static const hw_irq_controller gicv3_guest_irq_type = { | |
- .typename = "gic-v3", | |
- .startup = gicv3_irq_startup, | |
- .shutdown = gicv3_irq_shutdown, | |
- .enable = gicv3_irq_enable, | |
- .disable = gicv3_irq_disable, | |
- .ack = gicv3_irq_ack, | |
- .end = gicv3_guest_irq_end, | |
+ .typename = "gic-v3", | |
+ .startup = gicv3_irq_startup, | |
+ .shutdown = gicv3_irq_shutdown, | |
+ .enable = gicv3_irq_enable, | |
+ .disable = gicv3_irq_disable, | |
+ .ack = gicv3_irq_ack, | |
+ .end = gicv3_guest_irq_end, | |
.set_affinity = gicv3_irq_set_affinity, | |
}; | |
@@ -1347,12 +1367,13 @@ static void __init gicv3_init_v2(void) | |
{ | |
printk(XENLOG_WARNING | |
"GICv3: WARNING: Not enabling support for GICv2 compat mode.\n" | |
- "Size of GICV (%#"PRIpaddr") must at least be %#llx.\n", | |
+ "Size of GICV (%#" PRIpaddr ") must at least be %#llx.\n", | |
vsize, GUEST_GICC_SIZE); | |
return; | |
} | |
- printk("GICv3 compatible with GICv2 cbase %#"PRIpaddr" vbase %#"PRIpaddr"\n", | |
+ printk("GICv3 compatible with GICv2 cbase %#" PRIpaddr " vbase %#" PRIpaddr | |
+ "\n", | |
cbase, vbase); | |
vgic_v2_setup_hw(dbase, cbase, csize, vbase, 0); | |
@@ -1361,7 +1382,7 @@ static void __init gicv3_init_v2(void) | |
static void __init gicv3_ioremap_distributor(paddr_t dist_paddr) | |
{ | |
if ( dist_paddr & ~PAGE_MASK ) | |
- panic("GICv3: Found unaligned distributor address %"PRIpaddr"\n", | |
+ panic("GICv3: Found unaligned distributor address %" PRIpaddr "\n", | |
dbase); | |
gicv3.map_dbase = ioremap_nocache(dist_paddr, SZ_64K); | |
@@ -1382,7 +1403,7 @@ static void __init gicv3_dt_init(void) | |
gicv3_ioremap_distributor(dbase); | |
if ( !dt_property_read_u32(node, "#redistributor-regions", | |
- &gicv3.rdist_count) ) | |
+ &gicv3.rdist_count) ) | |
gicv3.rdist_count = 1; | |
rdist_regs = xzalloc_array(struct rdist_region, gicv3.rdist_count); | |
@@ -1401,10 +1422,11 @@ static void __init gicv3_dt_init(void) | |
rdist_regs[i].size = rdist_size; | |
} | |
- if ( !dt_property_read_u32(node, "redistributor-stride", &gicv3.rdist_stride) ) | |
+ if ( !dt_property_read_u32(node, "redistributor-stride", | |
+ &gicv3.rdist_stride) ) | |
gicv3.rdist_stride = 0; | |
- gicv3.rdist_regions= rdist_regs; | |
+ gicv3.rdist_regions = rdist_regs; | |
res = platform_get_irq(node, 0); | |
if ( res < 0 ) | |
@@ -1415,11 +1437,9 @@ static void __init gicv3_dt_init(void) | |
* For GICv3 supporting GICv2, GICC and GICV base address will be | |
* provided. | |
*/ | |
- res = dt_device_get_address(node, 1 + gicv3.rdist_count, | |
- &cbase, &csize); | |
+ res = dt_device_get_address(node, 1 + gicv3.rdist_count, &cbase, &csize); | |
if ( !res ) | |
- dt_device_get_address(node, 1 + gicv3.rdist_count + 2, | |
- &vbase, &vsize); | |
+ dt_device_get_address(node, 1 + gicv3.rdist_count + 2, &vbase, &vsize); | |
} | |
static int gicv3_iomem_deny_access(const struct domain *d) | |
@@ -1466,8 +1486,8 @@ static int gicv3_iomem_deny_access(const struct domain *d) | |
} | |
#ifdef CONFIG_ACPI | |
-static void __init | |
-gic_acpi_add_rdist_region(paddr_t base, paddr_t size, bool single_rdist) | |
+static void __init gic_acpi_add_rdist_region(paddr_t base, paddr_t size, | |
+ bool single_rdist) | |
{ | |
unsigned int idx = gicv3.rdist_count++; | |
@@ -1497,8 +1517,8 @@ static int gicv3_make_hwdom_madt(const struct domain *d, u32 offset) | |
return -EINVAL; | |
} | |
- host_gicc = container_of(header, struct acpi_madt_generic_interrupt, | |
- header); | |
+ host_gicc = | |
+ container_of(header, struct acpi_madt_generic_interrupt, header); | |
size = sizeof(struct acpi_madt_generic_interrupt); | |
for ( i = 0; i < d->max_vcpus; i++ ) | |
{ | |
@@ -1525,7 +1545,8 @@ static int gicv3_make_hwdom_madt(const struct domain *d, u32 offset) | |
*/ | |
for ( i = 0; i < d->arch.vgic.nr_regions; i++ ) | |
{ | |
- gicr = (struct acpi_madt_generic_redistributor *)(base_ptr + table_len); | |
+ gicr = | |
+ (struct acpi_madt_generic_redistributor *)(base_ptr + table_len); | |
gicr->header.type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; | |
gicr->header.length = size; | |
gicr->base_address = gicv3.rdist_regions[i].base; | |
@@ -1544,19 +1565,17 @@ static unsigned long gicv3_get_hwdom_extra_madt_size(const struct domain *d) | |
size = sizeof(struct acpi_madt_generic_redistributor) * gicv3.rdist_count; | |
- size += sizeof(struct acpi_madt_generic_translator) | |
- * vgic_v3_its_count(d); | |
+ size += sizeof(struct acpi_madt_generic_translator) * vgic_v3_its_count(d); | |
return size; | |
} | |
-static int __init | |
-gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
static int cpu_base_assigned = 0; | |
struct acpi_madt_generic_interrupt *processor = | |
- container_of(header, struct acpi_madt_generic_interrupt, header); | |
+ container_of(header, struct acpi_madt_generic_interrupt, header); | |
if ( BAD_MADT_ENTRY(processor, end) ) | |
return -EINVAL; | |
@@ -1577,9 +1596,9 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
} | |
else | |
{ | |
- if ( cbase != processor->base_address | |
- || vbase != processor->gicv_base_address | |
- || gicv3_info.maintenance_irq != processor->vgic_interrupt ) | |
+ if ( cbase != processor->base_address || | |
+ vbase != processor->gicv_base_address || | |
+ gicv3_info.maintenance_irq != processor->vgic_interrupt ) | |
{ | |
printk("GICv3: GICC entries are not same in MADT table\n"); | |
return -EINVAL; | |
@@ -1589,12 +1608,11 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_madt_distributor( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
struct acpi_madt_generic_distributor *dist = | |
- container_of(header, struct acpi_madt_generic_distributor, header); | |
+ container_of(header, struct acpi_madt_generic_distributor, header); | |
if ( BAD_MADT_ENTRY(dist, end) ) | |
return -EINVAL; | |
@@ -1604,9 +1622,8 @@ gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_parse_cpu_redistributor(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_cpu_redistributor( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
struct acpi_madt_generic_interrupt *processor; | |
u32 size; | |
@@ -1621,9 +1638,8 @@ gic_acpi_parse_cpu_redistributor(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_get_madt_cpu_num(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_get_madt_cpu_num( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
struct acpi_madt_generic_interrupt *cpuif; | |
@@ -1634,9 +1650,8 @@ gic_acpi_get_madt_cpu_num(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_parse_madt_redistributor(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_parse_madt_redistributor( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
struct acpi_madt_generic_redistributor *rdist; | |
@@ -1649,9 +1664,8 @@ gic_acpi_parse_madt_redistributor(struct acpi_subtable_header *header, | |
return 0; | |
} | |
-static int __init | |
-gic_acpi_get_madt_redistributor_num(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init gic_acpi_get_madt_redistributor_num( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
/* Nothing to do here since it only wants to get the number of GIC | |
* redistributors. | |
@@ -1680,10 +1694,11 @@ static void __init gicv3_acpi_init(void) | |
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, | |
gic_acpi_get_madt_redistributor_num, 0); | |
/* Count the total number of CPU interface entries */ | |
- if ( count <= 0 ) { | |
+ if ( count <= 0 ) | |
+ { | |
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, | |
gic_acpi_get_madt_cpu_num, 0); | |
- if (count <= 0) | |
+ if ( count <= 0 ) | |
panic("GICv3: No valid GICR entries exists\n"); | |
gicr_table = false; | |
@@ -1697,8 +1712,9 @@ static void __init gicv3_acpi_init(void) | |
if ( gicr_table ) | |
/* Parse always-on power domain Re-distributor entries */ | |
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, | |
- gic_acpi_parse_madt_redistributor, count); | |
+ count = | |
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, | |
+ gic_acpi_parse_madt_redistributor, count); | |
else | |
/* Parse Re-distributor entries described in CPU interface table */ | |
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, | |
@@ -1731,10 +1747,11 @@ static void __init gicv3_acpi_init(void) | |
vbase = INVALID_PADDR; | |
else | |
vsize = GUEST_GICC_SIZE; | |
- | |
} | |
#else | |
-static void __init gicv3_acpi_init(void) { } | |
+static void __init gicv3_acpi_init(void) | |
+{ | |
+} | |
static int gicv3_make_hwdom_madt(const struct domain *d, u32 offset) | |
{ | |
return 0; | |
@@ -1760,7 +1777,8 @@ static int __init gicv3_init(void) | |
if ( !cpu_has_gicv3 ) | |
{ | |
- dprintk(XENLOG_ERR, "GICv3: driver requires system register support\n"); | |
+ dprintk(XENLOG_ERR, | |
+ "GICv3: driver requires system register support\n"); | |
return -ENODEV; | |
} | |
@@ -1771,39 +1789,39 @@ static int __init gicv3_init(void) | |
reg = readl_relaxed(GICD + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; | |
if ( reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4 ) | |
- panic("GICv3: no distributor detected\n"); | |
+ panic("GICv3: no distributor detected\n"); | |
for ( i = 0; i < gicv3.rdist_count; i++ ) | |
{ | |
/* map dbase & rdist regions */ | |
- gicv3.rdist_regions[i].map_base = | |
- ioremap_nocache(gicv3.rdist_regions[i].base, | |
- gicv3.rdist_regions[i].size); | |
+ gicv3.rdist_regions[i].map_base = ioremap_nocache( | |
+ gicv3.rdist_regions[i].base, gicv3.rdist_regions[i].size); | |
if ( !gicv3.rdist_regions[i].map_base ) | |
panic("GICv3: Failed to ioremap rdist region for region %d\n", i); | |
} | |
printk("GICv3 initialization:\n" | |
- " gic_dist_addr=%#"PRIpaddr"\n" | |
+ " gic_dist_addr=%#" PRIpaddr "\n" | |
" gic_maintenance_irq=%u\n" | |
" gic_rdist_stride=%#x\n" | |
" gic_rdist_regions=%d\n", | |
- dbase, gicv3_info.maintenance_irq, | |
- gicv3.rdist_stride, gicv3.rdist_count); | |
+ dbase, gicv3_info.maintenance_irq, gicv3.rdist_stride, | |
+ gicv3.rdist_count); | |
printk(" redistributor regions:\n"); | |
for ( i = 0; i < gicv3.rdist_count; i++ ) | |
{ | |
const struct rdist_region *r = &gicv3.rdist_regions[i]; | |
- printk(" - region %u: %#"PRIpaddr" - %#"PRIpaddr"\n", | |
- i, r->base, r->base + r->size); | |
+ printk(" - region %u: %#" PRIpaddr " - %#" PRIpaddr "\n", i, | |
+ r->base, r->base + r->size); | |
} | |
reg = readl_relaxed(GICD + GICD_TYPER); | |
intid_bits = GICD_TYPE_ID_BITS(reg); | |
- vgic_v3_setup_hw(dbase, gicv3.rdist_count, gicv3.rdist_regions, intid_bits); | |
+ vgic_v3_setup_hw(dbase, gicv3.rdist_count, gicv3.rdist_regions, | |
+ intid_bits); | |
gicv3_init_v2(); | |
spin_lock_init(&gicv3.lock); | |
@@ -1832,39 +1850,40 @@ out: | |
} | |
static const struct gic_hw_operations gicv3_ops = { | |
- .info = &gicv3_info, | |
- .init = gicv3_init, | |
- .save_state = gicv3_save_state, | |
- .restore_state = gicv3_restore_state, | |
- .dump_state = gicv3_dump_state, | |
- .gic_host_irq_type = &gicv3_host_irq_type, | |
- .gic_guest_irq_type = &gicv3_guest_irq_type, | |
- .eoi_irq = gicv3_eoi_irq, | |
- .deactivate_irq = gicv3_dir_irq, | |
- .read_irq = gicv3_read_irq, | |
- .set_active_state = gicv3_set_active_state, | |
- .set_pending_state = gicv3_set_pending_state, | |
- .set_irq_type = gicv3_set_irq_type, | |
- .set_irq_priority = gicv3_set_irq_priority, | |
- .send_SGI = gicv3_send_sgi, | |
- .disable_interface = gicv3_disable_interface, | |
- .update_lr = gicv3_update_lr, | |
- .update_hcr_status = gicv3_hcr_status, | |
- .clear_lr = gicv3_clear_lr, | |
- .read_lr = gicv3_read_lr, | |
- .write_lr = gicv3_write_lr, | |
- .read_vmcr_priority = gicv3_read_vmcr_priority, | |
- .read_apr = gicv3_read_apr, | |
- .read_pending_state = gicv3_read_pending_state, | |
- .secondary_init = gicv3_secondary_cpu_init, | |
- .make_hwdom_dt_node = gicv3_make_hwdom_dt_node, | |
- .make_hwdom_madt = gicv3_make_hwdom_madt, | |
+ .info = &gicv3_info, | |
+ .init = gicv3_init, | |
+ .save_state = gicv3_save_state, | |
+ .restore_state = gicv3_restore_state, | |
+ .dump_state = gicv3_dump_state, | |
+ .gic_host_irq_type = &gicv3_host_irq_type, | |
+ .gic_guest_irq_type = &gicv3_guest_irq_type, | |
+ .eoi_irq = gicv3_eoi_irq, | |
+ .deactivate_irq = gicv3_dir_irq, | |
+ .read_irq = gicv3_read_irq, | |
+ .set_active_state = gicv3_set_active_state, | |
+ .set_pending_state = gicv3_set_pending_state, | |
+ .set_irq_type = gicv3_set_irq_type, | |
+ .set_irq_priority = gicv3_set_irq_priority, | |
+ .send_SGI = gicv3_send_sgi, | |
+ .disable_interface = gicv3_disable_interface, | |
+ .update_lr = gicv3_update_lr, | |
+ .update_hcr_status = gicv3_hcr_status, | |
+ .clear_lr = gicv3_clear_lr, | |
+ .read_lr = gicv3_read_lr, | |
+ .write_lr = gicv3_write_lr, | |
+ .read_vmcr_priority = gicv3_read_vmcr_priority, | |
+ .read_apr = gicv3_read_apr, | |
+ .read_pending_state = gicv3_read_pending_state, | |
+ .secondary_init = gicv3_secondary_cpu_init, | |
+ .make_hwdom_dt_node = gicv3_make_hwdom_dt_node, | |
+ .make_hwdom_madt = gicv3_make_hwdom_madt, | |
.get_hwdom_extra_madt_size = gicv3_get_hwdom_extra_madt_size, | |
- .iomem_deny_access = gicv3_iomem_deny_access, | |
- .do_LPI = gicv3_do_LPI, | |
+ .iomem_deny_access = gicv3_iomem_deny_access, | |
+ .do_LPI = gicv3_do_LPI, | |
}; | |
-static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data) | |
+static int __init gicv3_dt_preinit(struct dt_device_node *node, | |
+ const void *data) | |
{ | |
gicv3_info.hw_version = GIC_V3; | |
gicv3_info.node = node; | |
@@ -1874,20 +1893,18 @@ static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data | |
return 0; | |
} | |
-static const struct dt_device_match gicv3_dt_match[] __initconst = | |
-{ | |
+static const struct dt_device_match gicv3_dt_match[] __initconst = { | |
DT_MATCH_GIC_V3, | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-DT_DEVICE_START(gicv3, "GICv3", DEVICE_GIC) | |
- .dt_match = gicv3_dt_match, | |
- .init = gicv3_dt_preinit, | |
-DT_DEVICE_END | |
+DT_DEVICE_START(gicv3, "GICv3", DEVICE_GIC).dt_match = gicv3_dt_match, | |
+ .init = gicv3_dt_preinit, | |
+ DT_DEVICE_END | |
#ifdef CONFIG_ACPI | |
-/* Set up the GIC */ | |
-static int __init gicv3_acpi_preinit(const void *data) | |
+ /* Set up the GIC */ | |
+ static int __init gicv3_acpi_preinit(const void *data) | |
{ | |
gicv3_info.hw_version = GIC_V3; | |
register_gic_ops(&gicv3_ops); | |
@@ -1896,21 +1913,21 @@ static int __init gicv3_acpi_preinit(const void *data) | |
} | |
ACPI_DEVICE_START(agicv3, "GICv3", DEVICE_GIC) | |
- .class_type = ACPI_MADT_GIC_VERSION_V3, | |
- .init = gicv3_acpi_preinit, | |
-ACPI_DEVICE_END | |
- | |
-ACPI_DEVICE_START(agicv4, "GICv4", DEVICE_GIC) | |
- .class_type = ACPI_MADT_GIC_VERSION_V4, | |
- .init = gicv3_acpi_preinit, | |
-ACPI_DEVICE_END | |
+ .class_type = ACPI_MADT_GIC_VERSION_V3, | |
+ .init = gicv3_acpi_preinit, | |
+ ACPI_DEVICE_END | |
+ | |
+ ACPI_DEVICE_START(agicv4, "GICv4", DEVICE_GIC) | |
+ .class_type = ACPI_MADT_GIC_VERSION_V4, | |
+ .init = gicv3_acpi_preinit, | |
+ ACPI_DEVICE_END | |
#endif | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c | |
index 98c021f1a8..b22e233339 100644 | |
--- a/xen/arch/arm/gic-vgic.c | |
+++ b/xen/arch/arm/gic-vgic.c | |
@@ -55,7 +55,7 @@ static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n) | |
if ( !list_empty(&n->lr_queue) ) | |
return; | |
- list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue ) | |
+ list_for_each_entry (iter, &v->arch.vgic.lr_pending, lr_queue) | |
{ | |
if ( iter->priority > n->priority ) | |
{ | |
@@ -94,8 +94,10 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq) | |
} | |
#ifdef GIC_DEBUG | |
else | |
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into %pv, when it is still lr_pending\n", | |
- virtual_irq, v); | |
+ gdprintk( | |
+ XENLOG_DEBUG, | |
+ "trying to inject irq=%u into %pv, when it is still lr_pending\n", | |
+ virtual_irq, v); | |
#endif | |
} | |
@@ -106,12 +108,11 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq) | |
* event gets discarded while the LPI is in an LR, and a new LPI with the | |
* same number gets mapped quickly afterwards. | |
*/ | |
-static unsigned int gic_find_unused_lr(struct vcpu *v, | |
- struct pending_irq *p, | |
+static unsigned int gic_find_unused_lr(struct vcpu *v, struct pending_irq *p, | |
unsigned int lr) | |
{ | |
unsigned int nr_lrs = gic_get_nr_lrs(); | |
- unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask); | |
+ unsigned long *lr_mask = (unsigned long *)&this_cpu(lr_mask); | |
struct gic_lr lr_val; | |
ASSERT(spin_is_locked(&v->arch.vgic.lock)); | |
@@ -120,7 +121,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v, | |
{ | |
unsigned int used_lr; | |
- for_each_set_bit(used_lr, lr_mask, nr_lrs) | |
+ for_each_set_bit (used_lr, lr_mask, nr_lrs) | |
{ | |
gic_hw_ops->read_lr(used_lr, &lr_val); | |
if ( lr_val.virq == p->irq ) | |
@@ -134,7 +135,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v, | |
} | |
void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq, | |
- unsigned int priority) | |
+ unsigned int priority) | |
{ | |
int i; | |
unsigned int nr_lrs = gic_get_nr_lrs(); | |
@@ -150,7 +151,8 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq, | |
{ | |
i = gic_find_unused_lr(v, p, 0); | |
- if (i < nr_lrs) { | |
+ if ( i < nr_lrs ) | |
+ { | |
set_bit(i, &this_cpu(lr_mask)); | |
gic_set_lr(i, p, GICH_LR_PENDING); | |
return; | |
@@ -201,17 +203,22 @@ static void gic_update_one_lr(struct vcpu *v, int i) | |
gic_hw_ops->write_lr(i, &lr_val); | |
} | |
else | |
- gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into %pv: already active in LR%d\n", | |
- irq, v, i); | |
+ gdprintk( | |
+ XENLOG_WARNING, | |
+ "unable to inject hw irq=%d into %pv: already active in LR%d\n", | |
+ irq, v, i); | |
} | |
} | |
else if ( lr_val.pending ) | |
{ | |
- int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status); | |
+ int q __attribute__((unused)) = | |
+ test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status); | |
#ifdef GIC_DEBUG | |
if ( q ) | |
- gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into %pv, when it is already pending in LR%d\n", | |
- irq, v, i); | |
+ gdprintk( | |
+ XENLOG_DEBUG, | |
+ "trying to inject irq=%d into %pv, when it is already pending in LR%d\n", | |
+ irq, v, i); | |
#endif | |
} | |
else | |
@@ -230,7 +237,8 @@ static void gic_update_one_lr(struct vcpu *v, int i) | |
test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) && | |
!test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) ) | |
gic_raise_guest_irq(v, irq, p->priority); | |
- else { | |
+ else | |
+ { | |
list_del_init(&p->inflight); | |
/* | |
* Remove from inflight, then change physical affinity. It | |
@@ -265,8 +273,9 @@ void vgic_sync_from_lrs(struct vcpu *v) | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
- while ((i = find_next_bit((const unsigned long *) &this_cpu(lr_mask), | |
- nr_lrs, i)) < nr_lrs ) { | |
+ while ( (i = find_next_bit((const unsigned long *)&this_cpu(lr_mask), | |
+ nr_lrs, i)) < nr_lrs ) | |
+ { | |
gic_update_one_lr(v, i); | |
i++; | |
} | |
@@ -290,13 +299,13 @@ static void gic_restore_pending_irqs(struct vcpu *v) | |
goto out; | |
inflight_r = &v->arch.vgic.inflight_irqs; | |
- list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) | |
+ list_for_each_entry_safe(p, t, &v->arch.vgic.lr_pending, lr_queue) | |
{ | |
lr = gic_find_unused_lr(v, p, lr); | |
if ( lr >= nr_lrs ) | |
{ | |
/* No more free LRs: find a lower priority irq to evict */ | |
- list_for_each_entry_reverse( p_r, inflight_r, inflight ) | |
+ list_for_each_entry_reverse(p_r, inflight_r, inflight) | |
{ | |
if ( p_r->priority == p->priority ) | |
goto out; | |
@@ -308,7 +317,7 @@ static void gic_restore_pending_irqs(struct vcpu *v) | |
* time, so quit */ | |
goto out; | |
-found: | |
+ found: | |
lr = p_r->lr; | |
p_r->lr = GIC_INVALID_LR; | |
set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status); | |
@@ -338,7 +347,7 @@ void gic_clear_pending_irqs(struct vcpu *v) | |
ASSERT(spin_is_locked(&v->arch.vgic.lock)); | |
v->arch.lr_mask = 0; | |
- list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) | |
+ list_for_each_entry_safe(p, t, &v->arch.vgic.lr_pending, lr_queue) | |
gic_remove_from_lr_pending(v, p); | |
} | |
@@ -374,7 +383,7 @@ int vgic_vcpu_pending_irq(struct vcpu *v) | |
/* find the first enabled non-active irq, the queue is already | |
* ordered by priority */ | |
- list_for_each_entry( p, &v->arch.vgic.inflight_irqs, inflight ) | |
+ list_for_each_entry (p, &v->arch.vgic.inflight_irqs, inflight) | |
{ | |
if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority ) | |
goto out; | |
@@ -406,10 +415,10 @@ void gic_dump_vgic_info(struct vcpu *v) | |
{ | |
struct pending_irq *p; | |
- list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight ) | |
+ list_for_each_entry (p, &v->arch.vgic.inflight_irqs, inflight) | |
printk("Inflight irq=%u lr=%u\n", p->irq, p->lr); | |
- list_for_each_entry( p, &v->arch.vgic.lr_pending, lr_queue ) | |
+ list_for_each_entry (p, &v->arch.vgic.lr_pending, lr_queue) | |
printk("Pending irq=%d\n", p->irq); | |
} | |
@@ -452,8 +461,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq, | |
if ( connect ) | |
{ | |
/* The VIRQ should not be already enabled by the guest */ | |
- if ( !p->desc && | |
- !test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) ) | |
+ if ( !p->desc && !test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) ) | |
p->desc = desc; | |
else | |
ret = -EBUSY; | |
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c | |
index 113655a789..253dbc34b9 100644 | |
--- a/xen/arch/arm/gic.c | |
+++ b/xen/arch/arm/gic.c | |
@@ -56,7 +56,7 @@ static void clear_cpu_lr_mask(void) | |
enum gic_version gic_hw_version(void) | |
{ | |
- return gic_hw_ops->info->hw_version; | |
+ return gic_hw_ops->info->hw_version; | |
} | |
unsigned int gic_number_lines(void) | |
@@ -113,8 +113,9 @@ static void gic_set_irq_priority(struct irq_desc *desc, unsigned int priority) | |
*/ | |
void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority) | |
{ | |
- ASSERT(priority <= 0xff); /* Only 8 bits of priority */ | |
- ASSERT(desc->irq < gic_number_lines());/* Can't route interrupts that don't exist */ | |
+ ASSERT(priority <= 0xff); /* Only 8 bits of priority */ | |
+ ASSERT(desc->irq < | |
+ gic_number_lines()); /* Can't route interrupts that don't exist */ | |
ASSERT(test_bit(_IRQ_DISABLED, &desc->status)); | |
ASSERT(spin_is_locked(&desc->lock)); | |
@@ -195,8 +196,7 @@ int gic_remove_irq_from_guest(struct domain *d, unsigned int virq, | |
} | |
int gic_irq_xlate(const u32 *intspec, unsigned int intsize, | |
- unsigned int *out_hwirq, | |
- unsigned int *out_type) | |
+ unsigned int *out_hwirq, unsigned int *out_type) | |
{ | |
if ( intsize < 3 ) | |
return -EINVAL; | |
@@ -229,7 +229,7 @@ static void __init gic_dt_preinit(void) | |
struct dt_device_node *node; | |
uint8_t num_gics = 0; | |
- dt_for_each_device_node( dt_host, node ) | |
+ dt_for_each_device_node (dt_host, node) | |
{ | |
if ( !dt_get_property(node, "interrupt-controller", NULL) ) | |
continue; | |
@@ -269,7 +269,9 @@ static void __init gic_acpi_preinit(void) | |
panic("Unable to find compatible GIC in the ACPI table\n"); | |
} | |
#else | |
-static void __init gic_acpi_preinit(void) { } | |
+static void __init gic_acpi_preinit(void) | |
+{ | |
+} | |
#endif | |
/* Find the interrupt controller and set up the callback to translate | |
@@ -313,9 +315,9 @@ void send_SGI_self(enum gic_sgi sgi) | |
void send_SGI_allbutself(enum gic_sgi sgi) | |
{ | |
- ASSERT(sgi < 16); /* There are only 16 SGIs */ | |
+ ASSERT(sgi < 16); /* There are only 16 SGIs */ | |
- gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL); | |
+ gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL); | |
} | |
void smp_send_state_dump(unsigned int cpu) | |
@@ -380,7 +382,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) | |
{ | |
unsigned int irq; | |
- do { | |
+ do { | |
/* Reading IRQ will ACK it */ | |
irq = gic_hw_ops->read_irq(); | |
@@ -403,10 +405,11 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) | |
local_irq_disable(); | |
break; | |
} | |
- } while (1); | |
+ } while ( 1 ); | |
} | |
-static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) | |
+static void maintenance_interrupt(int irq, void *dev_id, | |
+ struct cpu_user_regs *regs) | |
{ | |
/* | |
* This is a dummy interrupt handler. | |
@@ -424,7 +427,8 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r | |
void gic_dump_info(struct vcpu *v) | |
{ | |
- printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask); | |
+ printk("GICH_LRs (vcpu %d) mask=%" PRIx64 "\n", v->vcpu_id, | |
+ v->arch.lr_mask); | |
gic_hw_ops->dump_state(v); | |
} | |
@@ -435,8 +439,7 @@ void init_maintenance_interrupt(void) | |
} | |
int gic_make_hwdom_dt_node(const struct domain *d, | |
- const struct dt_device_node *gic, | |
- void *fdt) | |
+ const struct dt_device_node *gic, void *fdt) | |
{ | |
ASSERT(gic == dt_interrupt_controller); | |
@@ -452,10 +455,10 @@ unsigned long gic_get_hwdom_madt_size(const struct domain *d) | |
{ | |
unsigned long madt_size; | |
- madt_size = sizeof(struct acpi_table_madt) | |
- + sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus | |
- + sizeof(struct acpi_madt_generic_distributor) | |
- + gic_hw_ops->get_hwdom_extra_madt_size(d); | |
+ madt_size = sizeof(struct acpi_table_madt) + | |
+ sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus + | |
+ sizeof(struct acpi_madt_generic_distributor) + | |
+ gic_hw_ops->get_hwdom_extra_madt_size(d); | |
return madt_size; | |
} | |
@@ -465,11 +468,10 @@ int gic_iomem_deny_access(const struct domain *d) | |
return gic_hw_ops->iomem_deny_access(d); | |
} | |
-static int cpu_gic_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
+static int cpu_gic_callback(struct notifier_block *nfb, unsigned long action, | |
void *hcpu) | |
{ | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_DYING: | |
/* This is reverting the work done in init_maintenance_interrupt */ | |
diff --git a/xen/arch/arm/guest_atomics.c b/xen/arch/arm/guest_atomics.c | |
index 1b78a062f0..ff20731f65 100644 | |
--- a/xen/arch/arm/guest_atomics.c | |
+++ b/xen/arch/arm/guest_atomics.c | |
@@ -32,34 +32,33 @@ static void calibrate_safe_atomic(void) | |
unsigned int counter = 0; | |
unsigned long mem = 0; | |
- do | |
- { | |
+ do { | |
unsigned long res, tmp; | |
#ifdef CONFIG_ARM_32 | |
- asm volatile (" ldrex %2, %1\n" | |
- " add %2, %2, #1\n" | |
- " strex %0, %2, %1\n" | |
- : "=&r" (res), "+Q" (mem), "=&r" (tmp)); | |
+ asm volatile(" ldrex %2, %1\n" | |
+ " add %2, %2, #1\n" | |
+ " strex %0, %2, %1\n" | |
+ : "=&r"(res), "+Q"(mem), "=&r"(tmp)); | |
#else | |
- asm volatile (" ldxr %w2, %1\n" | |
- " add %w2, %w2, #1\n" | |
- " stxr %w0, %w2, %1\n" | |
- : "=&r" (res), "+Q" (mem), "=&r" (tmp)); | |
+ asm volatile(" ldxr %w2, %1\n" | |
+ " add %w2, %w2, #1\n" | |
+ " stxr %w0, %w2, %1\n" | |
+ : "=&r"(res), "+Q"(mem), "=&r"(tmp)); | |
#endif | |
counter++; | |
- } while (NOW() < deadline); | |
+ } while ( NOW() < deadline ); | |
this_cpu(guest_safe_atomic_max) = counter; | |
- printk(XENLOG_DEBUG | |
- "CPU%u: Guest atomics will try %u times before pausing the domain\n", | |
- smp_processor_id(), counter); | |
+ printk( | |
+ XENLOG_DEBUG | |
+ "CPU%u: Guest atomics will try %u times before pausing the domain\n", | |
+ smp_processor_id(), counter); | |
} | |
static int cpu_guest_safe_atomic_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
- void *hcpu) | |
+ unsigned long action, void *hcpu) | |
{ | |
if ( action == CPU_STARTING ) | |
calibrate_safe_atomic(); | |
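calibrate_safe_atomic() above simply counts how many LL/SC round trips fit in a fixed time | |
window and later uses that count as the retry bound for guest atomics before pausing the | |
domain. The idea ports to C11 atomics; a sketch using a wall-clock deadline in place of | |
Xen's NOW(), where CALIBRATION_NS and the clock source are placeholders, not Xen | |
interfaces: | |
    #include <stdatomic.h> | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    #include <time.h> | |
    #define CALIBRATION_NS 1000000ULL  /* 1 ms window, arbitrary for the demo */ | |
    static uint64_t now_ns(void) | |
    { | |
        struct timespec ts; | |
        clock_gettime(CLOCK_MONOTONIC, &ts); | |
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec; | |
    } | |
    int main(void) | |
    { | |
        _Atomic unsigned long mem = 0; | |
        unsigned long expected = 0; | |
        unsigned int counter = 0; | |
        uint64_t deadline = now_ns() + CALIBRATION_NS; | |
        do { | |
            /* One atomic read-modify-write, like one ldrex/strex round trip. */ | |
            atomic_compare_exchange_weak(&mem, &expected, expected + 1); | |
            counter++; | |
        } while ( now_ns() < deadline ); | |
        printf("atomic retry bound: %u\n", counter); | |
        return 0; | |
    } | |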
diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c | |
index c6d6e23bf5..b164ff63a2 100644 | |
--- a/xen/arch/arm/guest_walk.c | |
+++ b/xen/arch/arm/guest_walk.c | |
@@ -28,8 +28,7 @@ | |
* page table on a different vCPU, the following registers would need to be | |
* loaded: TCR_EL1, TTBR0_EL1, TTBR1_EL1, and SCTLR_EL1. | |
*/ | |
-static bool guest_walk_sd(const struct vcpu *v, | |
- vaddr_t gva, paddr_t *ipa, | |
+static bool guest_walk_sd(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, | |
unsigned int *perms) | |
{ | |
int ret; | |
@@ -95,11 +94,12 @@ static bool guest_walk_sd(const struct vcpu *v, | |
paddr |= (gva & mask) >> 18; | |
/* Access the guest's memory to read only one PTE. */ | |
- ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), false); | |
+ ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), | |
+ false); | |
if ( ret ) | |
return false; | |
- switch ( pte.walk.dt ) | |
+ switch (pte.walk.dt) | |
{ | |
case L1DESC_INVALID: | |
return false; | |
@@ -120,7 +120,8 @@ static bool guest_walk_sd(const struct vcpu *v, | |
paddr = ((paddr_t)pte.walk.base << 10) | ((gva & mask) >> 10); | |
/* Access the guest's memory to read only one PTE. */ | |
- ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), false); | |
+ ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(short_desc_t), | |
+ false); | |
if ( ret ) | |
return false; | |
@@ -130,7 +131,8 @@ static bool guest_walk_sd(const struct vcpu *v, | |
if ( pte.pg.page ) /* Small page. */ | |
{ | |
mask = (1ULL << L2DESC_SMALL_PAGE_SHIFT) - 1; | |
- *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | (gva & mask); | |
+ *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | | |
+ (gva & mask); | |
/* Set execute permissions associated with the small page. */ | |
if ( !pte.pg.xn ) | |
@@ -139,14 +141,16 @@ static bool guest_walk_sd(const struct vcpu *v, | |
else /* Large page. */ | |
{ | |
mask = (1ULL << L2DESC_LARGE_PAGE_SHIFT) - 1; | |
- *ipa = ((paddr_t)pte.lpg.base << L2DESC_LARGE_PAGE_SHIFT) | (gva & mask); | |
+ *ipa = ((paddr_t)pte.lpg.base << L2DESC_LARGE_PAGE_SHIFT) | | |
+ (gva & mask); | |
/* Set execute permissions associated with the large page. */ | |
if ( !pte.lpg.xn ) | |
*perms |= GV2M_EXEC; | |
} | |
- /* Set permissions so that the caller can check the flags by herself. */ | |
+ /* Set permissions so that the caller can check the flags by herself. | |
+ */ | |
if ( !pte.pg.ro ) | |
*perms |= GV2M_WRITE; | |
@@ -157,18 +161,22 @@ static bool guest_walk_sd(const struct vcpu *v, | |
if ( !pte.sec.supersec ) /* Section */ | |
{ | |
mask = (1ULL << L1DESC_SECTION_SHIFT) - 1; | |
- *ipa = ((paddr_t)pte.sec.base << L1DESC_SECTION_SHIFT) | (gva & mask); | |
+ *ipa = | |
+ ((paddr_t)pte.sec.base << L1DESC_SECTION_SHIFT) | (gva & mask); | |
} | |
else /* Supersection */ | |
{ | |
mask = (1ULL << L1DESC_SUPERSECTION_SHIFT) - 1; | |
*ipa = gva & mask; | |
*ipa |= (paddr_t)(pte.supersec.base) << L1DESC_SUPERSECTION_SHIFT; | |
- *ipa |= (paddr_t)(pte.supersec.extbase1) << L1DESC_SUPERSECTION_EXT_BASE1_SHIFT; | |
- *ipa |= (paddr_t)(pte.supersec.extbase2) << L1DESC_SUPERSECTION_EXT_BASE2_SHIFT; | |
+ *ipa |= (paddr_t)(pte.supersec.extbase1) | |
+ << L1DESC_SUPERSECTION_EXT_BASE1_SHIFT; | |
+ *ipa |= (paddr_t)(pte.supersec.extbase2) | |
+ << L1DESC_SUPERSECTION_EXT_BASE2_SHIFT; | |
} | |
- /* Set permissions so that the caller can check the flags by herself. */ | |
+ /* Set permissions so that the caller can check the flags by herself. | |
+ */ | |
if ( !pte.sec.ro ) | |
*perms |= GV2M_WRITE; | |
if ( !pte.sec.xn ) | |
@@ -189,14 +197,9 @@ static int get_ipa_output_size(struct domain *d, register_t tcr, | |
register_t ips; | |
static const unsigned int ipa_sizes[7] = { | |
- TCR_EL1_IPS_32_BIT_VAL, | |
- TCR_EL1_IPS_36_BIT_VAL, | |
- TCR_EL1_IPS_40_BIT_VAL, | |
- TCR_EL1_IPS_42_BIT_VAL, | |
- TCR_EL1_IPS_44_BIT_VAL, | |
- TCR_EL1_IPS_48_BIT_VAL, | |
- TCR_EL1_IPS_52_BIT_VAL | |
- }; | |
+ TCR_EL1_IPS_32_BIT_VAL, TCR_EL1_IPS_36_BIT_VAL, TCR_EL1_IPS_40_BIT_VAL, | |
+ TCR_EL1_IPS_42_BIT_VAL, TCR_EL1_IPS_44_BIT_VAL, TCR_EL1_IPS_48_BIT_VAL, | |
+ TCR_EL1_IPS_52_BIT_VAL}; | |
if ( is_64bit_domain(d) ) | |
{ | |
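The ipa_sizes table reformatted above maps the 3-bit TCR_EL1.IPS field onto an output size | |
in bits; the seven valid encodings 0-6 select 32/36/40/42/44/48/52 bits, matching the | |
TCR_EL1_IPS_*_BIT_VAL names. A stand-alone decode, with the IPS position at bits [34:32] | |
assumed from the ARMv8 spec: | |
    #include <stdint.h> | |
    #include <stdio.h> | |
    static int ipa_output_size(uint64_t tcr, unsigned int *bits) | |
    { | |
        static const unsigned int ipa_sizes[7] = {32, 36, 40, 42, 44, 48, 52}; | |
        unsigned int ips = (tcr >> 32) & 0x7;   /* assumed: IPS at [34:32] */ | |
        if ( ips >= 7 ) | |
            return -1;                          /* reserved encoding */ | |
        *bits = ipa_sizes[ips]; | |
        return 0; | |
    } | |
    int main(void) | |
    { | |
        unsigned int bits; | |
        if ( !ipa_output_size(5ULL << 32, &bits) )  /* IPS = 0b101 */ | |
            printf("%u-bit IPA\n", bits);           /* 48-bit IPA */ | |
        return 0; | |
    } | |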
@@ -222,14 +225,16 @@ static int get_ipa_output_size(struct domain *d, register_t tcr, | |
} | |
/* Normalized page granule size indices. */ | |
-enum granule_size_index { | |
+enum granule_size_index | |
+{ | |
GRANULE_SIZE_INDEX_4K, | |
GRANULE_SIZE_INDEX_16K, | |
GRANULE_SIZE_INDEX_64K | |
}; | |
/* Represent whether TTBR0 or TTBR1 is active. */ | |
-enum active_ttbr { | |
+enum active_ttbr | |
+{ | |
TTBR0_ACTIVE, | |
TTBR1_ACTIVE | |
}; | |
@@ -248,7 +253,7 @@ static bool get_ttbr_and_gran_64bit(uint64_t *ttbr, unsigned int *gran, | |
if ( ttbrx == TTBR0_ACTIVE ) | |
{ | |
/* Normalize granule size. */ | |
- switch ( tcr & TCR_TG0_MASK ) | |
+ switch (tcr & TCR_TG0_MASK) | |
{ | |
case TCR_TG0_16K: | |
*gran = GRANULE_SIZE_INDEX_16K; | |
@@ -279,7 +284,7 @@ static bool get_ttbr_and_gran_64bit(uint64_t *ttbr, unsigned int *gran, | |
else | |
{ | |
/* Normalize granule size. */ | |
- switch ( tcr & TCR_EL1_TG1_MASK ) | |
+ switch (tcr & TCR_EL1_TG1_MASK) | |
{ | |
case TCR_EL1_TG1_16K: | |
*gran = GRANULE_SIZE_INDEX_16K; | |
@@ -355,8 +360,7 @@ static bool check_base_size(unsigned int output_size, uint64_t base) | |
* page table on a different vCPU, the following registers would need to be | |
* loaded: TCR_EL1, TTBR0_EL1, TTBR1_EL1, and SCTLR_EL1. | |
*/ | |
-static bool guest_walk_ld(const struct vcpu *v, | |
- vaddr_t gva, paddr_t *ipa, | |
+static bool guest_walk_ld(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, | |
unsigned int *perms) | |
{ | |
int ret; | |
@@ -371,43 +375,29 @@ static bool guest_walk_ld(const struct vcpu *v, | |
register_t tcr = READ_SYSREG(TCR_EL1); | |
struct domain *d = v->domain; | |
-#define OFFSETS(gva, gran) \ | |
-{ \ | |
- zeroeth_table_offset_##gran(gva), \ | |
- first_table_offset_##gran(gva), \ | |
- second_table_offset_##gran(gva), \ | |
- third_table_offset_##gran(gva) \ | |
-} | |
+#define OFFSETS(gva, gran) \ | |
+ { \ | |
+ zeroeth_table_offset_##gran(gva), first_table_offset_##gran(gva), \ | |
+ second_table_offset_##gran(gva), third_table_offset_##gran(gva) \ | |
+ } | |
- const paddr_t offsets[3][4] = { | |
- OFFSETS(gva, 4K), | |
- OFFSETS(gva, 16K), | |
- OFFSETS(gva, 64K) | |
- }; | |
+ const paddr_t offsets[3][4] = {OFFSETS(gva, 4K), OFFSETS(gva, 16K), | |
+ OFFSETS(gva, 64K)}; | |
#undef OFFSETS | |
-#define MASKS(gran) \ | |
-{ \ | |
- zeroeth_size(gran) - 1, \ | |
- first_size(gran) - 1, \ | |
- second_size(gran) - 1, \ | |
- third_size(gran) - 1 \ | |
-} | |
+#define MASKS(gran) \ | |
+ { \ | |
+ zeroeth_size(gran) - 1, first_size(gran) - 1, second_size(gran) - 1, \ | |
+ third_size(gran) - 1 \ | |
+ } | |
- static const paddr_t masks[3][4] = { | |
- MASKS(4K), | |
- MASKS(16K), | |
- MASKS(64K) | |
- }; | |
+ static const paddr_t masks[3][4] = {MASKS(4K), MASKS(16K), MASKS(64K)}; | |
#undef MASKS | |
- static const unsigned int grainsizes[3] = { | |
- PAGE_SHIFT_4K, | |
- PAGE_SHIFT_16K, | |
- PAGE_SHIFT_64K | |
- }; | |
+ static const unsigned int grainsizes[3] = {PAGE_SHIFT_4K, PAGE_SHIFT_16K, | |
+ PAGE_SHIFT_64K}; | |
t0_sz = (tcr >> TCR_T0SZ_SHIFT) & TCR_SZ_MASK; | |
t1_sz = (tcr >> TCR_T1SZ_SHIFT) & TCR_SZ_MASK; | |
@@ -417,21 +407,24 @@ static bool guest_walk_ld(const struct vcpu *v, | |
if ( is_64bit_domain(d) ) | |
{ | |
- /* Select the TTBR(0|1)_EL1 that will be used for address translation. */ | |
+ /* Select the TTBR(0|1)_EL1 that will be used for address translation. | |
+ */ | |
if ( (gva & BIT(topbit, ULL)) == 0 ) | |
{ | |
input_size = 64 - t0_sz; | |
/* Get TTBR0 and configured page granularity. */ | |
- disabled = get_ttbr_and_gran_64bit(&ttbr, &gran, tcr, TTBR0_ACTIVE); | |
+ disabled = | |
+ get_ttbr_and_gran_64bit(&ttbr, &gran, tcr, TTBR0_ACTIVE); | |
} | |
else | |
{ | |
input_size = 64 - t1_sz; | |
/* Get TTBR1 and configured page granularity. */ | |
- disabled = get_ttbr_and_gran_64bit(&ttbr, &gran, tcr, TTBR1_ACTIVE); | |
+ disabled = | |
+ get_ttbr_and_gran_64bit(&ttbr, &gran, tcr, TTBR1_ACTIVE); | |
} | |
/* | |
@@ -449,7 +442,8 @@ static bool guest_walk_ld(const struct vcpu *v, | |
/* Granule size of AArch32 architectures is always 4K. */ | |
gran = GRANULE_SIZE_INDEX_4K; | |
- /* Select the TTBR(0|1)_EL1 that will be used for address translation. */ | |
+ /* Select the TTBR(0|1)_EL1 that will be used for address translation. | |
+ */ | |
/* | |
* Check if the bits <31:32-t0_sz> of the GVA are set to 0 (DDI 0487B.a | |
@@ -493,7 +487,8 @@ static bool guest_walk_ld(const struct vcpu *v, | |
* The starting level is the number of strides (grainsizes[gran] - 3) | |
* needed to consume the input address (ARM DDI 0487B.a J1-5924). | |
*/ | |
- level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]), (grainsizes[gran] - 3)); | |
+ level = 4 - DIV_ROUND_UP((input_size - grainsizes[gran]), | |
+ (grainsizes[gran] - 3)); | |
/* Get the IPA output_size. */ | |
ret = get_ipa_output_size(d, tcr, &output_size); | |
@@ -512,7 +507,7 @@ static bool guest_walk_ld(const struct vcpu *v, | |
mask = GENMASK_ULL(47, grainsizes[gran]); | |
paddr = (ttbr & mask); | |
- for ( ; ; level++ ) | |
+ for ( ;; level++ ) | |
{ | |
/* | |
* Add offset given by the GVA to the translation table base address. | |
@@ -521,7 +516,8 @@ static bool guest_walk_ld(const struct vcpu *v, | |
paddr |= offsets[gran][level] << 3; | |
/* Access the guest's memory to read only one PTE. */ | |
- ret = access_guest_memory_by_ipa(d, paddr, &pte, sizeof(lpae_t), false); | |
+ ret = | |
+ access_guest_memory_by_ipa(d, paddr, &pte, sizeof(lpae_t), false); | |
if ( ret ) | |
return false; | |
@@ -535,8 +531,7 @@ static bool guest_walk_ld(const struct vcpu *v, | |
* appropriately. | |
*/ | |
if ( (output_size < TCR_EL1_IPS_52_BIT_VAL) && | |
- (gran == GRANULE_SIZE_INDEX_64K) && | |
- (pte.walk.base & 0xf) ) | |
+ (gran == GRANULE_SIZE_INDEX_64K) && (pte.walk.base & 0xf) ) | |
return false; | |
/* | |
@@ -546,15 +541,16 @@ static bool guest_walk_ld(const struct vcpu *v, | |
* - The PTE is not valid. | |
* - If (level < 3) and the PTE is valid, we found a block descriptor. | |
*/ | |
- if ( level == 3 || !lpae_is_valid(pte) || lpae_is_superpage(pte, level) ) | |
+ if ( level == 3 || !lpae_is_valid(pte) || | |
+ lpae_is_superpage(pte, level) ) | |
break; | |
/* | |
* Temporarily store permissions of the table descriptor as they are | |
* inherited by page table attributes (ARM DDI 0487B.a J1-5928). | |
*/ | |
- xn_table |= pte.pt.xnt; /* Execute-Never */ | |
- ro_table |= pte.pt.apt & BIT(1, UL);/* Read-Only */ | |
+ xn_table |= pte.pt.xnt; /* Execute-Never */ | |
+ ro_table |= pte.pt.apt & BIT(1, UL); /* Read-Only */ | |
/* Compute the base address of the next level translation table. */ | |
mask = GENMASK_ULL(47, grainsizes[gran]); | |
@@ -586,8 +582,8 @@ static bool guest_walk_ld(const struct vcpu *v, | |
return true; | |
} | |
-bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, | |
- paddr_t *ipa, unsigned int *perms) | |
+bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, paddr_t *ipa, | |
+ unsigned int *perms) | |
{ | |
uint32_t sctlr = READ_SYSREG(SCTLR_EL1); | |
register_t tcr = READ_SYSREG(TCR_EL1); | |
@@ -617,7 +613,7 @@ bool guest_walk_tables(const struct vcpu *v, vaddr_t gva, | |
*ipa = gva; | |
/* Memory can be accessed without any restrictions. */ | |
- *perms = GV2M_READ|GV2M_WRITE|GV2M_EXEC; | |
+ *perms = GV2M_READ | GV2M_WRITE | GV2M_EXEC; | |
return true; | |
} | |
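
An aside on the hunk above: the reflowed starting-level expression in guest_walk_ld() packs the whole table-walk geometry into one line and is easy to misread. A minimal standalone sketch of just that arithmetic — the start_level() helper and the main() harness are illustrative, not Xen code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Grain size in bits for the 4K/16K/64K granules (PAGE_SHIFT_*). */
static const unsigned int grainsizes[3] = { 12, 14, 16 };

/*
 * Starting lookup level: 4 minus the number of strides needed to consume
 * the input address, where each stride resolves (grainsize - 3) bits
 * (ARM DDI 0487B.a J1-5924, as cited in the comment above).
 */
static unsigned int start_level(unsigned int input_size, unsigned int gran)
{
    return 4 - DIV_ROUND_UP(input_size - grainsizes[gran],
                            grainsizes[gran] - 3);
}

int main(void)
{
    /* 48-bit input, 4K granule: 4 - ceil(36/9) = 0, i.e. a 4-level walk. */
    printf("4K/48-bit -> level %u\n", start_level(48, 0));
    /* 39-bit input, 4K granule: 4 - ceil(27/9) = 1, i.e. a 3-level walk. */
    printf("4K/39-bit -> level %u\n", start_level(39, 0));
    return 0;
}
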
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c | |
index 7a0f3e9d5f..9f07efde7d 100644 | |
--- a/xen/arch/arm/guestcopy.c | |
+++ b/xen/arch/arm/guestcopy.c | |
@@ -5,14 +5,13 @@ | |
#include <asm/current.h> | |
#include <asm/guest_access.h> | |
-#define COPY_flush_dcache (1U << 0) | |
-#define COPY_from_guest (0U << 1) | |
-#define COPY_to_guest (1U << 1) | |
-#define COPY_ipa (0U << 2) | |
-#define COPY_linear (1U << 2) | |
+#define COPY_flush_dcache (1U << 0) | |
+#define COPY_from_guest (0U << 1) | |
+#define COPY_to_guest (1U << 1) | |
+#define COPY_ipa (0U << 2) | |
+#define COPY_linear (1U << 2) | |
-typedef union | |
-{ | |
+typedef union { | |
struct | |
{ | |
struct vcpu *v; | |
@@ -24,8 +23,8 @@ typedef union | |
} gpa; | |
} copy_info_t; | |
-#define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } }) | |
-#define GPA_INFO(domain) ((copy_info_t) { .gpa = { domain } }) | |
+#define GVA_INFO(vcpu) ((copy_info_t){.gva = {vcpu}}) | |
+#define GPA_INFO(domain) ((copy_info_t){.gpa = {domain}}) | |
static struct page_info *translate_get_page(copy_info_t info, uint64_t addr, | |
bool linear, bool write) | |
@@ -107,8 +106,8 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len, | |
unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len) | |
{ | |
- return copy_guest((void *)from, (vaddr_t)to, len, | |
- GVA_INFO(current), COPY_to_guest | COPY_linear); | |
+ return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current), | |
+ COPY_to_guest | COPY_linear); | |
} | |
unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from, | |
@@ -124,16 +123,15 @@ unsigned long raw_clear_guest(void *to, unsigned len) | |
COPY_to_guest | COPY_linear); | |
} | |
-unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len) | |
+unsigned long raw_copy_from_guest(void *to, const void __user *from, | |
+ unsigned len) | |
{ | |
return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current), | |
COPY_from_guest | COPY_linear); | |
} | |
-unsigned long copy_to_guest_phys_flush_dcache(struct domain *d, | |
- paddr_t gpa, | |
- void *buf, | |
- unsigned int len) | |
+unsigned long copy_to_guest_phys_flush_dcache(struct domain *d, paddr_t gpa, | |
+ void *buf, unsigned int len) | |
{ | |
return copy_guest(buf, gpa, len, GPA_INFO(d), | |
COPY_to_guest | COPY_ipa | COPY_flush_dcache); | |
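
The realigned COPY_* defines above encode three independent choices — direction, address space, cache maintenance — in one flags word. A small self-contained sketch of how callers compose them; the describe() helper is hypothetical:

#include <stdio.h>

/* Same bit layout as the COPY_* flags in the hunk above. */
#define COPY_flush_dcache (1U << 0)
#define COPY_from_guest   (0U << 1)
#define COPY_to_guest     (1U << 1)
#define COPY_ipa          (0U << 2)
#define COPY_linear       (1U << 2)

static void describe(unsigned int flags)
{
    printf("%s guest, %s addressing, %s\n",
           (flags & COPY_to_guest) ? "to" : "from",
           (flags & COPY_linear) ? "linear (GVA)" : "physical (IPA)",
           (flags & COPY_flush_dcache) ? "with dcache flush" : "no flush");
}

int main(void)
{
    describe(COPY_to_guest | COPY_linear);                  /* raw_copy_to_guest() */
    describe(COPY_to_guest | COPY_ipa | COPY_flush_dcache); /* copy_to_guest_phys_flush_dcache() */
    return 0;
}

Note that the zero-valued names (COPY_from_guest, COPY_ipa) exist purely for readability at call sites; they cannot be tested with &.
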
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c | |
index 76b27c9168..af0a4317b6 100644 | |
--- a/xen/arch/arm/hvm.c | |
+++ b/xen/arch/arm/hvm.c | |
@@ -35,7 +35,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) | |
{ | |
long rc = 0; | |
- switch ( op ) | |
+ switch (op) | |
{ | |
case HVMOP_set_param: | |
case HVMOP_get_param: | |
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c | |
index ae7ef96981..7deb86bc1a 100644 | |
--- a/xen/arch/arm/io.c | |
+++ b/xen/arch/arm/io.c | |
@@ -27,8 +27,7 @@ | |
#include "decode.h" | |
static enum io_state handle_read(const struct mmio_handler *handler, | |
- struct vcpu *v, | |
- mmio_info_t *info) | |
+ struct vcpu *v, mmio_info_t *info) | |
{ | |
const struct hsr_dabt dabt = info->dabt; | |
struct cpu_user_regs *regs = guest_cpu_user_regs(); | |
@@ -65,8 +64,7 @@ static enum io_state handle_read(const struct mmio_handler *handler, | |
} | |
static enum io_state handle_write(const struct mmio_handler *handler, | |
- struct vcpu *v, | |
- mmio_info_t *info) | |
+ struct vcpu *v, mmio_info_t *info) | |
{ | |
const struct hsr_dabt dabt = info->dabt; | |
struct cpu_user_regs *regs = guest_cpu_user_regs(); | |
@@ -107,17 +105,13 @@ static const struct mmio_handler *find_mmio_handler(struct domain *d, | |
return handler; | |
} | |
-enum io_state try_handle_mmio(struct cpu_user_regs *regs, | |
- const union hsr hsr, | |
+enum io_state try_handle_mmio(struct cpu_user_regs *regs, const union hsr hsr, | |
paddr_t gpa) | |
{ | |
struct vcpu *v = current; | |
const struct mmio_handler *handler = NULL; | |
const struct hsr_dabt dabt = hsr.dabt; | |
- mmio_info_t info = { | |
- .gpa = gpa, | |
- .dabt = dabt | |
- }; | |
+ mmio_info_t info = {.gpa = gpa, .dabt = dabt}; | |
ASSERT(hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL); | |
@@ -133,8 +127,7 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs, | |
* Erratum 766422: Thumb store translation fault to Hypervisor may | |
* not have correct HSR Rt value. | |
*/ | |
- if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) && | |
- dabt.write ) | |
+ if ( check_workaround_766422() && (regs->cpsr & PSR_THUMB) && dabt.write ) | |
{ | |
int rc; | |
@@ -153,8 +146,8 @@ enum io_state try_handle_mmio(struct cpu_user_regs *regs, | |
} | |
void register_mmio_handler(struct domain *d, | |
- const struct mmio_handler_ops *ops, | |
- paddr_t addr, paddr_t size, void *priv) | |
+ const struct mmio_handler_ops *ops, paddr_t addr, | |
+ paddr_t size, void *priv) | |
{ | |
struct vmmio *vmmio = &d->arch.vmmio; | |
struct mmio_handler *handler; | |
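
For context on find_mmio_handler(), whose body is elided by the hunk above: the lookup is a linear first-match scan over registered [addr, addr+size) ranges. A sketch under assumed, simplified types (the real code locks and walks struct vmmio; the table in main() is a hypothetical layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;

struct mmio_handler {
    paddr_t addr, size;
    const char *name;
};

static const struct mmio_handler *find_handler(const struct mmio_handler *tbl,
                                               size_t n, paddr_t gpa)
{
    /* First registered range containing gpa wins. */
    for ( size_t i = 0; i < n; i++ )
        if ( gpa >= tbl[i].addr && gpa - tbl[i].addr < tbl[i].size )
            return &tbl[i];
    return NULL;
}

int main(void)
{
    const struct mmio_handler tbl[] = {
        { 0x08000000, 0x10000, "vgic-dist" },
        { 0x09000000, 0x01000, "pl011" },
    };
    const struct mmio_handler *h = find_handler(tbl, 2, 0x09000040);

    printf("0x09000040 -> %s\n", h ? h->name : "none");
    return 0;
}
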
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c | |
index c51cf333ce..d6ce5a6f41 100644 | |
--- a/xen/arch/arm/irq.c | |
+++ b/xen/arch/arm/irq.c | |
@@ -53,15 +53,13 @@ static void end_none(struct irq_desc *irq) | |
gic_hw_ops->gic_host_irq_type->end(irq); | |
} | |
-hw_irq_controller no_irq_type = { | |
- .typename = "none", | |
- .startup = irq_startup_none, | |
- .shutdown = irq_shutdown_none, | |
- .enable = irq_enable_none, | |
- .disable = irq_disable_none, | |
- .ack = ack_none, | |
- .end = end_none | |
-}; | |
+hw_irq_controller no_irq_type = {.typename = "none", | |
+ .startup = irq_startup_none, | |
+ .shutdown = irq_shutdown_none, | |
+ .enable = irq_enable_none, | |
+ .disable = irq_disable_none, | |
+ .ack = ack_none, | |
+ .end = end_none}; | |
static irq_desc_t irq_desc[NR_IRQS]; | |
static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc); | |
@@ -71,7 +69,7 @@ irq_desc_t *__irq_to_desc(int irq) | |
if ( irq < NR_LOCAL_IRQS ) | |
return &this_cpu(local_irq_desc)[irq]; | |
- return &irq_desc[irq-NR_LOCAL_IRQS]; | |
+ return &irq_desc[irq - NR_LOCAL_IRQS]; | |
} | |
int arch_init_one_irq_desc(struct irq_desc *desc) | |
@@ -80,7 +78,6 @@ int arch_init_one_irq_desc(struct irq_desc *desc) | |
return 0; | |
} | |
- | |
static int __init init_irq_data(void) | |
{ | |
int irq; | |
@@ -90,7 +87,7 @@ static int __init init_irq_data(void) | |
struct irq_desc *desc = irq_to_desc(irq); | |
init_one_irq_desc(desc); | |
desc->irq = irq; | |
- desc->action = NULL; | |
+ desc->action = NULL; | |
} | |
return 0; | |
@@ -107,7 +104,7 @@ static int init_local_irq_data(void) | |
struct irq_desc *desc = irq_to_desc(irq); | |
init_one_irq_desc(desc); | |
desc->irq = irq; | |
- desc->action = NULL; | |
+ desc->action = NULL; | |
/* PPIs are included in local_irqs; we copy the IRQ type from | |
* local_irqs_type when bringing up local IRQ for this CPU in | |
@@ -219,8 +216,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq) | |
#ifndef NDEBUG | |
if ( !desc->action ) | |
{ | |
- printk("Unknown %s %#3.3x\n", | |
- is_fiq ? "FIQ" : "IRQ", irq); | |
+ printk("Unknown %s %#3.3x\n", is_fiq ? "FIQ" : "IRQ", irq); | |
goto out; | |
} | |
#endif | |
@@ -263,8 +259,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq) | |
spin_unlock_irq(&desc->lock); | |
- do | |
- { | |
+ do { | |
action->handler(irq, action->dev_id, regs); | |
action = action->next; | |
} while ( action ); | |
@@ -289,7 +284,7 @@ void release_irq(unsigned int irq, const void *dev_id) | |
desc = irq_to_desc(irq); | |
- spin_lock_irqsave(&desc->lock,flags); | |
+ spin_lock_irqsave(&desc->lock, flags); | |
action_ptr = &desc->action; | |
for ( ;; ) | |
@@ -318,10 +313,12 @@ void release_irq(unsigned int irq, const void *dev_id) | |
clear_bit(_IRQ_GUEST, &desc->status); | |
} | |
- spin_unlock_irqrestore(&desc->lock,flags); | |
+ spin_unlock_irqrestore(&desc->lock, flags); | |
/* Wait to make sure it's not being used on another CPU */ | |
- do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) ); | |
+ do { | |
+ smp_mb(); | |
+ } while ( test_bit(_IRQ_INPROGRESS, &desc->status) ); | |
if ( action->free_on_release ) | |
xfree(action); | |
@@ -338,7 +335,8 @@ static int __setup_irq(struct irq_desc *desc, unsigned int irqflags, | |
* - if the IRQ is marked as shared | |
* - dev_id is not NULL when IRQF_SHARED is set | |
*/ | |
- if ( desc->action != NULL && (!test_bit(_IRQF_SHARED, &desc->status) || !shared) ) | |
+ if ( desc->action != NULL && | |
+ (!test_bit(_IRQF_SHARED, &desc->status) || !shared) ) | |
return -EINVAL; | |
if ( shared && new->dev_id == NULL ) | |
return -EINVAL; | |
@@ -423,8 +421,8 @@ bool irq_type_set_by_domain(const struct domain *d) | |
* Route an IRQ to a specific guest. | |
* For now only SPIs are assignable to the guest. | |
*/ | |
-int route_irq_to_guest(struct domain *d, unsigned int virq, | |
- unsigned int irq, const char * devname) | |
+int route_irq_to_guest(struct domain *d, unsigned int virq, unsigned int irq, | |
+ const char *devname) | |
{ | |
struct irqaction *action; | |
struct irq_guest *info; | |
@@ -616,7 +614,7 @@ void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask) | |
static bool irq_validate_new_type(unsigned int curr, unsigned new) | |
{ | |
- return (curr == IRQ_TYPE_INVALID || curr == new ); | |
+ return (curr == IRQ_TYPE_INVALID || curr == new); | |
} | |
int irq_set_spi_type(unsigned int spi, unsigned int type) | |
@@ -667,7 +665,7 @@ static int irq_local_set_type(unsigned int irq, unsigned int type) | |
local_irqs_type[irq] = type; | |
- for_each_cpu( cpu, &cpu_online_map ) | |
+ for_each_cpu (cpu, &cpu_online_map) | |
{ | |
desc = &per_cpu(local_irq_desc, cpu)[irq]; | |
spin_lock_irqsave(&desc->lock, flags); | |
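
The __irq_to_desc() change earlier in this file is pure whitespace, but the split it implements is worth a sketch: IRQs below NR_LOCAL_IRQS (SGIs and PPIs) get a per-CPU descriptor, while everything else indexes one shared SPI table shifted down by NR_LOCAL_IRQS. A toy model with assumed sizes, not the real Xen tables:

#include <stdio.h>

#define NR_CPUS       4    /* toy value */
#define NR_LOCAL_IRQS 32   /* SGIs + PPIs, banked per CPU */
#define NR_IRQS       1024 /* toy upper bound */

struct irq_desc { int irq; };

static struct irq_desc local_desc[NR_CPUS][NR_LOCAL_IRQS];
static struct irq_desc shared_desc[NR_IRQS - NR_LOCAL_IRQS];

static struct irq_desc *irq_to_desc(unsigned int cpu, unsigned int irq)
{
    if ( irq < NR_LOCAL_IRQS )
        return &local_desc[cpu][irq];         /* banked per CPU */
    return &shared_desc[irq - NR_LOCAL_IRQS]; /* one desc system-wide */
}

int main(void)
{
    printf("PPI 27, cpu0 vs cpu1 distinct: %d\n",
           irq_to_desc(0, 27) != irq_to_desc(1, 27));
    printf("SPI 100, cpu0 vs cpu1 shared:  %d\n",
           irq_to_desc(0, 100) == irq_to_desc(1, 100));
    return 0;
}
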
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c | |
index 389bef2afa..a6a376874e 100644 | |
--- a/xen/arch/arm/kernel.c | |
+++ b/xen/arch/arm/kernel.c | |
@@ -18,20 +18,21 @@ | |
#include <asm/guest_access.h> | |
#include <asm/kernel.h> | |
-#define UIMAGE_MAGIC 0x27051956 | |
-#define UIMAGE_NMLEN 32 | |
+#define UIMAGE_MAGIC 0x27051956 | |
+#define UIMAGE_NMLEN 32 | |
#define ZIMAGE32_MAGIC_OFFSET 0x24 | |
#define ZIMAGE32_START_OFFSET 0x28 | |
-#define ZIMAGE32_END_OFFSET 0x2c | |
-#define ZIMAGE32_HEADER_LEN 0x30 | |
+#define ZIMAGE32_END_OFFSET 0x2c | |
+#define ZIMAGE32_HEADER_LEN 0x30 | |
#define ZIMAGE32_MAGIC 0x016f2818 | |
#define ZIMAGE64_MAGIC_V0 0x14000008 | |
#define ZIMAGE64_MAGIC_V1 0x644d5241 /* "ARM\x64" */ | |
-struct minimal_dtb_header { | |
+struct minimal_dtb_header | |
+{ | |
uint32_t magic; | |
uint32_t total_size; | |
/* There are other fields but we don't use them yet. */ | |
@@ -49,10 +50,11 @@ void __init copy_from_paddr(void *dst, paddr_t paddr, unsigned long len) | |
{ | |
void *src = (void *)FIXMAP_ADDR(FIXMAP_MISC); | |
- while (len) { | |
+ while ( len ) | |
+ { | |
unsigned long l, s; | |
- s = paddr & (PAGE_SIZE-1); | |
+ s = paddr & (PAGE_SIZE - 1); | |
l = min(PAGE_SIZE - s, len); | |
set_fixmap(FIXMAP_MISC, maddr_to_mfn(paddr), PAGE_HYPERVISOR_WC); | |
@@ -66,10 +68,11 @@ void __init copy_from_paddr(void *dst, paddr_t paddr, unsigned long len) | |
} | |
} | |
-static void __init place_modules(struct kernel_info *info, | |
- paddr_t kernbase, paddr_t kernend) | |
+static void __init place_modules(struct kernel_info *info, paddr_t kernbase, | |
+ paddr_t kernend) | |
{ | |
- /* Align DTB and initrd size to 2Mb. Linux only requires 4 byte alignment */ | |
+ /* Align DTB and initrd size to 2Mb. Linux only requires 4 byte alignment | |
+ */ | |
const struct bootmodule *mod = info->initrd_bootmodule; | |
const paddr_t initrd_len = ROUNDUP(mod ? mod->size : 0, MB(2)); | |
const paddr_t dtb_len = ROUNDUP(fdt_totalsize(info->fdt), MB(2)); | |
@@ -85,7 +88,8 @@ static void __init place_modules(struct kernel_info *info, | |
paddr_t modbase; | |
if ( modsize + kernsize > ramsize ) | |
- panic("Not enough memory in the first bank for the kernel+dtb+initrd\n"); | |
+ panic( | |
+ "Not enough memory in the first bank for the kernel+dtb+initrd\n"); | |
/* | |
* DTB must be loaded such that it does not conflict with the | |
@@ -164,15 +168,15 @@ static void __init kernel_zimage_load(struct kernel_info *info) | |
place_modules(info, load_addr, load_addr + len); | |
- printk("Loading zImage from %"PRIpaddr" to %"PRIpaddr"-%"PRIpaddr"\n", | |
+ printk("Loading zImage from %" PRIpaddr " to %" PRIpaddr "-%" PRIpaddr | |
+ "\n", | |
paddr, load_addr, load_addr + len); | |
kernel = ioremap_wc(paddr, len); | |
if ( !kernel ) | |
panic("Unable to map the hwdom kernel\n"); | |
- rc = copy_to_guest_phys_flush_dcache(info->d, load_addr, | |
- kernel, len); | |
+ rc = copy_to_guest_phys_flush_dcache(info->d, load_addr, kernel, len); | |
if ( rc != 0 ) | |
panic("Unable to copy the kernel in the hwdom memory\n"); | |
@@ -182,27 +186,28 @@ static void __init kernel_zimage_load(struct kernel_info *info) | |
/* | |
* Uimage CPU Architecture Codes | |
*/ | |
-#define IH_ARCH_ARM 2 /* ARM */ | |
-#define IH_ARCH_ARM64 22 /* ARM64 */ | |
+#define IH_ARCH_ARM 2 /* ARM */ | |
+#define IH_ARCH_ARM64 22 /* ARM64 */ | |
/* | |
* Check if the image is a uImage and setup kernel_info | |
*/ | |
-static int __init kernel_uimage_probe(struct kernel_info *info, | |
- paddr_t addr, paddr_t size) | |
+static int __init kernel_uimage_probe(struct kernel_info *info, paddr_t addr, | |
+ paddr_t size) | |
{ | |
- struct { | |
- __be32 magic; /* Image Header Magic Number */ | |
- __be32 hcrc; /* Image Header CRC Checksum */ | |
- __be32 time; /* Image Creation Timestamp */ | |
- __be32 size; /* Image Data Size */ | |
- __be32 load; /* Data Load Address */ | |
- __be32 ep; /* Entry Point Address */ | |
- __be32 dcrc; /* Image Data CRC Checksum */ | |
- uint8_t os; /* Operating System */ | |
- uint8_t arch; /* CPU architecture */ | |
- uint8_t type; /* Image Type */ | |
- uint8_t comp; /* Compression Type */ | |
+ struct | |
+ { | |
+ __be32 magic; /* Image Header Magic Number */ | |
+ __be32 hcrc; /* Image Header CRC Checksum */ | |
+ __be32 time; /* Image Creation Timestamp */ | |
+ __be32 size; /* Image Data Size */ | |
+ __be32 load; /* Data Load Address */ | |
+ __be32 ep; /* Entry Point Address */ | |
+ __be32 dcrc; /* Image Data CRC Checksum */ | |
+ uint8_t os; /* Operating System */ | |
+ uint8_t arch; /* CPU architecture */ | |
+ uint8_t type; /* Image Type */ | |
+ uint8_t comp; /* Compression Type */ | |
uint8_t name[UIMAGE_NMLEN]; /* Image Name */ | |
} uimage; | |
@@ -228,7 +233,7 @@ static int __init kernel_uimage_probe(struct kernel_info *info, | |
info->load = kernel_zimage_load; | |
#ifdef CONFIG_ARM_64 | |
- switch ( uimage.arch ) | |
+ switch (uimage.arch) | |
{ | |
case IH_ARCH_ARM: | |
info->type = DOMAIN_32BIT; | |
@@ -285,7 +290,8 @@ static __init int kernel_decompress(struct bootmodule *mod) | |
return -ENOMEM; | |
} | |
mfn = page_to_mfn(pages); | |
- output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT); | |
+ output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, | |
+ VMAP_DEFAULT); | |
rc = perform_gunzip(output, input, size); | |
clean_dcache_va_range(output, output_size); | |
@@ -316,14 +322,15 @@ static __init int kernel_decompress(struct bootmodule *mod) | |
/* | |
* Check if the image is a 64-bit Image. | |
*/ | |
-static int __init kernel_zimage64_probe(struct kernel_info *info, | |
- paddr_t addr, paddr_t size) | |
+static int __init kernel_zimage64_probe(struct kernel_info *info, paddr_t addr, | |
+ paddr_t size) | |
{ | |
/* linux/Documentation/arm64/booting.txt */ | |
- struct { | |
+ struct | |
+ { | |
uint32_t magic0; | |
uint32_t res0; | |
- uint64_t text_offset; /* Image load offset */ | |
+ uint64_t text_offset; /* Image load offset */ | |
uint64_t res1; | |
uint64_t res2; | |
/* zImage V1 only from here */ | |
@@ -370,10 +377,10 @@ static int __init kernel_zimage64_probe(struct kernel_info *info, | |
/* | |
* Check if the image is a 32-bit zImage and setup kernel_info | |
*/ | |
-static int __init kernel_zimage32_probe(struct kernel_info *info, | |
- paddr_t addr, paddr_t size) | |
+static int __init kernel_zimage32_probe(struct kernel_info *info, paddr_t addr, | |
+ paddr_t size) | |
{ | |
- uint32_t zimage[ZIMAGE32_HEADER_LEN/4]; | |
+ uint32_t zimage[ZIMAGE32_HEADER_LEN / 4]; | |
uint32_t start, end; | |
struct minimal_dtb_header dtb_hdr; | |
@@ -382,11 +389,11 @@ static int __init kernel_zimage32_probe(struct kernel_info *info, | |
copy_from_paddr(zimage, addr, sizeof(zimage)); | |
- if (zimage[ZIMAGE32_MAGIC_OFFSET/4] != ZIMAGE32_MAGIC) | |
+ if ( zimage[ZIMAGE32_MAGIC_OFFSET / 4] != ZIMAGE32_MAGIC ) | |
return -EINVAL; | |
- start = zimage[ZIMAGE32_START_OFFSET/4]; | |
- end = zimage[ZIMAGE32_END_OFFSET/4]; | |
+ start = zimage[ZIMAGE32_START_OFFSET / 4]; | |
+ end = zimage[ZIMAGE32_END_OFFSET / 4]; | |
if ( (end - start) > size ) | |
return -EINVAL; | |
@@ -397,7 +404,8 @@ static int __init kernel_zimage32_probe(struct kernel_info *info, | |
if ( addr + end - start + sizeof(dtb_hdr) <= size ) | |
{ | |
copy_from_paddr(&dtb_hdr, addr + end - start, sizeof(dtb_hdr)); | |
- if (be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC) { | |
+ if ( be32_to_cpu(dtb_hdr.magic) == DTB_MAGIC ) | |
+ { | |
end += be32_to_cpu(dtb_hdr.total_size); | |
if ( end > addr + size ) | |
@@ -455,8 +463,8 @@ int __init kernel_probe(struct kernel_info *info, | |
val = dt_get_property(node, "reg", &len); | |
dt_get_range(&val, node, &kernel_addr, &size); | |
- mod = boot_module_find_by_addr_and_kind( | |
- BOOTMOD_KERNEL, kernel_addr); | |
+ mod = boot_module_find_by_addr_and_kind(BOOTMOD_KERNEL, | |
+ kernel_addr); | |
info->kernel_bootmodule = mod; | |
} | |
else if ( dt_device_is_compatible(node, "multiboot,ramdisk") ) | |
@@ -467,7 +475,7 @@ int __init kernel_probe(struct kernel_info *info, | |
val = dt_get_property(node, "reg", &len); | |
dt_get_range(&val, node, &initrd_addr, &size); | |
info->initrd_bootmodule = boot_module_find_by_addr_and_kind( | |
- BOOTMOD_RAMDISK, initrd_addr); | |
+ BOOTMOD_RAMDISK, initrd_addr); | |
} | |
else | |
continue; | |
@@ -483,23 +491,23 @@ int __init kernel_probe(struct kernel_info *info, | |
return -ENOENT; | |
} | |
- printk("Loading %pd kernel from boot module @ %"PRIpaddr"\n", | |
- info->d, info->kernel_bootmodule->start); | |
+ printk("Loading %pd kernel from boot module @ %" PRIpaddr "\n", info->d, | |
+ info->kernel_bootmodule->start); | |
if ( info->initrd_bootmodule ) | |
- printk("Loading ramdisk from boot module @ %"PRIpaddr"\n", | |
+ printk("Loading ramdisk from boot module @ %" PRIpaddr "\n", | |
info->initrd_bootmodule->start); | |
/* if it is a gzip'ed image, 32bit or 64bit, uncompress it */ | |
rc = kernel_decompress(mod); | |
- if (rc < 0 && rc != -EINVAL) | |
+ if ( rc < 0 && rc != -EINVAL ) | |
return rc; | |
#ifdef CONFIG_ARM_64 | |
rc = kernel_zimage64_probe(info, mod->start, mod->size); | |
- if (rc < 0) | |
+ if ( rc < 0 ) | |
#endif | |
rc = kernel_uimage_probe(info, mod->start, mod->size); | |
- if (rc < 0) | |
+ if ( rc < 0 ) | |
rc = kernel_zimage32_probe(info, mod->start, mod->size); | |
return rc; | |
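
The probe chain at the end of kernel_probe() (zimage64, then uimage, then zimage32) relies on each probe rejecting foreign images by header magic. A standalone sketch of the 32-bit zImage check — buffer-based rather than via copy_from_paddr(), and the test image in main() is fabricated:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ZIMAGE32_MAGIC_OFFSET 0x24
#define ZIMAGE32_START_OFFSET 0x28
#define ZIMAGE32_END_OFFSET   0x2c
#define ZIMAGE32_HEADER_LEN   0x30
#define ZIMAGE32_MAGIC        0x016f2818

static int zimage32_probe(const uint8_t *buf, size_t size,
                          uint32_t *start, uint32_t *end)
{
    uint32_t w[ZIMAGE32_HEADER_LEN / 4];

    if ( size < ZIMAGE32_HEADER_LEN )
        return -1;
    memcpy(w, buf, sizeof(w));
    if ( w[ZIMAGE32_MAGIC_OFFSET / 4] != ZIMAGE32_MAGIC )
        return -1;                          /* not a 32-bit zImage */
    *start = w[ZIMAGE32_START_OFFSET / 4];
    *end = w[ZIMAGE32_END_OFFSET / 4];
    return (*end - *start > size) ? -1 : 0; /* image larger than module */
}

int main(void)
{
    uint8_t img[64] = { 0 };
    uint32_t magic = ZIMAGE32_MAGIC, end_word = 0x20, start, end;

    memcpy(img + ZIMAGE32_MAGIC_OFFSET, &magic, sizeof(magic));
    memcpy(img + ZIMAGE32_END_OFFSET, &end_word, sizeof(end_word));
    printf("probe -> %d (start=%u end=%u)\n",
           zimage32_probe(img, sizeof(img), &start, &end), start, end);
    return 0;
}
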
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c | |
index 279d52cc6c..e6898bd2e8 100644 | |
--- a/xen/arch/arm/livepatch.c | |
+++ b/xen/arch/arm/livepatch.c | |
@@ -33,12 +33,13 @@ int arch_livepatch_quiesce(void) | |
* The text section is read-only. So re-map Xen to be able to patch | |
* the code. | |
*/ | |
- vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR, | |
- VMAP_DEFAULT); | |
+ vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, | |
+ PAGE_HYPERVISOR, VMAP_DEFAULT); | |
if ( !vmap_of_xen_text ) | |
{ | |
- printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n", | |
+ printk(XENLOG_ERR LIVEPATCH | |
+ "Failed to setup vmap of hypervisor! (order=%u)\n", | |
text_order); | |
return -ENOMEM; | |
} | |
@@ -64,7 +65,7 @@ int arch_livepatch_verify_func(const struct livepatch_func *func) | |
{ | |
/* If NOPing only do up to maximum amount we can put in the ->opaque. */ | |
if ( !func->new_addr && (func->new_size > sizeof(func->opaque) || | |
- func->new_size % ARCH_PATCH_INSN_SIZE) ) | |
+ func->new_size % ARCH_PATCH_INSN_SIZE) ) | |
return -EOPNOTSUPP; | |
if ( func->old_size < ARCH_PATCH_INSN_SIZE ) | |
@@ -120,7 +121,7 @@ bool arch_livepatch_symbol_ok(const struct livepatch_elf *elf, | |
char p = sym->name[1]; | |
size_t len = strlen(sym->name); | |
- if ( (len >= 3 && (sym->name[2] == '.' )) || (len == 2) ) | |
+ if ( (len >= 3 && (sym->name[2] == '.')) || (len == 2) ) | |
{ | |
if ( p == 'd' || | |
#ifdef CONFIG_ARM_32 | |
@@ -128,14 +129,15 @@ bool arch_livepatch_symbol_ok(const struct livepatch_elf *elf, | |
#else | |
p == 'x' | |
#endif | |
- ) | |
+ ) | |
return false; | |
} | |
} | |
return true; | |
} | |
-int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type) | |
+int arch_livepatch_secure(const void *va, unsigned int pages, | |
+ enum va_type type) | |
{ | |
unsigned long start = (unsigned long)va; | |
unsigned int flags = 0; | |
@@ -143,7 +145,7 @@ int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type) | |
ASSERT(va); | |
ASSERT(pages); | |
- switch ( type ) | |
+ switch (type) | |
{ | |
case LIVEPATCH_VA_RX: | |
flags = PAGE_HYPERVISOR_RX; | |
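
One behavioural detail hiding in the arch_livepatch_verify_func() hunk above: when NOPing (new_addr == NULL), the patched region must both fit in func->opaque (the old bytes are stashed there for revert) and be a whole number of fixed-width instructions. A sketch of just that predicate — the opaque size here is an assumed stand-in, not the real structure layout:

#include <stdbool.h>
#include <stdio.h>

#define ARCH_PATCH_INSN_SIZE 4  /* fixed-width A32/A64 instructions */
#define OPAQUE_SIZE          31 /* assumed stand-in for sizeof(func->opaque) */

static bool nop_region_ok(unsigned int new_size)
{
    return new_size <= OPAQUE_SIZE &&
           (new_size % ARCH_PATCH_INSN_SIZE) == 0;
}

int main(void)
{
    printf("8 bytes:  %s\n", nop_region_ok(8) ? "ok" : "-EOPNOTSUPP");
    printf("10 bytes: %s\n", nop_region_ok(10) ? "ok" : "-EOPNOTSUPP");
    printf("40 bytes: %s\n", nop_region_ok(40) ? "ok" : "-EOPNOTSUPP");
    return 0;
}
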
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c | |
index 3e3620294c..f5b2c7b787 100644 | |
--- a/xen/arch/arm/mem_access.c | |
+++ b/xen/arch/arm/mem_access.c | |
@@ -33,16 +33,8 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn, | |
static const xenmem_access_t memaccess[] = { | |
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac | |
- ACCESS(n), | |
- ACCESS(r), | |
- ACCESS(w), | |
- ACCESS(rw), | |
- ACCESS(x), | |
- ACCESS(rx), | |
- ACCESS(wx), | |
- ACCESS(rwx), | |
- ACCESS(rx2rw), | |
- ACCESS(n2rwx), | |
+ ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), | |
+ ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), | |
#undef ACCESS | |
}; | |
@@ -98,9 +90,9 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn, | |
* Only in these cases we do a software-based type check and fetch the page if | |
* we indeed found a conflicting mem_access setting. | |
*/ | |
-struct page_info* | |
-p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, | |
- const struct vcpu *v) | |
+struct page_info *p2m_mem_access_check_and_get_page(vaddr_t gva, | |
+ unsigned long flag, | |
+ const struct vcpu *v) | |
{ | |
long rc; | |
unsigned int perms; | |
@@ -156,21 +148,22 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, | |
goto err; | |
/* Let's check if mem_access limited the access. */ | |
- switch ( xma ) | |
+ switch (xma) | |
{ | |
default: | |
case XENMEM_access_rwx: | |
case XENMEM_access_rw: | |
/* | |
- * If mem_access contains no rw perm restrictions at all then the original | |
- * fault was correct. | |
+ * If mem_access contains no rw perm restrictions at all then the | |
+ * original fault was correct. | |
*/ | |
goto err; | |
case XENMEM_access_n2rwx: | |
case XENMEM_access_n: | |
case XENMEM_access_x: | |
/* | |
- * If no r/w is permitted by mem_access, this was a fault caused by mem_access. | |
+ * If no r/w is permitted by mem_access, this was a fault caused by | |
+ * mem_access. | |
*/ | |
break; | |
case XENMEM_access_wx: | |
@@ -242,7 +235,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
return true; | |
/* Now check for mem_access violation. */ | |
- switch ( xma ) | |
+ switch (xma) | |
{ | |
case XENMEM_access_rwx: | |
violation = false; | |
@@ -279,14 +272,14 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
/* First, handle rx2rw and n2rwx conversion automatically. */ | |
if ( npfec.write_access && xma == XENMEM_access_rx2rw ) | |
{ | |
- rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, | |
- 0, ~0, XENMEM_access_rw, 0); | |
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, | |
+ XENMEM_access_rw, 0); | |
return false; | |
} | |
else if ( xma == XENMEM_access_n2rwx ) | |
{ | |
- rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, | |
- 0, ~0, XENMEM_access_rwx, 0); | |
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, | |
+ XENMEM_access_rwx, 0); | |
} | |
/* Otherwise, check if there is a vm_event monitor subscriber */ | |
@@ -295,9 +288,10 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
/* No listener */ | |
if ( p2m->access_required ) | |
{ | |
- gdprintk(XENLOG_INFO, "Memory access permissions failure, " | |
- "no vm_event listener VCPU %d, dom %d\n", | |
- v->vcpu_id, v->domain->domain_id); | |
+ gdprintk(XENLOG_INFO, | |
+ "Memory access permissions failure, " | |
+ "no vm_event listener VCPU %d, dom %d\n", | |
+ v->vcpu_id, v->domain->domain_id); | |
domain_crash(v->domain); | |
} | |
else | |
@@ -307,8 +301,8 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
{ | |
/* A listener is not required, so clear the access | |
* restrictions. */ | |
- rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, | |
- 0, ~0, XENMEM_access_rwx, 0); | |
+ rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1, 0, ~0, | |
+ XENMEM_access_rwx, 0); | |
} | |
} | |
@@ -323,7 +317,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
/* Send request to mem access subscriber */ | |
req->u.mem_access.gfn = gpa >> PAGE_SHIFT; | |
- req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1); | |
+ req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1); | |
if ( npfec.gla_valid ) | |
{ | |
req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID; | |
@@ -334,9 +328,9 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec) | |
else if ( npfec.kind == npfec_kind_in_gpt ) | |
req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT; | |
} | |
- req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; | |
- req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; | |
- req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; | |
+ req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; | |
+ req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; | |
+ req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; | |
if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 ) | |
domain_crash(v->domain); | |
@@ -362,20 +356,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, | |
static const p2m_access_t memaccess[] = { | |
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac | |
- ACCESS(n), | |
- ACCESS(r), | |
- ACCESS(w), | |
- ACCESS(rw), | |
- ACCESS(x), | |
- ACCESS(rx), | |
- ACCESS(wx), | |
- ACCESS(rwx), | |
- ACCESS(rx2rw), | |
- ACCESS(n2rwx), | |
+ ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw), ACCESS(x), | |
+ ACCESS(rx), ACCESS(wx), ACCESS(rwx), ACCESS(rx2rw), ACCESS(n2rwx), | |
#undef ACCESS | |
}; | |
- switch ( access ) | |
+ switch (access) | |
{ | |
case 0 ... ARRAY_SIZE(memaccess) - 1: | |
a = memaccess[access]; | |
@@ -408,7 +394,6 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, | |
p2m_type_t t; | |
mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, NULL); | |
- | |
if ( !mfn_eq(mfn, INVALID_MFN) ) | |
{ | |
order = 0; | |
@@ -441,8 +426,8 @@ long p2m_set_mem_access_multi(struct domain *d, | |
return -EOPNOTSUPP; | |
} | |
-int p2m_get_mem_access(struct domain *d, gfn_t gfn, | |
- xenmem_access_t *access, unsigned int altp2m_idx) | |
+int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access, | |
+ unsigned int altp2m_idx) | |
{ | |
int ret; | |
struct p2m_domain *p2m = p2m_get_hostp2m(d); | |
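
The ACCESS() tables this patch reflows use a C pattern worth calling out: designated array initializers keyed on the source enum, so the translation stays correct even if enumerators are reordered. A reduced, self-contained version (toy enums, not the real Xen ones):

#include <stdio.h>

enum p2m_access    { p2m_access_n, p2m_access_r, p2m_access_w, p2m_access_rw };
enum xenmem_access { XENMEM_access_n, XENMEM_access_r,
                     XENMEM_access_w, XENMEM_access_rw };

static const enum p2m_access memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
    ACCESS(n), ACCESS(r), ACCESS(w), ACCESS(rw),
#undef ACCESS
};

int main(void)
{
    /* Index by the guest-facing value, get the internal one. */
    printf("XENMEM_access_rw -> p2m_access %d\n",
           (int)memaccess[XENMEM_access_rw]);
    return 0;
}

This is also why the reflowed one-line form of the tables is safe: the [index] = value pairs carry the mapping, not their position in the initializer list.
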
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c | |
index 44258ad89c..d907a79420 100644 | |
--- a/xen/arch/arm/mm.c | |
+++ b/xen/arch/arm/mm.c | |
@@ -50,20 +50,20 @@ | |
#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn)) | |
#ifdef NDEBUG | |
-static inline void | |
-__attribute__ ((__format__ (__printf__, 1, 2))) | |
-mm_printk(const char *fmt, ...) {} | |
+static inline void __attribute__((__format__(__printf__, 1, 2))) | |
+mm_printk(const char *fmt, ...) | |
+{ | |
+} | |
#else | |
-#define mm_printk(fmt, args...) \ | |
- do \ | |
- { \ | |
- dprintk(XENLOG_ERR, fmt, ## args); \ | |
- WARN(); \ | |
- } while (0); | |
+#define mm_printk(fmt, args...) \ | |
+ do { \ | |
+ dprintk(XENLOG_ERR, fmt, ##args); \ | |
+ WARN(); \ | |
+ } while ( 0 ); | |
#endif | |
-#define DEFINE_PAGE_TABLES(name, nr) \ | |
-lpae_t __aligned(PAGE_SIZE) name[LPAE_ENTRIES * (nr)] | |
+#define DEFINE_PAGE_TABLES(name, nr) \ | |
+ lpae_t __aligned(PAGE_SIZE) name[LPAE_ENTRIES * (nr)] | |
#define DEFINE_PAGE_TABLE(name) DEFINE_PAGE_TABLES(name, 1) | |
@@ -116,7 +116,8 @@ static DEFINE_PAGE_TABLE(xen_first); | |
#else | |
#define HYP_PT_ROOT_LEVEL 1 | |
/* Per-CPU pagetable pages */ | |
-/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */ | |
+/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) | |
+ */ | |
static DEFINE_PER_CPU(lpae_t *, xen_pgtable); | |
#define THIS_CPU_PGTABLE this_cpu(xen_pgtable) | |
/* xen_dommap == pages used by map_domain_page; these pages contain | |
@@ -176,7 +177,8 @@ unsigned long total_pages; | |
extern char __init_begin[], __init_end[]; | |
/* Checking VA memory layout alignment. */ | |
-static inline void check_memory_layout_alignment_constraints(void) { | |
+static inline void check_memory_layout_alignment_constraints(void) | |
+{ | |
/* 2MB aligned regions */ | |
BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK); | |
BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK); | |
@@ -198,18 +200,14 @@ static inline void check_memory_layout_alignment_constraints(void) { | |
#endif | |
} | |
-void dump_pt_walk(paddr_t ttbr, paddr_t addr, | |
- unsigned int root_level, | |
+void dump_pt_walk(paddr_t ttbr, paddr_t addr, unsigned int root_level, | |
unsigned int nr_root_tables) | |
{ | |
- static const char *level_strs[4] = { "0TH", "1ST", "2ND", "3RD" }; | |
+ static const char *level_strs[4] = {"0TH", "1ST", "2ND", "3RD"}; | |
const mfn_t root_mfn = maddr_to_mfn(ttbr); | |
const unsigned int offsets[4] = { | |
- zeroeth_table_offset(addr), | |
- first_table_offset(addr), | |
- second_table_offset(addr), | |
- third_table_offset(addr) | |
- }; | |
+ zeroeth_table_offset(addr), first_table_offset(addr), | |
+ second_table_offset(addr), third_table_offset(addr)}; | |
lpae_t pte, *mapping; | |
unsigned int level, root_table; | |
@@ -239,15 +237,15 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr, | |
mapping = map_domain_page(mfn_add(root_mfn, root_table)); | |
- for ( level = root_level; ; level++ ) | |
+ for ( level = root_level;; level++ ) | |
{ | |
if ( offsets[level] > LPAE_ENTRIES ) | |
break; | |
pte = mapping[offsets[level]]; | |
- printk("%s[0x%x] = 0x%"PRIpaddr"\n", | |
- level_strs[level], offsets[level], pte.bits); | |
+ printk("%s[0x%x] = 0x%" PRIpaddr "\n", level_strs[level], | |
+ offsets[level], pte.bits); | |
if ( level == 3 || !pte.walk.valid || !pte.walk.table ) | |
break; | |
@@ -265,14 +263,14 @@ void dump_hyp_walk(vaddr_t addr) | |
uint64_t ttbr = READ_SYSREG64(TTBR0_EL2); | |
lpae_t *pgtable = THIS_CPU_PGTABLE; | |
- printk("Walking Hypervisor VA 0x%"PRIvaddr" " | |
- "on CPU%d via TTBR 0x%016"PRIx64"\n", | |
+ printk("Walking Hypervisor VA 0x%" PRIvaddr " " | |
+ "on CPU%d via TTBR 0x%016" PRIx64 "\n", | |
addr, smp_processor_id(), ttbr); | |
if ( smp_processor_id() == 0 ) | |
- BUG_ON( (lpae_t *)(unsigned long)(ttbr - phys_offset) != pgtable ); | |
+ BUG_ON((lpae_t *)(unsigned long)(ttbr - phys_offset) != pgtable); | |
else | |
- BUG_ON( virt_to_maddr(pgtable) != ttbr ); | |
+ BUG_ON(virt_to_maddr(pgtable) != ttbr); | |
dump_pt_walk(ttbr, addr, HYP_PT_ROOT_LEVEL, 1); | |
} | |
@@ -283,20 +281,20 @@ void dump_hyp_walk(vaddr_t addr) | |
*/ | |
static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) | |
{ | |
- lpae_t e = (lpae_t) { | |
- .pt = { | |
- .valid = 1, /* Mappings are present */ | |
- .table = 0, /* Set to 1 for links and 4k maps */ | |
- .ai = attr, | |
- .ns = 1, /* Hyp mode is in the non-secure world */ | |
- .up = 1, /* See below */ | |
- .ro = 0, /* Assume read-write */ | |
- .af = 1, /* No need for access tracking */ | |
- .ng = 1, /* Makes TLB flushes easier */ | |
- .contig = 0, /* Assume non-contiguous */ | |
- .xn = 1, /* No need to execute outside .text */ | |
- .avail = 0, /* Reference count for domheap mapping */ | |
- }}; | |
+ lpae_t e = | |
+ (lpae_t){.pt = { | |
+ .valid = 1, /* Mappings are present */ | |
+ .table = 0, /* Set to 1 for links and 4k maps */ | |
+ .ai = attr, | |
+ .ns = 1, /* Hyp mode is in the non-secure world */ | |
+ .up = 1, /* See below */ | |
+ .ro = 0, /* Assume read-write */ | |
+ .af = 1, /* No need for access tracking */ | |
+ .ng = 1, /* Makes TLB flushes easier */ | |
+ .contig = 0, /* Assume non-contiguous */ | |
+ .xn = 1, /* No need to execute outside .text */ | |
+ .avail = 0, /* Reference count for domheap mapping */ | |
+ }}; | |
/* | |
* For EL2 stage-1 page table, up (aka AP[1]) is RES1 as the translation | |
* regime applies to only one exception level (see D4.4.4 and G4.6.1 | |
@@ -304,7 +302,7 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) | |
* hard-coded values in head.S too. | |
*/ | |
- switch ( attr ) | |
+ switch (attr) | |
{ | |
case MT_NORMAL_NC: | |
/* | |
@@ -333,7 +331,7 @@ static inline lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned attr) | |
e.pt.sh = LPAE_SH_OUTER; | |
break; | |
default: | |
- e.pt.sh = LPAE_SH_INNER; /* Xen mappings are SMP coherent */ | |
+ e.pt.sh = LPAE_SH_INNER; /* Xen mappings are SMP coherent */ | |
break; | |
} | |
@@ -368,8 +366,7 @@ void clear_fixmap(unsigned map) | |
* Size must be a multiple of mapping_size. | |
* second must be a contiguous set of second level page tables | |
* covering the region starting at virt_offset. */ | |
-static void __init create_mappings(lpae_t *second, | |
- unsigned long virt_offset, | |
+static void __init create_mappings(lpae_t *second, unsigned long virt_offset, | |
unsigned long base_mfn, | |
unsigned long nr_mfns, | |
unsigned int mapping_size) | |
@@ -387,7 +384,7 @@ static void __init create_mappings(lpae_t *second, | |
p = second + second_linear_offset(virt_offset); | |
pte = mfn_to_xen_entry(_mfn(base_mfn), MT_NORMAL); | |
if ( granularity == 16 * LPAE_ENTRIES ) | |
- pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ | |
+ pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ | |
for ( i = 0; i < count; i++ ) | |
{ | |
write_pte(p + i, pte); | |
@@ -424,11 +421,9 @@ void *map_domain_page(mfn_t mfn) | |
* PTE as a reference count; when the refcount is zero the slot can | |
* be reused. */ | |
for ( slot = (slot_mfn >> LPAE_SHIFT) % DOMHEAP_ENTRIES, i = 0; | |
- i < DOMHEAP_ENTRIES; | |
- slot = (slot + 1) % DOMHEAP_ENTRIES, i++ ) | |
+ i < DOMHEAP_ENTRIES; slot = (slot + 1) % DOMHEAP_ENTRIES, i++ ) | |
{ | |
- if ( map[slot].pt.avail < 0xf && | |
- map[slot].pt.base == slot_mfn && | |
+ if ( map[slot].pt.avail < 0xf && map[slot].pt.base == slot_mfn && | |
map[slot].pt.valid ) | |
{ | |
/* This slot already points to the right place; reuse it */ | |
@@ -443,7 +438,6 @@ void *map_domain_page(mfn_t mfn) | |
write_pte(map + slot, pte); | |
break; | |
} | |
- | |
} | |
/* If the map fills up, the callers have misbehaved. */ | |
BUG_ON(i == DOMHEAP_ENTRIES); | |
@@ -463,9 +457,8 @@ void *map_domain_page(mfn_t mfn) | |
local_irq_restore(flags); | |
- va = (DOMHEAP_VIRT_START | |
- + (slot << SECOND_SHIFT) | |
- + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT)); | |
+ va = (DOMHEAP_VIRT_START + (slot << SECOND_SHIFT) + | |
+ ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT)); | |
/* | |
* We may not have flushed this specific subpage at map time, | |
@@ -481,7 +474,7 @@ void unmap_domain_page(const void *va) | |
{ | |
unsigned long flags; | |
lpae_t *map = this_cpu(xen_dommap); | |
- int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; | |
+ int slot = ((unsigned long)va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; | |
local_irq_save(flags); | |
@@ -498,7 +491,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr) | |
unsigned long va = (unsigned long)ptr; | |
lpae_t *map = this_cpu(xen_dommap); | |
int slot = (va - DOMHEAP_VIRT_START) >> SECOND_SHIFT; | |
- unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK; | |
+ unsigned long offset = (va >> THIRD_SHIFT) & LPAE_ENTRY_MASK; | |
if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END ) | |
return virt_to_mfn(va); | |
@@ -536,7 +529,7 @@ static inline lpae_t pte_of_xenaddr(vaddr_t va) | |
return mfn_to_xen_entry(maddr_to_mfn(ma), MT_NORMAL); | |
} | |
-void * __init early_fdt_map(paddr_t fdt_paddr) | |
+void *__init early_fdt_map(paddr_t fdt_paddr) | |
{ | |
/* We are using 2MB superpage for mapping the FDT */ | |
paddr_t base_paddr = fdt_paddr & SECOND_MASK; | |
@@ -574,8 +567,8 @@ void * __init early_fdt_map(paddr_t fdt_paddr) | |
if ( (offset + size) > SZ_2M ) | |
{ | |
create_mappings(xen_second, BOOT_FDT_VIRT_START + SZ_2M, | |
- paddr_to_pfn(base_paddr + SZ_2M), | |
- SZ_2M >> PAGE_SHIFT, SZ_2M); | |
+ paddr_to_pfn(base_paddr + SZ_2M), SZ_2M >> PAGE_SHIFT, | |
+ SZ_2M); | |
} | |
return fdt_virt; | |
@@ -627,19 +620,19 @@ void __init setup_pagetables(unsigned long boot_phys_offset) | |
phys_offset = boot_phys_offset; | |
#ifdef CONFIG_ARM_64 | |
- p = (void *) xen_pgtable; | |
+ p = (void *)xen_pgtable; | |
p[0] = pte_of_xenaddr((uintptr_t)xen_first); | |
p[0].pt.table = 1; | |
p[0].pt.xn = 0; | |
- p = (void *) xen_first; | |
+ p = (void *)xen_first; | |
#else | |
- p = (void *) cpu0_pgtable; | |
+ p = (void *)cpu0_pgtable; | |
#endif | |
/* Initialise first level entries, to point to second level entries */ | |
- for ( i = 0; i < 2; i++) | |
+ for ( i = 0; i < 2; i++ ) | |
{ | |
- p[i] = pte_of_xenaddr((uintptr_t)(xen_second+i*LPAE_ENTRIES)); | |
+ p[i] = pte_of_xenaddr((uintptr_t)(xen_second + i * LPAE_ENTRIES)); | |
p[i].pt.table = 1; | |
p[i].pt.xn = 0; | |
} | |
@@ -647,9 +640,10 @@ void __init setup_pagetables(unsigned long boot_phys_offset) | |
#ifdef CONFIG_ARM_32 | |
for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ ) | |
{ | |
- p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)] | |
- = pte_of_xenaddr((uintptr_t)(cpu0_dommap+i*LPAE_ENTRIES)); | |
- p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)].pt.table = 1; | |
+ p[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)] = | |
+ pte_of_xenaddr((uintptr_t)(cpu0_dommap + i * LPAE_ENTRIES)); | |
+ p[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)].pt.table = | |
+ 1; | |
} | |
#endif | |
@@ -685,9 +679,9 @@ void __init setup_pagetables(unsigned long boot_phys_offset) | |
xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte; | |
#ifdef CONFIG_ARM_64 | |
- ttbr = (uintptr_t) xen_pgtable + phys_offset; | |
+ ttbr = (uintptr_t)xen_pgtable + phys_offset; | |
#else | |
- ttbr = (uintptr_t) cpu0_pgtable + phys_offset; | |
+ ttbr = (uintptr_t)cpu0_pgtable + phys_offset; | |
#endif | |
switch_ttbr(ttbr); | |
@@ -715,7 +709,7 @@ int init_secondary_pagetables(int cpu) | |
{ | |
/* Set init_ttbr for this CPU coming up. All CPUs share a single set of | |
* pagetables, but rewrite it each time for consistency with 32 bit. */ | |
- init_ttbr = (uintptr_t) xen_pgtable + phys_offset; | |
+ init_ttbr = (uintptr_t)xen_pgtable + phys_offset; | |
clean_dcache(init_ttbr); | |
return 0; | |
} | |
@@ -725,13 +719,16 @@ int init_secondary_pagetables(int cpu) | |
lpae_t *first, *domheap, pte; | |
int i; | |
- first = alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */ | |
- domheap = alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0); | |
+ first = | |
+ alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */ | |
+ domheap = | |
+ alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0); | |
if ( domheap == NULL || first == NULL ) | |
{ | |
printk("Not enough free memory for secondary CPU%d pagetables\n", cpu); | |
- free_xenheap_pages(domheap, get_order_from_pages(DOMHEAP_SECOND_PAGES)); | |
+ free_xenheap_pages(domheap, | |
+ get_order_from_pages(DOMHEAP_SECOND_PAGES)); | |
free_xenheap_page(first); | |
return -ENOMEM; | |
} | |
@@ -740,16 +737,18 @@ int init_secondary_pagetables(int cpu) | |
memcpy(first, cpu0_pgtable, PAGE_SIZE); | |
/* Ensure the domheap has no stray mappings */ | |
- memset(domheap, 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE); | |
+ memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE); | |
/* Update the first level mapping to reference the local CPUs | |
* domheap mapping pages. */ | |
for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ ) | |
{ | |
- pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES), | |
+ pte = mfn_to_xen_entry(virt_to_mfn(domheap + i * LPAE_ENTRIES), | |
MT_NORMAL); | |
pte.pt.table = 1; | |
- write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte); | |
+ write_pte( | |
+ &first[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)], | |
+ pte); | |
} | |
per_cpu(xen_pgtable, cpu) = first; | |
@@ -790,14 +789,14 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, | |
vaddr_t vaddr; | |
/* Align to previous 1GB boundary */ | |
- mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1); | |
+ mfn = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1); | |
/* First call sets the xenheap physical and virtual offset. */ | |
if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) ) | |
{ | |
xenheap_mfn_start = _mfn(base_mfn); | |
- xenheap_virt_start = DIRECTMAP_VIRT_START + | |
- (base_mfn - mfn) * PAGE_SIZE; | |
+ xenheap_virt_start = | |
+ DIRECTMAP_VIRT_START + (base_mfn - mfn) * PAGE_SIZE; | |
} | |
if ( base_mfn < mfn_x(xenheap_mfn_start) ) | |
@@ -821,10 +820,11 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, | |
{ | |
/* mfn_to_virt is not valid on the 1st mfn, since it | |
* is not within the xenheap. */ | |
- first = slot == xenheap_first_first_slot ? | |
- xenheap_first_first : mfn_to_virt(lpae_get_mfn(*p)); | |
+ first = slot == xenheap_first_first_slot | |
+ ? xenheap_first_first | |
+ : mfn_to_virt(lpae_get_mfn(*p)); | |
} | |
- else if ( xenheap_first_first_slot == -1) | |
+ else if ( xenheap_first_first_slot == -1 ) | |
{ | |
/* Use xenheap_first_first to bootstrap the mappings */ | |
first = xenheap_first_first; | |
@@ -850,7 +850,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, | |
/* TODO: Set pte.pt.contig when appropriate. */ | |
write_pte(&first[first_table_offset(vaddr)], pte); | |
- mfn += FIRST_SIZE>>PAGE_SHIFT; | |
+ mfn += FIRST_SIZE >> PAGE_SHIFT; | |
vaddr += FIRST_SIZE; | |
} | |
@@ -865,7 +865,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) | |
mfn_to_pdx(maddr_to_mfn(ps)) + 1; | |
unsigned long frametable_size = nr_pdxs * sizeof(struct page_info); | |
mfn_t base_mfn; | |
- const unsigned long mapping_size = frametable_size < MB(32) ? MB(2) : MB(32); | |
+ const unsigned long mapping_size = | |
+ frametable_size < MB(32) ? MB(2) : MB(32); | |
#ifdef CONFIG_ARM_64 | |
lpae_t *second, pte; | |
unsigned long nr_second; | |
@@ -876,7 +877,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) | |
frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps)); | |
/* Round up to 2M or 32M boundary, as appropriate. */ | |
frametable_size = ROUNDUP(frametable_size, mapping_size); | |
- base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12)); | |
+ base_mfn = | |
+ alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32 << (20 - 12)); | |
#ifdef CONFIG_ARM_64 | |
/* Compute the number of second level pages. */ | |
@@ -888,7 +890,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) | |
clear_page(mfn_to_virt(mfn_add(second_base, i))); | |
pte = mfn_to_xen_entry(mfn_add(second_base, i), MT_NORMAL); | |
pte.pt.table = 1; | |
- write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte); | |
+ write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START) + i], | |
+ pte); | |
} | |
create_mappings(second, 0, mfn_x(base_mfn), frametable_size >> PAGE_SHIFT, | |
mapping_size); | |
@@ -901,7 +904,8 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) | |
memset(&frame_table[nr_pdxs], -1, | |
frametable_size - (nr_pdxs * sizeof(struct page_info))); | |
- frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info)); | |
+ frametable_virt_end = | |
+ FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info)); | |
} | |
void *__init arch_vmap_virt_end(void) | |
@@ -1017,15 +1021,17 @@ static bool xen_pt_check_entry(lpae_t entry, mfn_t mfn, unsigned int flags) | |
/* We don't allow changing memory attributes. */ | |
if ( entry.pt.ai != PAGE_AI_MASK(flags) ) | |
{ | |
- mm_printk("Modifying memory attributes is not allowed (0x%x -> 0x%x).\n", | |
- entry.pt.ai, PAGE_AI_MASK(flags)); | |
+ mm_printk( | |
+ "Modifying memory attributes is not allowed (0x%x -> 0x%x).\n", | |
+ entry.pt.ai, PAGE_AI_MASK(flags)); | |
return false; | |
} | |
/* We don't allow modifying entry with contiguous bit set. */ | |
if ( entry.pt.contig ) | |
{ | |
- mm_printk("Modifying entry with contiguous bit set is not allowed.\n"); | |
+ mm_printk( | |
+ "Modifying entry with contiguous bit set is not allowed.\n"); | |
return false; | |
} | |
} | |
@@ -1038,13 +1044,15 @@ static bool xen_pt_check_entry(lpae_t entry, mfn_t mfn, unsigned int flags) | |
/* We don't allow replacing any valid entry. */ | |
if ( lpae_is_valid(entry) ) | |
{ | |
- mm_printk("Changing MFN for a valid entry is not allowed (%#"PRI_mfn" -> %#"PRI_mfn").\n", | |
- mfn_x(lpae_get_mfn(entry)), mfn_x(mfn)); | |
+ mm_printk( | |
+ "Changing MFN for a valid entry is not allowed (%#" PRI_mfn | |
+ " -> %#" PRI_mfn ").\n", | |
+ mfn_x(lpae_get_mfn(entry)), mfn_x(mfn)); | |
return false; | |
} | |
} | |
/* Sanity check when removing a page. */ | |
- else if ( (flags & (_PAGE_PRESENT|_PAGE_POPULATE)) == 0 ) | |
+ else if ( (flags & (_PAGE_PRESENT | _PAGE_POPULATE)) == 0 ) | |
{ | |
/* We should be here with an invalid MFN. */ | |
ASSERT(mfn_eq(mfn, INVALID_MFN)); | |
@@ -1052,7 +1060,8 @@ static bool xen_pt_check_entry(lpae_t entry, mfn_t mfn, unsigned int flags) | |
/* We don't allow removing page with contiguous bit set. */ | |
if ( entry.pt.contig ) | |
{ | |
- mm_printk("Removing entry with contiguous bit set is not allowed.\n"); | |
+ mm_printk( | |
+ "Removing entry with contiguous bit set is not allowed.\n"); | |
return false; | |
} | |
} | |
@@ -1067,8 +1076,8 @@ static bool xen_pt_check_entry(lpae_t entry, mfn_t mfn, unsigned int flags) | |
return true; | |
} | |
-static int xen_pt_update_entry(mfn_t root, unsigned long virt, | |
- mfn_t mfn, unsigned int flags) | |
+static int xen_pt_update_entry(mfn_t root, unsigned long virt, mfn_t mfn, | |
+ unsigned int flags) | |
{ | |
int rc; | |
unsigned int level; | |
@@ -1087,7 +1096,8 @@ static int xen_pt_update_entry(mfn_t root, unsigned long virt, | |
DECLARE_OFFSETS(offsets, (paddr_t)virt); | |
/* _PAGE_POPULATE and _PAGE_PRESENT should never be set together. */ | |
- ASSERT((flags & (_PAGE_POPULATE|_PAGE_PRESENT)) != (_PAGE_POPULATE|_PAGE_PRESENT)); | |
+ ASSERT((flags & (_PAGE_POPULATE | _PAGE_PRESENT)) != | |
+ (_PAGE_POPULATE | _PAGE_PRESENT)); | |
table = xen_map_table(root); | |
for ( level = HYP_PT_ROOT_LEVEL; level < target; level++ ) | |
@@ -1102,7 +1112,7 @@ static int xen_pt_update_entry(mfn_t root, unsigned long virt, | |
* removing a mapping as it may not exist in the page table. | |
* In this case, just ignore it. | |
*/ | |
- if ( flags & (_PAGE_PRESENT|_PAGE_POPULATE) ) | |
+ if ( flags & (_PAGE_PRESENT | _PAGE_POPULATE) ) | |
{ | |
mm_printk("%s: Unable to map level %u\n", __func__, level); | |
rc = -ENOENT; | |
@@ -1169,9 +1179,7 @@ out: | |
static DEFINE_SPINLOCK(xen_pt_lock); | |
-static int xen_pt_update(unsigned long virt, | |
- mfn_t mfn, | |
- unsigned long nr_mfns, | |
+static int xen_pt_update(unsigned long virt, mfn_t mfn, unsigned long nr_mfns, | |
unsigned int flags) | |
{ | |
int rc = 0; | |
@@ -1229,9 +1237,7 @@ static int xen_pt_update(unsigned long virt, | |
return rc; | |
} | |
-int map_pages_to_xen(unsigned long virt, | |
- mfn_t mfn, | |
- unsigned long nr_mfns, | |
+int map_pages_to_xen(unsigned long virt, mfn_t mfn, unsigned long nr_mfns, | |
unsigned int flags) | |
{ | |
return xen_pt_update(virt, mfn, nr_mfns, flags); | |
@@ -1291,21 +1297,22 @@ void free_init_memory(void) | |
panic("Unable to remove the init section (rc = %d)\n", rc); | |
init_domheap_pages(pa, pa + len); | |
- printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10); | |
+ printk("Freed %ldkB init memory.\n", | |
+ (long)(__init_end - __init_begin) >> 10); | |
} | |
void arch_dump_shared_mem_info(void) | |
{ | |
} | |
-int donate_page(struct domain *d, struct page_info *page, unsigned int memflags) | |
+int donate_page(struct domain *d, struct page_info *page, | |
+ unsigned int memflags) | |
{ | |
ASSERT_UNREACHABLE(); | |
return -ENOSYS; | |
} | |
-int steal_page( | |
- struct domain *d, struct page_info *page, unsigned int memflags) | |
+int steal_page(struct domain *d, struct page_info *page, unsigned int memflags) | |
{ | |
return -EOPNOTSUPP; | |
} | |
@@ -1349,19 +1356,16 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d, | |
spin_unlock(&d->page_alloc_lock); | |
} | |
-int xenmem_add_to_physmap_one( | |
- struct domain *d, | |
- unsigned int space, | |
- union xen_add_to_physmap_batch_extra extra, | |
- unsigned long idx, | |
- gfn_t gfn) | |
+int xenmem_add_to_physmap_one(struct domain *d, unsigned int space, | |
+ union xen_add_to_physmap_batch_extra extra, | |
+ unsigned long idx, gfn_t gfn) | |
{ | |
mfn_t mfn = INVALID_MFN; | |
int rc; | |
p2m_type_t t; | |
struct page_info *page = NULL; | |
- switch ( space ) | |
+ switch (space) | |
{ | |
case XENMAPSPACE_grant_table: | |
rc = gnttab_map_frame(d, idx, gfn, &mfn); | |
@@ -1452,7 +1456,7 @@ int xenmem_add_to_physmap_one( | |
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) | |
{ | |
- switch ( op ) | |
+ switch (op) | |
{ | |
/* XXX: memsharing not working yet */ | |
case XENMEM_get_sharing_shared_pages: | |
@@ -1479,8 +1483,7 @@ struct domain *page_get_owner_and_reference(struct page_info *page) | |
*/ | |
if ( unlikely(((x + 1) & PGC_count_mask) <= 1) ) | |
return NULL; | |
- } | |
- while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); | |
+ } while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x ); | |
owner = page_get_owner(page); | |
ASSERT(owner); | |
@@ -1494,10 +1497,9 @@ void put_page(struct page_info *page) | |
do { | |
ASSERT((y & PGC_count_mask) != 0); | |
- x = y; | |
+ x = y; | |
nx = x - 1; | |
- } | |
- while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); | |
+ } while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); | |
if ( unlikely((nx & PGC_count_mask) == 0) ) | |
{ | |
@@ -1537,14 +1539,14 @@ int create_grant_host_mapping(unsigned long addr, mfn_t frame, | |
int rc; | |
p2m_type_t t = p2m_grant_map_rw; | |
- if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map ) | |
+ if ( cache_flags || (flags & ~GNTMAP_readonly) != GNTMAP_host_map ) | |
return GNTST_general_error; | |
if ( flags & GNTMAP_readonly ) | |
t = p2m_grant_map_ro; | |
- rc = guest_physmap_add_entry(current->domain, gaddr_to_gfn(addr), | |
- frame, 0, t); | |
+ rc = guest_physmap_add_entry(current->domain, gaddr_to_gfn(addr), frame, 0, | |
+ t); | |
if ( rc ) | |
return GNTST_general_error; | |
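
The map_domain_page() loop reflowed above deserves a note: the starting slot is a hash of the 2MB-aligned MFN, and the walk is a linear probe that reuses any slot whose PTE already maps the wanted superpage (pt.avail acts as a refcount). A sketch of just the probe order, with an assumed slot count:

#include <stdio.h>

#define LPAE_SHIFT      9
#define DOMHEAP_ENTRIES 32 /* assumed number of 2MB domheap slots */

static unsigned int first_slot(unsigned long slot_mfn)
{
    /* Same hash as the hunk above: the shift drops the in-superpage bits. */
    return (slot_mfn >> LPAE_SHIFT) % DOMHEAP_ENTRIES;
}

int main(void)
{
    unsigned long slot_mfn = 0x80040; /* hypothetical 2MB-aligned MFN */
    unsigned int slot = first_slot(slot_mfn);

    for ( unsigned int i = 0; i < 4; i++ )
        printf("probe %u -> slot %u\n", i, (slot + i) % DOMHEAP_ENTRIES);
    return 0;
}
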
diff --git a/xen/arch/arm/monitor.c b/xen/arch/arm/monitor.c | |
index 8c4a396e3c..67c0bd66a9 100644 | |
--- a/xen/arch/arm/monitor.c | |
+++ b/xen/arch/arm/monitor.c | |
@@ -30,7 +30,7 @@ int arch_monitor_domctl_event(struct domain *d, | |
struct arch_domain *ad = &d->arch; | |
bool requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op); | |
- switch ( mop->event ) | |
+ switch (mop->event) | |
{ | |
case XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL: | |
{ | |
@@ -59,9 +59,7 @@ int arch_monitor_domctl_event(struct domain *d, | |
int monitor_smc(void) | |
{ | |
- vm_event_request_t req = { | |
- .reason = VM_EVENT_REASON_PRIVILEGED_CALL | |
- }; | |
+ vm_event_request_t req = {.reason = VM_EVENT_REASON_PRIVILEGED_CALL}; | |
return monitor_traps(current, 1, &req); | |
} | |
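
A small C point behind the compound literal in monitor_smc() above: a designated initializer zeroes every member it does not name, so the request reaches the monitor with all unset fields cleared rather than holding stack garbage. A sketch with a stand-in struct (the reason value is made up):

#include <stdio.h>

#define VM_EVENT_REASON_PRIVILEGED_CALL 1 /* stand-in, not the real value */

typedef struct {
    unsigned int reason;
    unsigned int flags; /* left unnamed below, guaranteed zero */
} vm_event_request_t;

int main(void)
{
    vm_event_request_t req = { .reason = VM_EVENT_REASON_PRIVILEGED_CALL };

    printf("reason=%u flags=%u\n", req.reason, req.flags);
    return 0;
}
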
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c | |
index e28ea1c85a..a60ed91974 100644 | |
--- a/xen/arch/arm/p2m.c | |
+++ b/xen/arch/arm/p2m.c | |
@@ -11,7 +11,7 @@ | |
#include <asm/guest_walk.h> | |
#include <asm/page.h> | |
-#define MAX_VMID_8_BIT (1UL << 8) | |
+#define MAX_VMID_8_BIT (1UL << 8) | |
#define MAX_VMID_16_BIT (1UL << 16) | |
#define INVALID_VMID 0 /* VMID 0 is reserved */ | |
@@ -19,28 +19,28 @@ | |
#ifdef CONFIG_ARM_64 | |
static unsigned int __read_mostly p2m_root_order; | |
static unsigned int __read_mostly p2m_root_level; | |
-#define P2M_ROOT_ORDER p2m_root_order | |
+#define P2M_ROOT_ORDER p2m_root_order | |
#define P2M_ROOT_LEVEL p2m_root_level | |
static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT; | |
/* VMID is by default 8 bit width on AArch64 */ | |
-#define MAX_VMID max_vmid | |
+#define MAX_VMID max_vmid | |
#else | |
/* First level P2M is always 2 consecutive pages */ | |
#define P2M_ROOT_LEVEL 1 | |
-#define P2M_ROOT_ORDER 1 | |
+#define P2M_ROOT_ORDER 1 | |
/* VMID is always 8 bit width on AArch32 */ | |
-#define MAX_VMID MAX_VMID_8_BIT | |
+#define MAX_VMID MAX_VMID_8_BIT | |
#endif | |
-#define P2M_ROOT_PAGES (1<<P2M_ROOT_ORDER) | |
+#define P2M_ROOT_PAGES (1 << P2M_ROOT_ORDER) | |
unsigned int __read_mostly p2m_ipa_bits; | |
/* Helpers to lookup the properties of each level */ | |
-static const paddr_t level_masks[] = | |
- { ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK }; | |
-static const uint8_t level_orders[] = | |
- { ZEROETH_ORDER, FIRST_ORDER, SECOND_ORDER, THIRD_ORDER }; | |
+static const paddr_t level_masks[] = {ZEROETH_MASK, FIRST_MASK, SECOND_MASK, | |
+ THIRD_MASK}; | |
+static const uint8_t level_orders[] = {ZEROETH_ORDER, FIRST_ORDER, | |
+ SECOND_ORDER, THIRD_ORDER}; | |
static mfn_t __read_mostly empty_root_mfn; | |
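The level_masks/level_orders arrays above are per-level lookup tables for the stage-2 LPAE walk. Assuming a 4K granule with 9 translation bits resolved per level (the concrete order values below are that assumption, not values copied from the headers), an entry at level 1/2/3 covers 1G/2M/4K, which is exactly the breakdown p2m_dump_info() prints in the next hunk:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* 4K granule */

    /* Assumed per-level orders for a 4K granule, levels 0..3. */
    static const unsigned int level_orders[] = { 27, 18, 9, 0 };

    static unsigned long long level_coverage(unsigned int level)
    {
        /* Pages per entry is 1 << order; bytes is that << PAGE_SHIFT. */
        return 1ULL << (PAGE_SHIFT + level_orders[level]);
    }

    int main(void)
    {
        unsigned int level;

        for ( level = 1; level <= 3; level++ )
            printf("level %u entry covers 0x%llx bytes\n", level,
                   level_coverage(level));
        /* 0x40000000 (1G), 0x200000 (2M), 0x1000 (4K). */
        return 0;
    }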
@@ -67,13 +67,12 @@ void p2m_dump_info(struct domain *d) | |
struct p2m_domain *p2m = p2m_get_hostp2m(d); | |
p2m_read_lock(p2m); | |
- printk("p2m mappings for domain %d (vmid %d):\n", | |
- d->domain_id, p2m->vmid); | |
+ printk("p2m mappings for domain %d (vmid %d):\n", d->domain_id, p2m->vmid); | |
BUG_ON(p2m->stats.mappings[0] || p2m->stats.shattered[0]); | |
- printk(" 1G mappings: %ld (shattered %ld)\n", | |
- p2m->stats.mappings[1], p2m->stats.shattered[1]); | |
- printk(" 2M mappings: %ld (shattered %ld)\n", | |
- p2m->stats.mappings[2], p2m->stats.shattered[2]); | |
+ printk(" 1G mappings: %ld (shattered %ld)\n", p2m->stats.mappings[1], | |
+ p2m->stats.shattered[1]); | |
+ printk(" 2M mappings: %ld (shattered %ld)\n", p2m->stats.mappings[2], | |
+ p2m->stats.shattered[2]); | |
printk(" 4K mappings: %ld\n", p2m->stats.mappings[3]); | |
p2m_read_unlock(p2m); | |
} | |
@@ -86,13 +85,13 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr) | |
{ | |
struct p2m_domain *p2m = p2m_get_hostp2m(d); | |
- printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr); | |
+ printk("dom%d IPA 0x%" PRIpaddr "\n", d->domain_id, addr); | |
- printk("P2M @ %p mfn:%#"PRI_mfn"\n", | |
- p2m->root, mfn_x(page_to_mfn(p2m->root))); | |
+ printk("P2M @ %p mfn:%#" PRI_mfn "\n", p2m->root, | |
+ mfn_x(page_to_mfn(p2m->root))); | |
- dump_pt_walk(page_to_maddr(p2m->root), addr, | |
- P2M_ROOT_LEVEL, P2M_ROOT_PAGES); | |
+ dump_pt_walk(page_to_maddr(p2m->root), addr, P2M_ROOT_LEVEL, | |
+ P2M_ROOT_PAGES); | |
} | |
/* | |
@@ -106,7 +105,8 @@ void p2m_save_state(struct vcpu *p) | |
if ( cpus_have_const_cap(ARM64_WORKAROUND_AT_SPECULATE) ) | |
{ | |
- WRITE_SYSREG64(generate_vttbr(INVALID_VMID, empty_root_mfn), VTTBR_EL2); | |
+ WRITE_SYSREG64(generate_vttbr(INVALID_VMID, empty_root_mfn), | |
+ VTTBR_EL2); | |
/* | |
* Ensure VTTBR_EL2 is correctly synchronized so we can restore | |
* the next vCPU context without worrying about AT instruction | |
@@ -222,8 +222,7 @@ void p2m_tlb_flush_sync(struct p2m_domain *p2m) | |
* The function will return NULL if the offset of the root table is | |
* invalid. | |
*/ | |
-static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m, | |
- gfn_t gfn) | |
+static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m, gfn_t gfn) | |
{ | |
unsigned int root_table; | |
@@ -355,10 +354,8 @@ static int p2m_next_level(struct p2m_domain *p2m, bool read_only, | |
* valid will contain the value of bit[0] (i.e. the valid bit) of the | |
* entry. | |
*/ | |
-mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, | |
- p2m_type_t *t, p2m_access_t *a, | |
- unsigned int *page_order, | |
- bool *valid) | |
+mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t, | |
+ p2m_access_t *a, unsigned int *page_order, bool *valid) | |
{ | |
paddr_t addr = gfn_to_gaddr(gfn); | |
unsigned int level = 0; | |
@@ -484,8 +481,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn, | |
return get_page(page, d) ? page : NULL; | |
} | |
-int guest_physmap_mark_populate_on_demand(struct domain *d, | |
- unsigned long gfn, | |
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, | |
unsigned int order) | |
{ | |
return -ENOSYS; | |
@@ -500,7 +496,7 @@ unsigned long p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, | |
static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a) | |
{ | |
/* First apply type permissions */ | |
- switch ( t ) | |
+ switch (t) | |
{ | |
case p2m_ram_rw: | |
e->p2m.xn = 0; | |
@@ -536,7 +532,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a) | |
} | |
/* Then restrict with access permissions */ | |
- switch ( a ) | |
+ switch (a) | |
{ | |
case p2m_access_rwx: | |
break; | |
@@ -576,7 +572,7 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a) | |
* sh, xn and write bit will be defined in the following switches | |
* based on mattr and t. | |
*/ | |
- lpae_t e = (lpae_t) { | |
+ lpae_t e = (lpae_t){ | |
.p2m.af = 1, | |
.p2m.read = 1, | |
.p2m.table = 1, | |
@@ -586,7 +582,7 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a) | |
BUILD_BUG_ON(p2m_max_real_type > (1 << 4)); | |
- switch ( t ) | |
+ switch (t) | |
{ | |
case p2m_mmio_direct_dev: | |
e.p2m.mattr = MATTR_DEV; | |
@@ -705,8 +701,7 @@ static int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn, | |
{ | |
/* If a setting already exists, change it to the new one */ | |
radix_tree_replace_slot( | |
- radix_tree_lookup_slot( | |
- &p2m->mem_access_settings, gfn_x(gfn)), | |
+ radix_tree_lookup_slot(&p2m->mem_access_settings, gfn_x(gfn)), | |
radix_tree_int_to_ptr(a)); | |
rc = 0; | |
} | |
@@ -740,8 +735,8 @@ static void p2m_put_l3_page(const lpae_t pte) | |
} | |
/* Free lpae sub-tree behind an entry */ | |
-static void p2m_free_entry(struct p2m_domain *p2m, | |
- lpae_t entry, unsigned int level) | |
+static void p2m_free_entry(struct p2m_domain *p2m, lpae_t entry, | |
+ unsigned int level) | |
{ | |
unsigned int i; | |
lpae_t *table; | |
@@ -853,8 +848,8 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry, | |
* know whether the entry should be shattered for every entry. | |
*/ | |
if ( next_level != target ) | |
- rv = p2m_split_superpage(p2m, table + offsets[next_level], | |
- level + 1, target, offsets); | |
+ rv = p2m_split_superpage(p2m, table + offsets[next_level], level + 1, | |
+ target, offsets); | |
if ( p2m->clean_pte ) | |
clean_dcache_va_range(table, PAGE_SIZE); | |
@@ -874,11 +869,8 @@ static bool p2m_split_superpage(struct p2m_domain *p2m, lpae_t *entry, | |
* Insert an entry in the p2m. This should be called with a mapping | |
* equal to a page/superpage (4K, 2M, 1G). | |
*/ | |
-static int __p2m_set_entry(struct p2m_domain *p2m, | |
- gfn_t sgfn, | |
- unsigned int page_order, | |
- mfn_t smfn, | |
- p2m_type_t t, | |
+static int __p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, | |
+ unsigned int page_order, mfn_t smfn, p2m_type_t t, | |
p2m_access_t a) | |
{ | |
unsigned int level = 0; | |
@@ -907,8 +899,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, | |
* Don't try to allocate intermediate page table if the mapping | |
* is about to be removed. | |
*/ | |
- rc = p2m_next_level(p2m, removing_mapping, | |
- level, &table, offsets[level]); | |
+ rc = p2m_next_level(p2m, removing_mapping, level, &table, | |
+ offsets[level]); | |
if ( rc == GUEST_TABLE_MAP_FAILED ) | |
{ | |
/* | |
@@ -918,7 +910,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m, | |
* when removing a mapping as it may not exist in the | |
* page table. In this case, just ignore it. | |
*/ | |
- rc = removing_mapping ? 0 : -ENOENT; | |
+ rc = removing_mapping ? 0 : -ENOENT; | |
goto out; | |
} | |
else if ( rc != GUEST_TABLE_NORMAL_PAGE ) | |
@@ -1043,8 +1035,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m, | |
p2m_write_pte(entry, pte, p2m->clean_pte); | |
- p2m->max_mapped_gfn = gfn_max(p2m->max_mapped_gfn, | |
- gfn_add(sgfn, 1 << page_order)); | |
+ p2m->max_mapped_gfn = | |
+ gfn_max(p2m->max_mapped_gfn, gfn_add(sgfn, 1 << page_order)); | |
p2m->lowest_mapped_gfn = gfn_min(p2m->lowest_mapped_gfn, sgfn); | |
} | |
@@ -1078,12 +1070,8 @@ out: | |
return rc; | |
} | |
-int p2m_set_entry(struct p2m_domain *p2m, | |
- gfn_t sgfn, | |
- unsigned long nr, | |
- mfn_t smfn, | |
- p2m_type_t t, | |
- p2m_access_t a) | |
+int p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, unsigned long nr, | |
+ mfn_t smfn, p2m_type_t t, p2m_access_t a) | |
{ | |
int rc = 0; | |
@@ -1118,7 +1106,7 @@ int p2m_set_entry(struct p2m_domain *p2m, | |
sgfn = gfn_add(sgfn, (1 << order)); | |
if ( !mfn_eq(smfn, INVALID_MFN) ) | |
- smfn = mfn_add(smfn, (1 << order)); | |
+ smfn = mfn_add(smfn, (1 << order)); | |
nr -= (1 << order); | |
} | |
@@ -1278,11 +1266,8 @@ out: | |
return resolved; | |
} | |
-static inline int p2m_insert_mapping(struct domain *d, | |
- gfn_t start_gfn, | |
- unsigned long nr, | |
- mfn_t mfn, | |
- p2m_type_t t) | |
+static inline int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, | |
+ unsigned long nr, mfn_t mfn, p2m_type_t t) | |
{ | |
struct p2m_domain *p2m = p2m_get_hostp2m(d); | |
int rc; | |
@@ -1294,58 +1279,45 @@ static inline int p2m_insert_mapping(struct domain *d, | |
return rc; | |
} | |
-static inline int p2m_remove_mapping(struct domain *d, | |
- gfn_t start_gfn, | |
- unsigned long nr, | |
- mfn_t mfn) | |
+static inline int p2m_remove_mapping(struct domain *d, gfn_t start_gfn, | |
+ unsigned long nr, mfn_t mfn) | |
{ | |
struct p2m_domain *p2m = p2m_get_hostp2m(d); | |
int rc; | |
p2m_write_lock(p2m); | |
- rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN, | |
- p2m_invalid, p2m_access_rwx); | |
+ rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN, p2m_invalid, | |
+ p2m_access_rwx); | |
p2m_write_unlock(p2m); | |
return rc; | |
} | |
-int map_regions_p2mt(struct domain *d, | |
- gfn_t gfn, | |
- unsigned long nr, | |
- mfn_t mfn, | |
+int map_regions_p2mt(struct domain *d, gfn_t gfn, unsigned long nr, mfn_t mfn, | |
p2m_type_t p2mt) | |
{ | |
return p2m_insert_mapping(d, gfn, nr, mfn, p2mt); | |
} | |
-int unmap_regions_p2mt(struct domain *d, | |
- gfn_t gfn, | |
- unsigned long nr, | |
+int unmap_regions_p2mt(struct domain *d, gfn_t gfn, unsigned long nr, | |
mfn_t mfn) | |
{ | |
return p2m_remove_mapping(d, gfn, nr, mfn); | |
} | |
-int map_mmio_regions(struct domain *d, | |
- gfn_t start_gfn, | |
- unsigned long nr, | |
+int map_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, | |
mfn_t mfn) | |
{ | |
return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_dev); | |
} | |
-int unmap_mmio_regions(struct domain *d, | |
- gfn_t start_gfn, | |
- unsigned long nr, | |
+int unmap_mmio_regions(struct domain *d, gfn_t start_gfn, unsigned long nr, | |
mfn_t mfn) | |
{ | |
return p2m_remove_mapping(d, start_gfn, nr, mfn); | |
} | |
-int map_dev_mmio_region(struct domain *d, | |
- gfn_t gfn, | |
- unsigned long nr, | |
+int map_dev_mmio_region(struct domain *d, gfn_t gfn, unsigned long nr, | |
mfn_t mfn) | |
{ | |
int res; | |
@@ -1356,7 +1328,8 @@ int map_dev_mmio_region(struct domain *d, | |
res = p2m_insert_mapping(d, gfn, nr, mfn, p2m_mmio_direct_c); | |
if ( res < 0 ) | |
{ | |
- printk(XENLOG_G_ERR "Unable to map MFNs [%#"PRI_mfn" - %#"PRI_mfn" in Dom%d\n", | |
+ printk(XENLOG_G_ERR "Unable to map MFNs [%#" PRI_mfn " - %#" PRI_mfn | |
+ " in Dom%d\n", | |
mfn_x(mfn), mfn_x(mfn) + nr - 1, d->domain_id); | |
return res; | |
} | |
@@ -1364,11 +1337,8 @@ int map_dev_mmio_region(struct domain *d, | |
return 0; | |
} | |
-int guest_physmap_add_entry(struct domain *d, | |
- gfn_t gfn, | |
- mfn_t mfn, | |
- unsigned long page_order, | |
- p2m_type_t t) | |
+int guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, | |
+ unsigned long page_order, p2m_type_t t) | |
{ | |
return p2m_insert_mapping(d, gfn, (1 << page_order), mfn, t); | |
} | |
@@ -1416,7 +1386,6 @@ static int p2m_alloc_table(struct domain *d) | |
return 0; | |
} | |
- | |
static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED; | |
/* | |
@@ -1531,8 +1500,8 @@ int p2m_init(struct domain *d) | |
* shared with the CPU, Xen has to make sure that the PT changes have | |
* reached the memory | |
*/ | |
- p2m->clean_pte = iommu_enabled && | |
- !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK); | |
+ p2m->clean_pte = | |
+ iommu_enabled && !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK); | |
rc = p2m_alloc_table(d); | |
@@ -1542,10 +1511,10 @@ int p2m_init(struct domain *d) | |
* the INVALID_VCPU_ID. | |
*/ | |
BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < MAX_VIRT_CPUS); | |
- BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0])* 8)) < INVALID_VCPU_ID); | |
+ BUILD_BUG_ON((1 << (sizeof(p2m->last_vcpu_ran[0]) * 8)) < INVALID_VCPU_ID); | |
- for_each_possible_cpu(cpu) | |
- p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID; | |
+ for_each_possible_cpu (cpu) | |
+ p2m->last_vcpu_ran[cpu] = INVALID_VCPU_ID; | |
/* | |
* Besides getting a domain when we only have the p2m in hand, | |
@@ -1602,11 +1571,13 @@ int relinquish_p2m_mapping(struct domain *d) | |
* For a valid mapping, the start will always be aligned, as the | |
* entry will be removed whilst relinquishing. | |
*/ | |
- rc = __p2m_set_entry(p2m, start, order, INVALID_MFN, | |
- p2m_invalid, p2m_access_rwx); | |
+ rc = __p2m_set_entry(p2m, start, order, INVALID_MFN, p2m_invalid, | |
+ p2m_access_rwx); | |
if ( unlikely(rc) ) | |
{ | |
- printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#"PRI_gfn" order=%u from the p2m of domain %d\n", gfn_x(start), order, d->domain_id); | |
+ printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#" PRI_gfn | |
+ " order=%u from the p2m of domain %d\n", | |
+ gfn_x(start), order, d->domain_id); | |
break; | |
} | |
} | |
@@ -1650,7 +1621,7 @@ int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end) | |
while ( gfn_x(start) < gfn_x(end) ) | |
{ | |
- /* | |
+ /* | |
* Cleaning the cache for the P2M may take a long time. So we | |
* need to be able to preempt. We will arbitrarily preempt every | |
* time the count reaches 512 or above. | |
@@ -1732,8 +1703,7 @@ void p2m_flush_vm(struct vcpu *v) | |
ASSERT(local_irq_is_enabled()); | |
ASSERT(v->arch.need_flush_to_ram); | |
- do | |
- { | |
+ do { | |
rc = p2m_cache_flush_range(v->domain, &start, _gfn(ULONG_MAX)); | |
if ( rc == -ERESTART ) | |
do_softirq(); | |
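The two hunks above cooperate: p2m_cache_flush_range() voluntarily preempts itself by returning -ERESTART once its counter passes 512, recording its progress through *pstart, while p2m_flush_vm() loops on that return code and services softirqs between restarts. A condensed sketch of the shape, with stub helpers standing in for the real flush work and do_softirq():

    #include <errno.h>

    #ifndef ERESTART
    #define ERESTART 85 /* not in ISO C; Linux's value, for the sketch */
    #endif

    #define PREEMPT_BATCH 512

    static void flush_one_page(unsigned long gfn) { (void)gfn; /* stub */ }
    static void process_pending(void) { /* stub for do_softirq() */ }

    static int flush_range(unsigned long *start, unsigned long end)
    {
        unsigned long count = 0;

        while ( *start < end )
        {
            /* Arbitrary threshold, as in the comment above: stop and let
             * the caller reschedule us; *start records where to resume. */
            if ( count >= PREEMPT_BATCH )
                return -ERESTART;

            flush_one_page(*start);
            (*start)++;
            count++;
        }

        return 0;
    }

    int main(void)
    {
        unsigned long start = 0;
        int rc;

        do {
            rc = flush_range(&start, 4096);
            if ( rc == -ERESTART )
                process_pending();
        } while ( rc == -ERESTART );

        return rc;
    }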
@@ -1741,8 +1711,7 @@ void p2m_flush_vm(struct vcpu *v) | |
if ( rc != 0 ) | |
gprintk(XENLOG_WARNING, | |
- "P2M has not been correctly cleaned (rc = %d)\n", | |
- rc); | |
+ "P2M has not been correctly cleaned (rc = %d)\n", rc); | |
/* | |
* Invalidate the p2m to track which page was modified by the guest | |
@@ -1880,7 +1849,8 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, | |
if ( !guest_walk_tables(v, va, &ipa, &s1_perms) ) | |
{ | |
dprintk(XENLOG_G_DEBUG, | |
- "%pv: Failed to walk page-table va %#"PRIvaddr"\n", v, va); | |
+ "%pv: Failed to walk page-table va %#" PRIvaddr "\n", v, | |
+ va); | |
return NULL; | |
} | |
@@ -1909,8 +1879,8 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, | |
if ( !mfn_valid(mfn) ) | |
{ | |
- dprintk(XENLOG_G_DEBUG, "%pv: Invalid MFN %#"PRI_mfn"\n", | |
- v, mfn_x(mfn)); | |
+ dprintk(XENLOG_G_DEBUG, "%pv: Invalid MFN %#" PRI_mfn "\n", v, | |
+ mfn_x(mfn)); | |
return NULL; | |
} | |
@@ -1919,8 +1889,9 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, | |
if ( unlikely(!get_page(page, d)) ) | |
{ | |
- dprintk(XENLOG_G_DEBUG, "%pv: Failing to acquire the MFN %#"PRI_mfn"\n", | |
- v, mfn_x(maddr_to_mfn(maddr))); | |
+ dprintk(XENLOG_G_DEBUG, | |
+ "%pv: Failing to acquire the MFN %#" PRI_mfn "\n", v, | |
+ mfn_x(maddr_to_mfn(maddr))); | |
return NULL; | |
} | |
@@ -1944,7 +1915,8 @@ static void setup_virt_paging_one(void *data) | |
*/ | |
if ( cpus_have_cap(ARM64_WORKAROUND_AT_SPECULATE) ) | |
{ | |
- WRITE_SYSREG64(generate_vttbr(INVALID_VMID, empty_root_mfn), VTTBR_EL2); | |
+ WRITE_SYSREG64(generate_vttbr(INVALID_VMID, empty_root_mfn), | |
+ VTTBR_EL2); | |
WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2); | |
isb(); | |
@@ -1955,37 +1927,39 @@ static void setup_virt_paging_one(void *data) | |
void __init setup_virt_paging(void) | |
{ | |
/* Setup Stage 2 address translation */ | |
- unsigned long val = VTCR_RES1|VTCR_SH0_IS|VTCR_ORGN0_WBWA|VTCR_IRGN0_WBWA; | |
+ unsigned long val = | |
+ VTCR_RES1 | VTCR_SH0_IS | VTCR_ORGN0_WBWA | VTCR_IRGN0_WBWA; | |
#ifdef CONFIG_ARM_32 | |
printk("P2M: 40-bit IPA\n"); | |
p2m_ipa_bits = 40; | |
val |= VTCR_T0SZ(0x18); /* 40 bit IPA */ | |
- val |= VTCR_SL0(0x1); /* P2M starts at first level */ | |
-#else /* CONFIG_ARM_64 */ | |
- const struct { | |
- unsigned int pabits; /* Physical Address Size */ | |
- unsigned int t0sz; /* Desired T0SZ, minimum in comment */ | |
+ val |= VTCR_SL0(0x1); /* P2M starts at first level */ | |
+#else /* CONFIG_ARM_64 */ | |
+ const struct | |
+ { | |
+ unsigned int pabits; /* Physical Address Size */ | |
+ unsigned int t0sz; /* Desired T0SZ, minimum in comment */ | |
unsigned int root_order; /* Page order of the root of the p2m */ | |
- unsigned int sl0; /* Desired SL0, maximum in comment */ | |
+ unsigned int sl0; /* Desired SL0, maximum in comment */ | |
} pa_range_info[] = { | |
/* T0SZ minimum and SL0 maximum from ARM DDI 0487A.b Table D4-5 */ | |
/* PA size, t0sz(min), root-order, sl0(max) */ | |
- [0] = { 32, 32/*32*/, 0, 1 }, | |
- [1] = { 36, 28/*28*/, 0, 1 }, | |
- [2] = { 40, 24/*24*/, 1, 1 }, | |
- [3] = { 42, 22/*22*/, 3, 1 }, | |
- [4] = { 44, 20/*20*/, 0, 2 }, | |
- [5] = { 48, 16/*16*/, 0, 2 }, | |
- [6] = { 0 }, /* Invalid */ | |
- [7] = { 0 } /* Invalid */ | |
+ [0] = {32, 32 /*32*/, 0, 1}, | |
+ [1] = {36, 28 /*28*/, 0, 1}, | |
+ [2] = {40, 24 /*24*/, 1, 1}, | |
+ [3] = {42, 22 /*22*/, 3, 1}, | |
+ [4] = {44, 20 /*20*/, 0, 2}, | |
+ [5] = {48, 16 /*16*/, 0, 2}, | |
+ [6] = {0}, /* Invalid */ | |
+ [7] = {0} /* Invalid */ | |
}; | |
unsigned int cpu; | |
unsigned int pa_range = 0x10; /* Larger than any possible value */ | |
bool vmid_8_bit = false; | |
- for_each_online_cpu ( cpu ) | |
+ for_each_online_cpu (cpu) | |
{ | |
const struct cpuinfo_arm *info = &cpu_data[cpu]; | |
if ( info->mm64.pa_range < pa_range ) | |
@@ -2004,7 +1978,8 @@ void __init setup_virt_paging(void) | |
max_vmid = MAX_VMID_16_BIT; | |
/* pa_range is 4 bits, but the defined encodings are only 3 bits */ | |
- if ( pa_range >= ARRAY_SIZE(pa_range_info) || !pa_range_info[pa_range].pabits ) | |
+ if ( pa_range >= ARRAY_SIZE(pa_range_info) || | |
+ !pa_range_info[pa_range].pabits ) | |
panic("Unknown encoding of ID_AA64MMFR0_EL1.PARange %x\n", pa_range); | |
val |= VTCR_PS(pa_range); | |
@@ -2020,10 +1995,9 @@ void __init setup_virt_paging(void) | |
p2m_root_level = 2 - pa_range_info[pa_range].sl0; | |
p2m_ipa_bits = 64 - pa_range_info[pa_range].t0sz; | |
- printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n", | |
- p2m_ipa_bits, | |
+ printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n", p2m_ipa_bits, | |
pa_range_info[pa_range].pabits, | |
- ( MAX_VMID == MAX_VMID_16_BIT ) ? 16 : 8); | |
+ (MAX_VMID == MAX_VMID_16_BIT) ? 16 : 8); | |
#endif | |
printk("P2M: %d levels with order-%d root, VTCR 0x%lx\n", | |
4 - P2M_ROOT_LEVEL, P2M_ROOT_ORDER, val); | |
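The pa_range_info[] table above feeds simple arithmetic: the usable IPA width is 64 - T0SZ, the starting walk level is 2 - SL0, and the root occupies 1 << root_order pages. A worked check for the 48-bit PA entry ([5] = {48, 16, 0, 2}):

    #include <assert.h>

    int main(void)
    {
        /* pa_range_info[5]: 48-bit PA, t0sz = 16, root_order = 0, sl0 = 2. */
        unsigned int t0sz = 16, sl0 = 2, root_order = 0;

        unsigned int ipa_bits = 64 - t0sz;          /* 48-bit IPA */
        unsigned int root_level = 2 - sl0;          /* walk starts at level 0 */
        unsigned int root_pages = 1u << root_order; /* single root page */
        unsigned int levels = 4 - root_level;       /* as printed above */

        assert(ipa_bits == 48 && root_level == 0);
        assert(root_pages == 1 && levels == 4);
        return 0;
    }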
@@ -2031,7 +2005,7 @@ void __init setup_virt_paging(void) | |
p2m_vmid_allocator_init(); | |
/* It is not allowed to concatenate a level zero root */ | |
- BUG_ON( P2M_ROOT_LEVEL == 0 && P2M_ROOT_ORDER > 0 ); | |
+ BUG_ON(P2M_ROOT_LEVEL == 0 && P2M_ROOT_ORDER > 0); | |
vtcr = val; | |
/* | |
@@ -2044,7 +2018,8 @@ void __init setup_virt_paging(void) | |
root = p2m_allocate_root(); | |
if ( !root ) | |
- panic("Unable to allocate root table for ARM64_WORKAROUND_AT_SPECULATE\n"); | |
+ panic( | |
+ "Unable to allocate root table for ARM64_WORKAROUND_AT_SPECULATE\n"); | |
empty_root_mfn = page_to_mfn(root); | |
} | |
@@ -2054,10 +2029,9 @@ void __init setup_virt_paging(void) | |
} | |
static int cpu_virt_paging_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
- void *hcpu) | |
+ unsigned long action, void *hcpu) | |
{ | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_STARTING: | |
ASSERT(system_state != SYS_STATE_boot); | |
@@ -2081,13 +2055,14 @@ static int __init cpu_virt_paging_init(void) | |
return 0; | |
} | |
/* | |
- * Initialization of the notifier has to be done at init rather than presmp_init | |
- * phase because: the registered notifier is used to setup virtual paging for | |
- * non-boot CPUs after the initial virtual paging for all CPUs is already setup, | |
- * i.e. when a non-boot CPU is hotplugged after the system has booted. In other | |
- * words, the notifier should be registered after the virtual paging is | |
- * initially setup (setup_virt_paging() is called from start_xen()). This is | |
- * required because vtcr config value has to be set before a notifier can fire. | |
+ * Initialization of the notifier has to be done at init rather than | |
+ * presmp_init phase because the registered notifier is used to set up | |
+ * virtual paging for non-boot CPUs after the initial virtual paging for | |
+ * all CPUs has already been set up, i.e. when a non-boot CPU is | |
+ * hotplugged after the system has booted. In other words, the notifier | |
+ * should be registered after virtual paging is initially set up | |
+ * (setup_virt_paging() is called from start_xen()). This is required | |
+ * because the vtcr config value has to be set before a notifier can fire. | |
*/ | |
__initcall(cpu_virt_paging_init); | |
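cpu_virt_paging_callback() above, like cpu_percpu_callback() in percpu.c below, is a notifier_block callback keyed on the hotplug action and registered from an initcall. A self-contained sketch of that pattern (the struct layout and action value are illustrative; register_cpu_notifier() here is a stand-in for Xen's helper of the same name):

    #define CPU_STARTING 2 /* illustrative action value */
    #define NOTIFY_DONE  0

    struct notifier_block
    {
        int (*notifier_call)(struct notifier_block *nfb, unsigned long action,
                             void *hcpu);
    };

    /* Stand-in for Xen's register_cpu_notifier(). */
    static struct notifier_block *registered;

    static void register_cpu_notifier(struct notifier_block *nfb)
    {
        registered = nfb;
    }

    static int cpu_example_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;

        switch ( action )
        {
        case CPU_STARTING:
            /* Per-CPU setup would run here, on the CPU coming online. */
            (void)cpu;
            break;
        default:
            break;
        }

        (void)nfb;
        return NOTIFY_DONE;
    }

    static struct notifier_block cpu_example_nfb = {
        .notifier_call = cpu_example_callback,
    };

    int main(void)
    {
        /* In the code above this happens from an initcall, post boot setup. */
        register_cpu_notifier(&cpu_example_nfb);

        /* Simulate CPU 1 being brought online. */
        return registered->notifier_call(registered, CPU_STARTING, (void *)1ul);
    }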
diff --git a/xen/arch/arm/percpu.c b/xen/arch/arm/percpu.c | |
index 25442c48fe..1b526c7fc2 100644 | |
--- a/xen/arch/arm/percpu.c | |
+++ b/xen/arch/arm/percpu.c | |
@@ -6,7 +6,8 @@ | |
unsigned long __per_cpu_offset[NR_CPUS]; | |
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start) | |
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start)) | |
+#define PERCPU_ORDER \ | |
+ (get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)) | |
void __init percpu_init_areas(void) | |
{ | |
@@ -27,7 +28,8 @@ static int init_percpu_area(unsigned int cpu) | |
return 0; | |
} | |
-struct free_info { | |
+struct free_info | |
+{ | |
unsigned int cpu; | |
struct rcu_head rcu; | |
}; | |
@@ -49,13 +51,13 @@ static void free_percpu_area(unsigned int cpu) | |
call_rcu(&info->rcu, _free_percpu_area); | |
} | |
-static int cpu_percpu_callback( | |
- struct notifier_block *nfb, unsigned long action, void *hcpu) | |
+static int cpu_percpu_callback(struct notifier_block *nfb, | |
+ unsigned long action, void *hcpu) | |
{ | |
unsigned int cpu = (unsigned long)hcpu; | |
int rc = 0; | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_UP_PREPARE: | |
rc = init_percpu_area(cpu); | |
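struct free_info above pairs the CPU number with an embedded rcu_head so free_percpu_area() can defer the real teardown past an RCU grace period; the callback then recovers the enclosing structure from the rcu_head pointer. A sketch of that container_of() round-trip, with call_rcu() reduced to a direct invocation for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head
    {
        void (*func)(struct rcu_head *head);
    };

    struct free_info
    {
        unsigned int cpu;
        struct rcu_head rcu;
    };

    /* Stand-in for call_rcu(): the real thing defers func until all
     * current RCU readers have finished; here we invoke it directly. */
    static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
    {
        func(head);
    }

    static void _free_area(struct rcu_head *head)
    {
        /* Recover the enclosing free_info from its embedded rcu_head. */
        struct free_info *info = container_of(head, struct free_info, rcu);

        printf("freeing per-cpu area of cpu %u\n", info->cpu);
    }

    int main(void)
    {
        static struct free_info info = { .cpu = 3 };

        call_rcu(&info.rcu, _free_area);
        return 0;
    }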
diff --git a/xen/arch/arm/physdev.c b/xen/arch/arm/physdev.c | |
index e91355fe22..6b2f20ccbe 100644 | |
--- a/xen/arch/arm/physdev.c | |
+++ b/xen/arch/arm/physdev.c | |
@@ -10,7 +10,6 @@ | |
#include <xen/sched.h> | |
#include <asm/hypercall.h> | |
- | |
int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) | |
{ | |
gdprintk(XENLOG_DEBUG, "PHYSDEVOP cmd=%d: not implemented\n", cmd); | |
diff --git a/xen/arch/arm/platform.c b/xen/arch/arm/platform.c | |
index 8eb0b6e57a..738b6d4ce4 100644 | |
--- a/xen/arch/arm/platform.c | |
+++ b/xen/arch/arm/platform.c | |
@@ -27,7 +27,6 @@ extern const struct platform_desc _splatform[], _eplatform[]; | |
/* Pointer to the current platform description */ | |
static const struct platform_desc *platform; | |
- | |
static bool __init platform_is_compatible(const struct platform_desc *plat) | |
{ | |
const char *const *compat; | |
diff --git a/xen/arch/arm/platform_hypercall.c b/xen/arch/arm/platform_hypercall.c | |
index 5aab856ce7..b40d0680dc 100644 | |
--- a/xen/arch/arm/platform_hypercall.c | |
+++ b/xen/arch/arm/platform_hypercall.c | |
@@ -44,16 +44,16 @@ long do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op) | |
*/ | |
while ( !spin_trylock(&xenpf_lock) ) | |
if ( hypercall_preempt_check() ) | |
- return hypercall_create_continuation( | |
- __HYPERVISOR_platform_op, "h", u_xenpf_op); | |
+ return hypercall_create_continuation(__HYPERVISOR_platform_op, "h", | |
+ u_xenpf_op); | |
- switch ( op->cmd ) | |
+ switch (op->cmd) | |
{ | |
case XENPF_settime64: | |
if ( likely(!op->u.settime64.mbz) ) | |
- do_settime(op->u.settime64.secs, | |
- op->u.settime64.nsecs, | |
- op->u.settime64.system_time + SECONDS(d->time_offset_seconds)); | |
+ do_settime(op->u.settime64.secs, op->u.settime64.nsecs, | |
+ op->u.settime64.system_time + | |
+ SECONDS(d->time_offset_seconds)); | |
else | |
ret = -EINVAL; | |
break; | |
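The do_platform_op() hunk above shows how a hypercall takes a global lock without stalling the CPU: loop on spin_trylock(), and if preemption is pending, return a continuation so the guest re-issues the hypercall rather than spinning. A condensed sketch, with stub helpers in place of spin_trylock(), hypercall_preempt_check() and hypercall_create_continuation():

    /* Stand-ins for the real lock/preemption/continuation primitives. */
    static int try_lock(void)           { return 1; }
    static void unlock(void)            { }
    static int preempt_pending(void)    { return 0; }
    static long make_continuation(void) { return -1; /* "retry" marker */ }

    static long do_op(void)
    {
        /* Never spin while preemption is pending: hand back a continuation
         * so the guest re-enters the hypercall instead of burning the CPU. */
        while ( !try_lock() )
            if ( preempt_pending() )
                return make_continuation();

        /* ... critical section under the global lock ... */

        unlock();
        return 0;
    }

    int main(void)
    {
        return (int)do_op();
    }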
diff --git a/xen/arch/arm/platforms/brcm.c b/xen/arch/arm/platforms/brcm.c | |
index d481b2c60f..57375c118e 100644 | |
--- a/xen/arch/arm/platforms/brcm.c | |
+++ b/xen/arch/arm/platforms/brcm.c | |
@@ -23,12 +23,13 @@ | |
#include <asm/io.h> | |
#include <xen/delay.h> | |
-struct brcm_plat_regs { | |
- uint32_t hif_mask; | |
- uint32_t hif_cpu_reset_config; | |
- uint32_t hif_boot_continuation; | |
- uint32_t cpu0_pwr_zone_ctrl; | |
- uint32_t scratch_reg; | |
+struct brcm_plat_regs | |
+{ | |
+ uint32_t hif_mask; | |
+ uint32_t hif_cpu_reset_config; | |
+ uint32_t hif_boot_continuation; | |
+ uint32_t cpu0_pwr_zone_ctrl; | |
+ uint32_t scratch_reg; | |
}; | |
static u32 brcm_boot_continuation_pc; | |
@@ -105,18 +106,17 @@ static __init int brcm_populate_plat_regs(void) | |
regs.hif_boot_continuation = reg_base; | |
dprintk(XENLOG_INFO, "hif_cpu_reset_config : %08xh\n", | |
- regs.hif_cpu_reset_config); | |
+ regs.hif_cpu_reset_config); | |
dprintk(XENLOG_INFO, "cpu0_pwr_zone_ctrl : %08xh\n", | |
- regs.cpu0_pwr_zone_ctrl); | |
+ regs.cpu0_pwr_zone_ctrl); | |
dprintk(XENLOG_INFO, "hif_boot_continuation : %08xh\n", | |
- regs.hif_boot_continuation); | |
- dprintk(XENLOG_INFO, "scratch_reg : %08xh\n", | |
- regs.scratch_reg); | |
+ regs.hif_boot_continuation); | |
+ dprintk(XENLOG_INFO, "scratch_reg : %08xh\n", regs.scratch_reg); | |
return 0; | |
} | |
-#define ZONE_PWR_UP_REQ (1 << 10) | |
+#define ZONE_PWR_UP_REQ (1 << 10) | |
#define ZONE_PWR_ON_STATE (1 << 26) | |
static int brcm_cpu_power_on(int cpu) | |
@@ -133,7 +133,7 @@ static int brcm_cpu_power_on(int cpu) | |
if ( !pwr_ctl ) | |
{ | |
dprintk(XENLOG_ERR, "%s: Unable to map \"cpu0_pwr_zone_ctrl\"\n", | |
- __func__); | |
+ __func__); | |
return -EFAULT; | |
} | |
@@ -218,7 +218,7 @@ static int brcm_set_boot_continuation(u32 cpu, u32 pc) | |
static int brcm_cpu_up(int cpu) | |
{ | |
- int rc; | |
+ int rc; | |
rc = brcm_cpu_power_on(cpu); | |
if ( rc ) | |
@@ -228,7 +228,7 @@ static int brcm_cpu_up(int cpu) | |
if ( rc ) | |
return rc; | |
- return brcm_cpu_release(cpu); | |
+ return brcm_cpu_release(cpu); | |
} | |
static int __init brcm_smp_init(void) | |
@@ -271,24 +271,19 @@ static __init int brcm_init(void) | |
return brcm_populate_plat_regs(); | |
} | |
-static const char *const brcm_dt_compat[] __initconst = | |
-{ | |
- "brcm,bcm7445d0", | |
- NULL | |
-}; | |
+static const char *const brcm_dt_compat[] __initconst = {"brcm,bcm7445d0", | |
+ NULL}; | |
-PLATFORM_START(brcm, "Broadcom B15") | |
- .compatible = brcm_dt_compat, | |
- .init = brcm_init, | |
- .smp_init = brcm_smp_init, | |
- .cpu_up = brcm_cpu_up, | |
-PLATFORM_END | |
+PLATFORM_START(brcm, "Broadcom B15").compatible = brcm_dt_compat, | |
+ .init = brcm_init, .smp_init = brcm_smp_init, | |
+ .cpu_up = brcm_cpu_up, | |
+ PLATFORM_END | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/exynos5.c b/xen/arch/arm/platforms/exynos5.c | |
index 6560507092..5de2d5df1f 100644 | |
--- a/xen/arch/arm/platforms/exynos5.c | |
+++ b/xen/arch/arm/platforms/exynos5.c | |
@@ -29,12 +29,12 @@ | |
static bool secure_firmware; | |
-#define EXYNOS_ARM_CORE0_CONFIG 0x2000 | |
+#define EXYNOS_ARM_CORE0_CONFIG 0x2000 | |
#define EXYNOS_ARM_CORE_CONFIG(_nr) (EXYNOS_ARM_CORE0_CONFIG + (0x80 * (_nr))) | |
#define EXYNOS_ARM_CORE_STATUS(_nr) (EXYNOS_ARM_CORE_CONFIG(_nr) + 0x4) | |
-#define S5P_CORE_LOCAL_PWR_EN 0x3 | |
+#define S5P_CORE_LOCAL_PWR_EN 0x3 | |
-#define SMC_CMD_CPU1BOOT (-4) | |
+#define SMC_CMD_CPU1BOOT (-4) | |
static int exynos5_init_time(void) | |
{ | |
@@ -131,7 +131,8 @@ static int __init exynos5_smp_init(void) | |
dprintk(XENLOG_ERR, "Error in %s\n", compatible); | |
return -ENXIO; | |
} | |
- dprintk(XENLOG_INFO, "sysram_addr: %016llx size: %016llx offset: %016llx\n", | |
+ dprintk(XENLOG_INFO, | |
+ "sysram_addr: %016llx size: %016llx offset: %016llx\n", | |
sysram_addr, size, sysram_offset); | |
sysram = ioremap_nocache(sysram_addr, size); | |
@@ -141,8 +142,8 @@ static int __init exynos5_smp_init(void) | |
return -EFAULT; | |
} | |
- printk("Set SYSRAM to %"PRIpaddr" (%p)\n", | |
- __pa(init_secondary), init_secondary); | |
+ printk("Set SYSRAM to %" PRIpaddr " (%p)\n", __pa(init_secondary), | |
+ init_secondary); | |
writel(__pa(init_secondary), sysram + sysram_offset); | |
iounmap(sysram); | |
@@ -158,8 +159,7 @@ static int exynos_cpu_power_state(void __iomem *power, int cpu) | |
static void exynos_cpu_power_up(void __iomem *power, int cpu) | |
{ | |
- __raw_writel(S5P_CORE_LOCAL_PWR_EN, | |
- power + EXYNOS_ARM_CORE_CONFIG(cpu)); | |
+ __raw_writel(S5P_CORE_LOCAL_PWR_EN, power + EXYNOS_ARM_CORE_CONFIG(cpu)); | |
} | |
static int exynos5_cpu_power_up(void __iomem *power, int cpu) | |
@@ -193,12 +193,11 @@ static int exynos5_get_pmu_baseandsize(u64 *power_base_addr, u64 *size) | |
{ | |
struct dt_device_node *node; | |
int rc; | |
- static const struct dt_device_match exynos_dt_pmu_matches[] = | |
- { | |
+ static const struct dt_device_match exynos_dt_pmu_matches[] = { | |
DT_MATCH_COMPATIBLE("samsung,exynos5250-pmu"), | |
DT_MATCH_COMPATIBLE("samsung,exynos5410-pmu"), | |
DT_MATCH_COMPATIBLE("samsung,exynos5420-pmu"), | |
- { /*sentinel*/ }, | |
+ {/*sentinel*/}, | |
}; | |
node = dt_find_matching_node(NULL, exynos_dt_pmu_matches); | |
@@ -277,53 +276,42 @@ static void exynos5_reset(void) | |
iounmap(pmu); | |
} | |
-static const struct dt_device_match exynos5_blacklist_dev[] __initconst = | |
-{ | |
+static const struct dt_device_match exynos5_blacklist_dev[] __initconst = { | |
/* Multi core Timer | |
* TODO: this device sets up an IRQ to CPU 1, which is not yet handled by | |
* Xen. This results in random freezes. | |
*/ | |
DT_MATCH_COMPATIBLE("samsung,exynos4210-mct"), | |
DT_MATCH_COMPATIBLE("samsung,secure-firmware"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-static const char * const exynos5250_dt_compat[] __initconst = | |
-{ | |
- "samsung,exynos5250", | |
- NULL | |
-}; | |
+static const char *const exynos5250_dt_compat[] __initconst = { | |
+ "samsung,exynos5250", NULL}; | |
-static const char * const exynos5_dt_compat[] __initconst = | |
-{ | |
- "samsung,exynos5410", | |
- NULL | |
-}; | |
+static const char *const exynos5_dt_compat[] __initconst = { | |
+ "samsung,exynos5410", NULL}; | |
PLATFORM_START(exynos5250, "SAMSUNG EXYNOS5250") | |
.compatible = exynos5250_dt_compat, | |
- .init_time = exynos5_init_time, | |
- .specific_mapping = exynos5250_specific_mapping, | |
- .smp_init = exynos5_smp_init, | |
- .cpu_up = cpu_up_send_sgi, | |
- .reset = exynos5_reset, | |
- .blacklist_dev = exynos5_blacklist_dev, | |
-PLATFORM_END | |
- | |
-PLATFORM_START(exynos5, "SAMSUNG EXYNOS5") | |
- .compatible = exynos5_dt_compat, | |
- .init_time = exynos5_init_time, | |
- .smp_init = exynos5_smp_init, | |
- .cpu_up = exynos5_cpu_up, | |
- .reset = exynos5_reset, | |
- .blacklist_dev = exynos5_blacklist_dev, | |
-PLATFORM_END | |
- | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ .init_time = exynos5_init_time, | |
+ .specific_mapping = exynos5250_specific_mapping, | |
+ .smp_init = exynos5_smp_init, .cpu_up = cpu_up_send_sgi, | |
+ .reset = exynos5_reset, .blacklist_dev = exynos5_blacklist_dev, | |
+ PLATFORM_END | |
+ | |
+ PLATFORM_START(exynos5, "SAMSUNG EXYNOS5") | |
+ .compatible = exynos5_dt_compat, | |
+ .init_time = exynos5_init_time, .smp_init = exynos5_smp_init, | |
+ .cpu_up = exynos5_cpu_up, .reset = exynos5_reset, | |
+ .blacklist_dev = exynos5_blacklist_dev, | |
+ PLATFORM_END | |
+ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/midway.c b/xen/arch/arm/platforms/midway.c | |
index b221279ec7..d76afc3b1c 100644 | |
--- a/xen/arch/arm/platforms/midway.c | |
+++ b/xen/arch/arm/platforms/midway.c | |
@@ -42,22 +42,18 @@ static void midway_reset(void) | |
iounmap(pmu); | |
} | |
-static const char * const midway_dt_compat[] __initconst = | |
-{ | |
- "calxeda,ecx-2000", | |
- NULL | |
-}; | |
- | |
-PLATFORM_START(midway, "CALXEDA MIDWAY") | |
- .compatible = midway_dt_compat, | |
- .reset = midway_reset, | |
-PLATFORM_END | |
- | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+static const char *const midway_dt_compat[] __initconst = {"calxeda,ecx-2000", | |
+ NULL}; | |
+ | |
+PLATFORM_START(midway, "CALXEDA MIDWAY").compatible = midway_dt_compat, | |
+ .reset = midway_reset, | |
+ PLATFORM_END | |
+ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/omap5.c b/xen/arch/arm/platforms/omap5.c | |
index aee24e4d28..e3a990be55 100644 | |
--- a/xen/arch/arm/platforms/omap5.c | |
+++ b/xen/arch/arm/platforms/omap5.c | |
@@ -24,14 +24,14 @@ | |
#include <asm/io.h> | |
static uint16_t num_den[8][2] = { | |
- { 0, 0 }, /* not used */ | |
- { 26 * 64, 26 * 125 }, /* 12.0 Mhz */ | |
- { 2 * 768, 2 * 1625 }, /* 13.0 Mhz */ | |
- { 0, 0 }, /* not used */ | |
- { 130 * 8, 130 * 25 }, /* 19.2 Mhz */ | |
- { 2 * 384, 2 * 1625 }, /* 26.0 Mhz */ | |
- { 3 * 256, 3 * 1125 }, /* 27.0 Mhz */ | |
- { 130 * 4, 130 * 25 }, /* 38.4 Mhz */ | |
+ {0, 0}, /* not used */ | |
+ {26 * 64, 26 * 125}, /* 12.0 MHz */ | |
+ {2 * 768, 2 * 1625}, /* 13.0 MHz */ | |
+ {0, 0}, /* not used */ | |
+ {130 * 8, 130 * 25}, /* 19.2 MHz */ | |
+ {2 * 384, 2 * 1625}, /* 26.0 MHz */ | |
+ {3 * 256, 3 * 1125}, /* 27.0 MHz */ | |
+ {130 * 4, 130 * 25}, /* 38.4 MHz */ | |
}; | |
/* | |
@@ -57,15 +57,16 @@ static int omap5_init_time(void) | |
return -ENOMEM; | |
} | |
- sys_clksel = readl(ckgen_prm_base + OMAP5_CM_CLKSEL_SYS) & | |
- ~SYS_CLKSEL_MASK; | |
+ sys_clksel = | |
+ readl(ckgen_prm_base + OMAP5_CM_CLKSEL_SYS) & ~SYS_CLKSEL_MASK; | |
iounmap(ckgen_prm_base); | |
rt_ct_base = ioremap_nocache(REALTIME_COUNTER_BASE, 0x20); | |
if ( !rt_ct_base ) | |
{ | |
- dprintk(XENLOG_ERR, "%s: REALTIME_COUNTER_BASE ioremap failed\n", __func__); | |
+ dprintk(XENLOG_ERR, "%s: REALTIME_COUNTER_BASE ioremap failed\n", | |
+ __func__); | |
return -ENOMEM; | |
} | |
@@ -127,8 +128,8 @@ static int __init omap5_smp_init(void) | |
return -EFAULT; | |
} | |
- printk("Set AuxCoreBoot1 to %"PRIpaddr" (%p)\n", | |
- __pa(init_secondary), init_secondary); | |
+ printk("Set AuxCoreBoot1 to %" PRIpaddr " (%p)\n", __pa(init_secondary), | |
+ init_secondary); | |
writel(__pa(init_secondary), wugen_base + OMAP_AUX_CORE_BOOT_1_OFFSET); | |
printk("Set AuxCoreBoot0 to 0x20\n"); | |
@@ -139,38 +140,27 @@ static int __init omap5_smp_init(void) | |
return 0; | |
} | |
-static const char * const omap5_dt_compat[] __initconst = | |
-{ | |
- "ti,omap5", | |
- NULL | |
-}; | |
- | |
-static const char * const dra7_dt_compat[] __initconst = | |
-{ | |
- "ti,dra7", | |
- NULL | |
-}; | |
- | |
-PLATFORM_START(omap5, "TI OMAP5") | |
- .compatible = omap5_dt_compat, | |
- .init_time = omap5_init_time, | |
- .specific_mapping = omap5_specific_mapping, | |
- .smp_init = omap5_smp_init, | |
- .cpu_up = cpu_up_send_sgi, | |
-PLATFORM_END | |
- | |
-PLATFORM_START(dra7, "TI DRA7") | |
- .compatible = dra7_dt_compat, | |
- .init_time = omap5_init_time, | |
- .cpu_up = cpu_up_send_sgi, | |
- .smp_init = omap5_smp_init, | |
-PLATFORM_END | |
- | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+static const char *const omap5_dt_compat[] __initconst = {"ti,omap5", NULL}; | |
+ | |
+static const char *const dra7_dt_compat[] __initconst = {"ti,dra7", NULL}; | |
+ | |
+PLATFORM_START(omap5, "TI OMAP5").compatible = omap5_dt_compat, | |
+ .init_time = omap5_init_time, | |
+ .specific_mapping = omap5_specific_mapping, | |
+ .smp_init = omap5_smp_init, .cpu_up = cpu_up_send_sgi, | |
+ PLATFORM_END | |
+ | |
+ PLATFORM_START(dra7, "TI DRA7") | |
+ .compatible = dra7_dt_compat, | |
+ .init_time = omap5_init_time, .cpu_up = cpu_up_send_sgi, | |
+ .smp_init = omap5_smp_init, | |
+ PLATFORM_END | |
+ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/rcar2.c b/xen/arch/arm/platforms/rcar2.c | |
index df0ac84709..0eb6a6486b 100644 | |
--- a/xen/arch/arm/platforms/rcar2.c | |
+++ b/xen/arch/arm/platforms/rcar2.c | |
@@ -21,9 +21,9 @@ | |
#include <asm/platform.h> | |
#include <asm/io.h> | |
-#define RCAR2_RAM_ADDR 0xE63C0000 | |
-#define RCAR2_RAM_SIZE 0x1000 | |
-#define RCAR2_SMP_START_OFFSET 0xFFC | |
+#define RCAR2_RAM_ADDR 0xE63C0000 | |
+#define RCAR2_RAM_SIZE 0x1000 | |
+#define RCAR2_SMP_START_OFFSET 0xFFC | |
static int __init rcar2_smp_init(void) | |
{ | |
@@ -31,9 +31,9 @@ static int __init rcar2_smp_init(void) | |
/* map ICRAM */ | |
pram = ioremap_nocache(RCAR2_RAM_ADDR, RCAR2_RAM_SIZE); | |
- if( !pram ) | |
+ if ( !pram ) | |
{ | |
- dprintk( XENLOG_ERR, "Unable to map RCAR2 ICRAM\n"); | |
+ dprintk(XENLOG_ERR, "Unable to map RCAR2 ICRAM\n"); | |
return -ENOMEM; | |
} | |
@@ -46,23 +46,18 @@ static int __init rcar2_smp_init(void) | |
return 0; | |
} | |
-static const char *const rcar2_dt_compat[] __initconst = | |
-{ | |
- "renesas,lager", | |
- NULL | |
-}; | |
- | |
-PLATFORM_START(rcar2, "Renesas R-Car Gen2") | |
- .compatible = rcar2_dt_compat, | |
- .cpu_up = cpu_up_send_sgi, | |
- .smp_init = rcar2_smp_init, | |
-PLATFORM_END | |
- | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+static const char *const rcar2_dt_compat[] __initconst = {"renesas,lager", | |
+ NULL}; | |
+ | |
+PLATFORM_START(rcar2, "Renesas R-Car Gen2").compatible = rcar2_dt_compat, | |
+ .cpu_up = cpu_up_send_sgi, .smp_init = rcar2_smp_init, | |
+ PLATFORM_END | |
+ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/seattle.c b/xen/arch/arm/platforms/seattle.c | |
index 64cc1868c2..7cbb4c8a1c 100644 | |
--- a/xen/arch/arm/platforms/seattle.c | |
+++ b/xen/arch/arm/platforms/seattle.c | |
@@ -20,11 +20,8 @@ | |
#include <asm/platform.h> | |
#include <asm/psci.h> | |
-static const char * const seattle_dt_compat[] __initconst = | |
-{ | |
- "amd,seattle", | |
- NULL | |
-}; | |
+static const char *const seattle_dt_compat[] __initconst = {"amd,seattle", | |
+ NULL}; | |
/* Seattle firmware only implements the PSCI handlers for | |
* system off and system reset at this point. | |
@@ -41,17 +38,16 @@ static void seattle_system_off(void) | |
arm_smccc_smc(PSCI_0_2_FN32_SYSTEM_OFF, NULL); | |
} | |
-PLATFORM_START(seattle, "SEATTLE") | |
- .compatible = seattle_dt_compat, | |
- .reset = seattle_system_reset, | |
- .poweroff = seattle_system_off, | |
-PLATFORM_END | |
+PLATFORM_START(seattle, "SEATTLE").compatible = seattle_dt_compat, | |
+ .reset = seattle_system_reset, | |
+ .poweroff = seattle_system_off, | |
+ PLATFORM_END | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/sunxi.c b/xen/arch/arm/platforms/sunxi.c | |
index 55705b15b2..41e62a258b 100644 | |
--- a/xen/arch/arm/platforms/sunxi.c | |
+++ b/xen/arch/arm/platforms/sunxi.c | |
@@ -22,13 +22,13 @@ | |
#include <asm/io.h> | |
/* Watchdog constants: */ | |
-#define SUNXI_WDT_MODE_REG 0x04 | |
-#define SUNXI_WDT_MODE_EN (1 << 0) | |
-#define SUNXI_WDT_MODE_RST_EN (1 << 1) | |
+#define SUNXI_WDT_MODE_REG 0x04 | |
+#define SUNXI_WDT_MODE_EN (1 << 0) | |
+#define SUNXI_WDT_MODE_RST_EN (1 << 1) | |
-#define SUNXI_WDT_CONFIG_SYSTEM_RESET (1 << 0) | |
-#define SUNXI_WDOG0_CFG_REG 0x14 | |
-#define SUNXI_WDOG0_MODE_REG 0x18 | |
+#define SUNXI_WDT_CONFIG_SYSTEM_RESET (1 << 0) | |
+#define SUNXI_WDOG0_CFG_REG 0x14 | |
+#define SUNXI_WDOG0_MODE_REG 0x18 | |
static void __iomem *sunxi_map_watchdog(bool *new_wdt) | |
{ | |
@@ -40,7 +40,7 @@ static void __iomem *sunxi_map_watchdog(bool *new_wdt) | |
node = dt_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-wdt"); | |
if ( node ) | |
- _new_wdt = true; | |
+ _new_wdt = true; | |
else | |
node = dt_find_compatible_node(NULL, NULL, "allwinner,sun4i-a10-wdt"); | |
@@ -99,55 +99,43 @@ static void sunxi_reset(void) | |
iounmap(wdt); | |
- for (;;) | |
+ for ( ;; ) | |
wfi(); | |
} | |
-static const char * const sunxi_v7_dt_compat[] __initconst = | |
-{ | |
- "allwinner,sun6i-a31", | |
- "allwinner,sun6i-a31s", | |
- "allwinner,sun7i-a20", | |
- "allwinner,sun8i-a23", | |
- "allwinner,sun8i-a33", | |
- "allwinner,sun8i-h2-plus", | |
- "allwinner,sun8i-h3", | |
- NULL | |
-}; | |
+static const char *const sunxi_v7_dt_compat[] __initconst = { | |
+ "allwinner,sun6i-a31", "allwinner,sun6i-a31s", | |
+ "allwinner,sun7i-a20", "allwinner,sun8i-a23", | |
+ "allwinner,sun8i-a33", "allwinner,sun8i-h2-plus", | |
+ "allwinner,sun8i-h3", NULL}; | |
-static const char * const sunxi_v8_dt_compat[] __initconst = | |
-{ | |
- "allwinner,sun50i-a64", | |
- "allwinner,sun50i-h5", | |
- NULL | |
-}; | |
+static const char *const sunxi_v8_dt_compat[] __initconst = { | |
+ "allwinner,sun50i-a64", "allwinner,sun50i-h5", NULL}; | |
-static const struct dt_device_match sunxi_blacklist_dev[] __initconst = | |
-{ | |
+static const struct dt_device_match sunxi_blacklist_dev[] __initconst = { | |
/* | |
* The UARTs share a page which runs the risk of mapping the Xen console | |
* UART to dom0, so don't map any of them. | |
*/ | |
DT_MATCH_COMPATIBLE("snps,dw-apb-uart"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-PLATFORM_START(sunxi_v7, "Allwinner ARMv7") | |
- .compatible = sunxi_v7_dt_compat, | |
- .blacklist_dev = sunxi_blacklist_dev, | |
- .reset = sunxi_reset, | |
-PLATFORM_END | |
+PLATFORM_START(sunxi_v7, "Allwinner ARMv7").compatible = sunxi_v7_dt_compat, | |
+ .blacklist_dev = sunxi_blacklist_dev, | |
+ .reset = sunxi_reset, | |
+ PLATFORM_END | |
-PLATFORM_START(sunxi_v8, "Allwinner ARMv8") | |
- .compatible = sunxi_v8_dt_compat, | |
- .blacklist_dev = sunxi_blacklist_dev, | |
-PLATFORM_END | |
+ PLATFORM_START(sunxi_v8, "Allwinner ARMv8") | |
+ .compatible = sunxi_v8_dt_compat, | |
+ .blacklist_dev = sunxi_blacklist_dev, | |
+ PLATFORM_END | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/thunderx.c b/xen/arch/arm/platforms/thunderx.c | |
index 9b32a29c6b..21242dad70 100644 | |
--- a/xen/arch/arm/platforms/thunderx.c | |
+++ b/xen/arch/arm/platforms/thunderx.c | |
@@ -20,20 +20,14 @@ | |
#include <asm/platform.h> | |
-static const char * const thunderx_dt_compat[] __initconst = | |
-{ | |
- "cavium,thunder-88xx", | |
- NULL | |
-}; | |
+static const char *const thunderx_dt_compat[] __initconst = { | |
+ "cavium,thunder-88xx", NULL}; | |
-static const struct dt_device_match thunderx_blacklist_dev[] __initconst = | |
-{ | |
+static const struct dt_device_match thunderx_blacklist_dev[] __initconst = { | |
/* Cavium has its own SMMU which is not yet supported. */ | |
DT_MATCH_COMPATIBLE("cavium,smmu-v2"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-PLATFORM_START(thunderx, "THUNDERX") | |
- .compatible = thunderx_dt_compat, | |
- .blacklist_dev = thunderx_blacklist_dev, | |
-PLATFORM_END | |
+PLATFORM_START(thunderx, "THUNDERX").compatible = thunderx_dt_compat, | |
+ .blacklist_dev = thunderx_blacklist_dev, PLATFORM_END | |
diff --git a/xen/arch/arm/platforms/vexpress.c b/xen/arch/arm/platforms/vexpress.c | |
index b6193f75b5..8151b32538 100644 | |
--- a/xen/arch/arm/platforms/vexpress.c | |
+++ b/xen/arch/arm/platforms/vexpress.c | |
@@ -23,30 +23,30 @@ | |
#include <xen/vmap.h> | |
#include <asm/io.h> | |
-#define DCC_SHIFT 26 | |
+#define DCC_SHIFT 26 | |
#define FUNCTION_SHIFT 20 | |
-#define SITE_SHIFT 16 | |
+#define SITE_SHIFT 16 | |
#define POSITION_SHIFT 12 | |
-#define DEVICE_SHIFT 0 | |
+#define DEVICE_SHIFT 0 | |
static inline int vexpress_ctrl_start(uint32_t *syscfg, int write, | |
int function, int device) | |
{ | |
- int dcc = 0; /* DCC to access */ | |
- int site = 0; /* motherboard */ | |
+ int dcc = 0; /* DCC to access */ | |
+ int site = 0; /* motherboard */ | |
int position = 0; /* daughterboard */ | |
uint32_t stat; | |
/* set control register */ | |
- syscfg[V2M_SYS_CFGCTRL/4] = V2M_SYS_CFG_START | | |
- (write ? V2M_SYS_CFG_WRITE : 0) | | |
+ syscfg[V2M_SYS_CFGCTRL / 4] = | |
+ V2M_SYS_CFG_START | (write ? V2M_SYS_CFG_WRITE : 0) | | |
(dcc << DCC_SHIFT) | (function << FUNCTION_SHIFT) | | |
(site << SITE_SHIFT) | (position << POSITION_SHIFT) | | |
(device << DEVICE_SHIFT); | |
/* wait for complete flag to be set */ | |
do { | |
- stat = syscfg[V2M_SYS_CFGSTAT/4]; | |
+ stat = syscfg[V2M_SYS_CFGSTAT / 4]; | |
dsb(sy); | |
} while ( !(stat & V2M_SYS_CFG_COMPLETE) ); | |
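vexpress_ctrl_start() above composes a single SYS_CFGCTRL command word out of shifted bit fields and then polls SYS_CFGSTAT for the completion flag. The same compose-then-poll shape against a fake device (register layout and bit positions below are illustrative, not the Versatile Express values):

    #include <stdint.h>

    #define CMD_START      (1u << 31)
    #define CMD_WRITE      (1u << 30)
    #define FUNCTION_SHIFT 20
    #define DEVICE_SHIFT   0
    #define STAT_COMPLETE  (1u << 0)

    /* regs[0] = control, regs[1] = status; offsets are illustrative. */
    static void ctrl_start(volatile uint32_t *regs, int write,
                           unsigned int function, unsigned int device)
    {
        regs[0] = CMD_START | (write ? CMD_WRITE : 0) |
                  (function << FUNCTION_SHIFT) | (device << DEVICE_SHIFT);

        /* Busy-wait for completion; the original interleaves dsb(sy)
         * barriers with each status read. */
        while ( !(regs[1] & STAT_COMPLETE) )
            ;
    }

    int main(void)
    {
        /* Fake device: status already reports completion. */
        volatile uint32_t fake_regs[2] = { 0, STAT_COMPLETE };

        ctrl_start(fake_regs, 1, 1 /* function */, 0 /* device */);
        return 0;
    }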
@@ -78,10 +78,12 @@ static void vexpress_reset(void) | |
/* switch to slow mode */ | |
writel(0x3, sp810); | |
- dsb(sy); isb(); | |
+ dsb(sy); | |
+ isb(); | |
/* writing any value to SCSYSSTAT reg will reset the system */ | |
writel(0x1, sp810 + 4); | |
- dsb(sy); isb(); | |
+ dsb(sy); | |
+ isb(); | |
iounmap(sp810); | |
} | |
@@ -99,8 +101,8 @@ static int __init vexpress_smp_init(void) | |
return -EFAULT; | |
} | |
- printk("Set SYS_FLAGS to %"PRIpaddr" (%p)\n", | |
- __pa(init_secondary), init_secondary); | |
+ printk("Set SYS_FLAGS to %" PRIpaddr " (%p)\n", __pa(init_secondary), | |
+ init_secondary); | |
writel(~0, sysflags + V2M_SYS_FLAGSCLR); | |
writel(__pa(init_secondary), sysflags + V2M_SYS_FLAGSSET); | |
@@ -111,14 +113,10 @@ static int __init vexpress_smp_init(void) | |
#endif | |
-static const char * const vexpress_dt_compat[] __initconst = | |
-{ | |
- "arm,vexpress", | |
- NULL | |
-}; | |
+static const char *const vexpress_dt_compat[] __initconst = {"arm,vexpress", | |
+ NULL}; | |
-static const struct dt_device_match vexpress_blacklist_dev[] __initconst = | |
-{ | |
+static const struct dt_device_match vexpress_blacklist_dev[] __initconst = { | |
/* Cache Coherent Interconnect */ | |
DT_MATCH_COMPATIBLE("arm,cci-400"), | |
DT_MATCH_COMPATIBLE("arm,cci-400-pmu"), | |
@@ -130,24 +128,23 @@ static const struct dt_device_match vexpress_blacklist_dev[] __initconst = | |
DT_MATCH_COMPATIBLE("arm,vexpress-reset"), | |
DT_MATCH_COMPATIBLE("arm,vexpress-reboot"), | |
DT_MATCH_COMPATIBLE("arm,vexpress-shutdown"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
-PLATFORM_START(vexpress, "VERSATILE EXPRESS") | |
- .compatible = vexpress_dt_compat, | |
+PLATFORM_START(vexpress, "VERSATILE EXPRESS").compatible = vexpress_dt_compat, | |
#ifdef CONFIG_ARM_32 | |
- .smp_init = vexpress_smp_init, | |
- .cpu_up = cpu_up_send_sgi, | |
+ .smp_init = vexpress_smp_init, | |
+ .cpu_up = cpu_up_send_sgi, | |
#endif | |
- .reset = vexpress_reset, | |
- .blacklist_dev = vexpress_blacklist_dev, | |
-PLATFORM_END | |
- | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ .reset = vexpress_reset, | |
+ .blacklist_dev = vexpress_blacklist_dev, | |
+ PLATFORM_END | |
+ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/xgene-storm.c b/xen/arch/arm/platforms/xgene-storm.c | |
index fced4d7c2c..14a8c70c92 100644 | |
--- a/xen/arch/arm/platforms/xgene-storm.c | |
+++ b/xen/arch/arm/platforms/xgene-storm.c | |
@@ -24,26 +24,25 @@ | |
#include <asm/io.h> | |
/* XGENE RESET Specific defines */ | |
-#define XGENE_RESET_ADDR 0x17000014UL | |
-#define XGENE_RESET_SIZE 0x100 | |
-#define XGENE_RESET_MASK 0x1 | |
+#define XGENE_RESET_ADDR 0x17000014UL | |
+#define XGENE_RESET_SIZE 0x100 | |
+#define XGENE_RESET_MASK 0x1 | |
/* Variables to save the reset address of the SoC during platform initialization. */ | |
static u64 reset_addr, reset_size; | |
static u32 reset_mask; | |
static bool reset_vals_valid = false; | |
-#define XGENE_SEC_GICV2_DIST_ADDR 0x78010000 | |
+#define XGENE_SEC_GICV2_DIST_ADDR 0x78010000 | |
static void __init xgene_check_pirq_eoi(void) | |
{ | |
const struct dt_device_node *node; | |
int res; | |
paddr_t dbase; | |
- const struct dt_device_match xgene_dt_int_ctrl_match[] = | |
- { | |
+ const struct dt_device_match xgene_dt_int_ctrl_match[] = { | |
DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), | |
- { /*sentinel*/ }, | |
+ {/*sentinel*/}, | |
}; | |
node = dt_find_interrupt_controller(xgene_dt_int_ctrl_match); | |
@@ -52,7 +51,8 @@ static void __init xgene_check_pirq_eoi(void) | |
res = dt_device_get_address(node, 0, &dbase, NULL); | |
if ( !dbase ) | |
- panic("%s: Cannot find a valid address for the distributor\n", __func__); | |
+ panic("%s: Cannot find a valid address for the distributor\n", | |
+ __func__); | |
/* | |
* In old X-Gene Storm firmware and DT, secure mode addresses have | |
@@ -84,7 +84,8 @@ static void xgene_storm_reset(void) | |
if ( !addr ) | |
{ | |
- printk("XGENE: Unable to map xgene reset address, can not reset XGENE...\n"); | |
+ printk( | |
+ "XGENE: Unable to map xgene reset address, can not reset XGENE...\n"); | |
return; | |
} | |
@@ -109,24 +110,21 @@ static int xgene_storm_init(void) | |
return 0; | |
} | |
-static const char * const xgene_storm_dt_compat[] __initconst = | |
-{ | |
- "apm,xgene-storm", | |
- NULL | |
-}; | |
+static const char *const xgene_storm_dt_compat[] __initconst = { | |
+ "apm,xgene-storm", NULL}; | |
-PLATFORM_START(xgene_storm, "APM X-GENE STORM") | |
- .compatible = xgene_storm_dt_compat, | |
- .init = xgene_storm_init, | |
- .reset = xgene_storm_reset, | |
- .quirks = xgene_storm_quirks, | |
-PLATFORM_END | |
+PLATFORM_START(xgene_storm, "APM X-GENE STORM").compatible = | |
+ xgene_storm_dt_compat, | |
+ .init = xgene_storm_init, | |
+ .reset = xgene_storm_reset, | |
+ .quirks = xgene_storm_quirks, | |
+ PLATFORM_END | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c b/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c | |
index 2053ed7ac5..f69f02831d 100644 | |
--- a/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c | |
+++ b/xen/arch/arm/platforms/xilinx-zynqmp-eemi.c | |
@@ -57,7 +57,7 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) | |
unsigned int pm_fn = fid & 0xFFFF; | |
enum pm_ret_status ret; | |
- switch ( fid ) | |
+ switch (fid) | |
{ | |
/* Mandatory SMC32 functions. */ | |
case ARM_SMCCC_CALL_COUNT_FID(SIP): | |
@@ -87,8 +87,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) | |
case EEMI_FID(PM_SET_MAX_LATENCY): | |
if ( !domain_has_node_access(current->domain, nodeid) ) | |
{ | |
- gprintk(XENLOG_WARNING, | |
- "zynqmp-pm: fn=%u No access to node %u\n", pm_fn, nodeid); | |
+ gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No access to node %u\n", | |
+ pm_fn, nodeid); | |
ret = XST_PM_NO_ACCESS; | |
goto done; | |
} | |
@@ -98,8 +98,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) | |
case EEMI_FID(PM_RESET_GET_STATUS): | |
if ( !domain_has_reset_access(current->domain, nodeid) ) | |
{ | |
- gprintk(XENLOG_WARNING, | |
- "zynqmp-pm: fn=%u No access to reset %u\n", pm_fn, nodeid); | |
+ gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No access to reset %u\n", | |
+ pm_fn, nodeid); | |
ret = XST_PM_NO_ACCESS; | |
goto done; | |
} | |
@@ -114,8 +114,8 @@ bool zynqmp_eemi(struct cpu_user_regs *regs) | |
/* No MMIO access is allowed from non-secure domains */ | |
case EEMI_FID(PM_MMIO_WRITE): | |
case EEMI_FID(PM_MMIO_READ): | |
- gprintk(XENLOG_WARNING, | |
- "zynqmp-pm: fn=%u No MMIO access to %u\n", pm_fn, nodeid); | |
+ gprintk(XENLOG_WARNING, "zynqmp-pm: fn=%u No MMIO access to %u\n", | |
+ pm_fn, nodeid); | |
ret = XST_PM_NO_ACCESS; | |
goto done; | |
@@ -187,15 +187,10 @@ forward_to_fw: | |
* can forward the whole command to firmware without additional | |
* parameter checks. | |
*/ | |
- arm_smccc_1_1_smc(get_user_reg(regs, 0), | |
- get_user_reg(regs, 1), | |
- get_user_reg(regs, 2), | |
- get_user_reg(regs, 3), | |
- get_user_reg(regs, 4), | |
- get_user_reg(regs, 5), | |
- get_user_reg(regs, 6), | |
- get_user_reg(regs, 7), | |
- &res); | |
+ arm_smccc_1_1_smc(get_user_reg(regs, 0), get_user_reg(regs, 1), | |
+ get_user_reg(regs, 2), get_user_reg(regs, 3), | |
+ get_user_reg(regs, 4), get_user_reg(regs, 5), | |
+ get_user_reg(regs, 6), get_user_reg(regs, 7), &res); | |
set_user_reg(regs, 0, res.a0); | |
set_user_reg(regs, 1, res.a1); | |
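The tail of zynqmp_eemi() above is a pure pass-through: all eight SMCCC argument registers are forwarded to firmware and the four result registers are copied back into the guest context, preserving the SMCCC v1.1 calling convention end to end. A reduced sketch of that trampoline, with a stub register file and an echoing stand-in for arm_smccc_1_1_smc():

    #include <stdint.h>

    struct smccc_res
    {
        uint64_t a0, a1, a2, a3;
    };

    /* Stand-ins for the guest register file accessors. */
    static uint64_t guest_regs[8];

    static uint64_t get_reg(unsigned int n)         { return guest_regs[n]; }
    static void set_reg(unsigned int n, uint64_t v) { guest_regs[n] = v; }

    static void smccc_call(const uint64_t a[8], struct smccc_res *res)
    {
        /* A real arm_smccc_1_1_smc() traps to firmware; echo back here. */
        res->a0 = a[0];
        res->a1 = a[1];
        res->a2 = a[2];
        res->a3 = a[3];
    }

    static void forward_to_fw(void)
    {
        struct smccc_res res;
        uint64_t a[8];
        unsigned int i;

        /* SMCCC v1.1 passes up to eight arguments in x0-x7 ... */
        for ( i = 0; i < 8; i++ )
            a[i] = get_reg(i);

        smccc_call(a, &res);

        /* ... and returns results in x0-x3. */
        set_reg(0, res.a0);
        set_reg(1, res.a1);
        set_reg(2, res.a2);
        set_reg(3, res.a3);
    }

    int main(void)
    {
        guest_regs[0] = 0xC2000001; /* illustrative SiP function ID */
        forward_to_fw();
        return 0;
    }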
diff --git a/xen/arch/arm/platforms/xilinx-zynqmp.c b/xen/arch/arm/platforms/xilinx-zynqmp.c | |
index 3060d79b34..a302d7ce2b 100644 | |
--- a/xen/arch/arm/platforms/xilinx-zynqmp.c | |
+++ b/xen/arch/arm/platforms/xilinx-zynqmp.c | |
@@ -21,11 +21,8 @@ | |
#include <asm/platforms/xilinx-zynqmp-eemi.h> | |
#include <asm/smccc.h> | |
-static const char * const zynqmp_dt_compat[] __initconst = | |
-{ | |
- "xlnx,zynqmp", | |
- NULL | |
-}; | |
+static const char *const zynqmp_dt_compat[] __initconst = {"xlnx,zynqmp", | |
+ NULL}; | |
static bool zynqmp_smc(struct cpu_user_regs *regs) | |
{ | |
@@ -35,24 +32,24 @@ static bool zynqmp_smc(struct cpu_user_regs *regs) | |
*/ | |
if ( !cpus_have_const_cap(ARM_SMCCC_1_1) ) | |
{ | |
- printk_once(XENLOG_WARNING | |
- "ZynqMP firmware Error: no SMCCC 1.1 support. Disabling firmware calls\n"); | |
+ printk_once( | |
+ XENLOG_WARNING | |
+ "ZynqMP firmware Error: no SMCCC 1.1 support. Disabling firmware calls\n"); | |
return false; | |
} | |
return zynqmp_eemi(regs); | |
} | |
-PLATFORM_START(xilinx_zynqmp, "Xilinx ZynqMP") | |
- .compatible = zynqmp_dt_compat, | |
- .smc = zynqmp_smc, | |
-PLATFORM_END | |
+PLATFORM_START(xilinx_zynqmp, "Xilinx ZynqMP").compatible = zynqmp_dt_compat, | |
+ .smc = zynqmp_smc, | |
+ PLATFORM_END | |
-/* | |
- * Local variables: | |
- * mode: C | |
- * c-file-style: "BSD" | |
- * c-basic-offset: 4 | |
- * indent-tabs-mode: nil | |
- * End: | |
- */ | |
+ /* | |
+ * Local variables: | |
+ * mode: C | |
+ * c-file-style: "BSD" | |
+ * c-basic-offset: 4 | |
+ * indent-tabs-mode: nil | |
+ * End: | |
+ */ | |
diff --git a/xen/arch/arm/psci.c b/xen/arch/arm/psci.c | |
index 0c90c2305c..6fd410a58e 100644 | |
--- a/xen/arch/arm/psci.c | |
+++ b/xen/arch/arm/psci.c | |
@@ -17,7 +17,6 @@ | |
* GNU General Public License for more details. | |
*/ | |
- | |
#include <xen/types.h> | |
#include <xen/init.h> | |
#include <xen/mm.h> | |
@@ -33,9 +32,9 @@ | |
* (native-width) function ID. | |
*/ | |
#ifdef CONFIG_ARM_64 | |
-#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name | |
+#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name | |
#else | |
-#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN32_##name | |
+#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN32_##name | |
#endif | |
uint32_t psci_ver; | |
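PSCI_0_2_FN_NATIVE above uses token pasting so a call site names a PSCI function once and picks up the SMC32 or SMC64 function ID appropriate to the build. The technique in isolation (the IDs below are the standard PSCI 0.2 CPU_ON values, used purely for illustration):

    #include <stdio.h>

    #define FN32_CPU_ON 0x84000003u /* PSCI 0.2 SMC32 CPU_ON */
    #define FN64_CPU_ON 0xc4000003u /* PSCI 0.2 SMC64 CPU_ON */

    #ifdef BUILD_64BIT
    #define FN_NATIVE(name) FN64_##name
    #else
    #define FN_NATIVE(name) FN32_##name
    #endif

    int main(void)
    {
        /* Expands to FN32_CPU_ON or FN64_CPU_ON depending on the build. */
        printf("CPU_ON fid: %#x\n", FN_NATIVE(CPU_ON));
        return 0;
    }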
@@ -43,7 +42,7 @@ uint32_t smccc_ver; | |
static uint32_t psci_cpu_on_nr; | |
-#define PSCI_RET(res) ((int32_t)(res).a0) | |
+#define PSCI_RET(res) ((int32_t)(res).a0) | |
int call_psci_cpu_on(int cpu) | |
{ | |
@@ -170,11 +169,10 @@ static int __init psci_init_0_1(void) | |
static int __init psci_init_0_2(void) | |
{ | |
- static const struct dt_device_match psci_ids[] __initconst = | |
- { | |
+ static const struct dt_device_match psci_ids[] __initconst = { | |
DT_MATCH_COMPATIBLE("arm,psci-0.2"), | |
DT_MATCH_COMPATIBLE("arm,psci-1.0"), | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
int ret; | |
struct arm_smccc_res res; | |
@@ -193,7 +191,8 @@ static int __init psci_init_0_2(void) | |
} | |
else | |
{ | |
- if ( acpi_psci_hvc_present() ) { | |
+ if ( acpi_psci_hvc_present() ) | |
+ { | |
printk("PSCI conduit must be SMC, but is HVC\n"); | |
return -EINVAL; | |
} | |
@@ -231,8 +230,8 @@ int __init psci_init(void) | |
psci_init_smccc(); | |
- printk(XENLOG_INFO "Using PSCI v%u.%u\n", | |
- PSCI_VERSION_MAJOR(psci_ver), PSCI_VERSION_MINOR(psci_ver)); | |
+ printk(XENLOG_INFO "Using PSCI v%u.%u\n", PSCI_VERSION_MAJOR(psci_ver), | |
+ PSCI_VERSION_MINOR(psci_ver)); | |
return 0; | |
} | |
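Annotation (not part of the diff): the psci.c hunks are pure re-alignment, but the macro they touch deserves a second look from reviewers unfamiliar with it: PSCI_0_2_FN_NATIVE(name) pastes the name onto the 32- or 64-bit function-ID prefix at compile time. A tiny standalone sketch of the same ## technique; the function-ID values below follow the PSCI 0.2 numbering convention and are illustrative, not taken from this diff:

#include <stdio.h>

/* Illustrative function IDs following the PSCI 0.2 convention. */
#define PSCI_0_2_FN32_CPU_ON 0x84000003U
#define PSCI_0_2_FN64_CPU_ON 0xc4000003U

/* Same token-pasting trick as the hunk above: pick the native width. */
#ifdef CONFIG_ARM_64
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name
#else
#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN32_##name
#endif

int main(void)
{
    printf("CPU_ON fid: %#x\n", PSCI_0_2_FN_NATIVE(CPU_ON));
    return 0;
}

Compile with -DCONFIG_ARM_64 to see the 64-bit ID selected; without it the 32-bit ID is used, mirroring the #ifdef in the hunk.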
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c | |
index d5d188a105..f94d500ae2 100644 | |
--- a/xen/arch/arm/setup.c | |
+++ b/xen/arch/arm/setup.c | |
@@ -86,7 +86,7 @@ static void __init init_idle_domain(void) | |
/* TODO: setup_idle_pagetable(); */ | |
} | |
-static const char * __initdata processor_implementers[] = { | |
+static const char *__initdata processor_implementers[] = { | |
['A'] = "ARM Limited", | |
['B'] = "Broadcom Corporation", | |
['C'] = "Cavium Inc.", | |
@@ -114,31 +114,31 @@ static void __init processor_id(void) | |
printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n", | |
c->midr.architecture); | |
- printk("Processor: %08"PRIx32": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n", | |
- c->midr.bits, implementer, | |
- c->midr.variant, c->midr.part_number, c->midr.revision); | |
+ printk("Processor: %08" PRIx32 | |
+ ": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n", | |
+ c->midr.bits, implementer, c->midr.variant, c->midr.part_number, | |
+ c->midr.revision); | |
#if defined(CONFIG_ARM_64) | |
printk("64-bit Execution:\n"); | |
- printk(" Processor Features: %016"PRIx64" %016"PRIx64"\n", | |
+ printk(" Processor Features: %016" PRIx64 " %016" PRIx64 "\n", | |
boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]); | |
printk(" Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n", | |
cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No", | |
cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No", | |
cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No", | |
cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No"); | |
- printk(" Extensions:%s%s%s\n", | |
- cpu_has_fp ? " FloatingPoint" : "", | |
+ printk(" Extensions:%s%s%s\n", cpu_has_fp ? " FloatingPoint" : "", | |
cpu_has_simd ? " AdvancedSIMD" : "", | |
cpu_has_gicv3 ? " GICv3-SysReg" : ""); | |
- printk(" Debug Features: %016"PRIx64" %016"PRIx64"\n", | |
+ printk(" Debug Features: %016" PRIx64 " %016" PRIx64 "\n", | |
boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]); | |
- printk(" Auxiliary Features: %016"PRIx64" %016"PRIx64"\n", | |
+ printk(" Auxiliary Features: %016" PRIx64 " %016" PRIx64 "\n", | |
boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]); | |
- printk(" Memory Model Features: %016"PRIx64" %016"PRIx64"\n", | |
+ printk(" Memory Model Features: %016" PRIx64 " %016" PRIx64 "\n", | |
boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]); | |
- printk(" ISA Features: %016"PRIx64" %016"PRIx64"\n", | |
+ printk(" ISA Features: %016" PRIx64 " %016" PRIx64 "\n", | |
boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]); | |
#endif | |
@@ -149,25 +149,23 @@ static void __init processor_id(void) | |
if ( cpu_has_aarch32 ) | |
{ | |
printk("32-bit Execution:\n"); | |
- printk(" Processor Features: %08"PRIx32":%08"PRIx32"\n", | |
+ printk(" Processor Features: %08" PRIx32 ":%08" PRIx32 "\n", | |
boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]); | |
printk(" Instruction Sets:%s%s%s%s%s%s\n", | |
- cpu_has_aarch32 ? " AArch32" : "", | |
- cpu_has_arm ? " A32" : "", | |
- cpu_has_thumb ? " Thumb" : "", | |
- cpu_has_thumb2 ? " Thumb-2" : "", | |
+ cpu_has_aarch32 ? " AArch32" : "", cpu_has_arm ? " A32" : "", | |
+ cpu_has_thumb ? " Thumb" : "", cpu_has_thumb2 ? " Thumb-2" : "", | |
cpu_has_thumbee ? " ThumbEE" : "", | |
cpu_has_jazelle ? " Jazelle" : ""); | |
printk(" Extensions:%s%s\n", | |
cpu_has_gentimer ? " GenericTimer" : "", | |
cpu_has_security ? " Security" : ""); | |
- printk(" Debug Features: %08"PRIx32"\n", | |
+ printk(" Debug Features: %08" PRIx32 "\n", | |
boot_cpu_data.dbg32.bits[0]); | |
- printk(" Auxiliary Features: %08"PRIx32"\n", | |
+ printk(" Auxiliary Features: %08" PRIx32 "\n", | |
boot_cpu_data.aux32.bits[0]); | |
printk(" Memory Model Features: " | |
- "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n", | |
+ "%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n", | |
boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1], | |
boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]); | |
printk(" ISA Features: %08x %08x %08x %08x %08x %08x\n", | |
@@ -188,11 +186,11 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, | |
{ | |
int i, nr = fdt_num_mem_rsv(device_tree_flattened); | |
- for ( i = first; i < nr ; i++ ) | |
+ for ( i = first; i < nr; i++ ) | |
{ | |
paddr_t r_s, r_e; | |
- if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e ) < 0 ) | |
+ if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e) < 0 ) | |
/* If we can't read it, pretend it doesn't exist... */ | |
continue; | |
@@ -200,8 +198,8 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, | |
if ( s < r_e && r_s < e ) | |
{ | |
- dt_unreserved_regions(r_e, e, cb, i+1); | |
- dt_unreserved_regions(s, r_s, cb, i+1); | |
+ dt_unreserved_regions(r_e, e, cb, i + 1); | |
+ dt_unreserved_regions(s, r_s, cb, i + 1); | |
return; | |
} | |
} | |
@@ -209,9 +207,8 @@ void __init dt_unreserved_regions(paddr_t s, paddr_t e, | |
cb(s, e); | |
} | |
-struct bootmodule __init *add_boot_module(bootmodule_kind kind, | |
- paddr_t start, paddr_t size, | |
- bool domU) | |
+struct bootmodule __init *add_boot_module(bootmodule_kind kind, paddr_t start, | |
+ paddr_t size, bool domU) | |
{ | |
struct bootmodules *mods = &bootinfo.modules; | |
struct bootmodule *mod; | |
@@ -219,11 +216,12 @@ struct bootmodule __init *add_boot_module(bootmodule_kind kind, | |
if ( mods->nr_mods == MAX_MODULES ) | |
{ | |
- printk("Ignoring %s boot module at %"PRIpaddr"-%"PRIpaddr" (too many)\n", | |
+ printk("Ignoring %s boot module at %" PRIpaddr "-%" PRIpaddr | |
+ " (too many)\n", | |
boot_module_kind_as_string(kind), start, start + size); | |
return NULL; | |
} | |
- for ( i = 0 ; i < mods->nr_mods ; i++ ) | |
+ for ( i = 0; i < mods->nr_mods; i++ ) | |
{ | |
mod = &mods->module[i]; | |
if ( mod->kind == kind && mod->start == start ) | |
@@ -248,12 +246,12 @@ struct bootmodule __init *add_boot_module(bootmodule_kind kind, | |
* XSM, DTB) or Dom0 modules. This is not suitable for looking up guest | |
* modules. | |
*/ | |
-struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind) | |
+struct bootmodule *__init boot_module_find_by_kind(bootmodule_kind kind) | |
{ | |
struct bootmodules *mods = &bootinfo.modules; | |
struct bootmodule *mod; | |
int i; | |
- for (i = 0 ; i < mods->nr_mods ; i++ ) | |
+ for ( i = 0; i < mods->nr_mods; i++ ) | |
{ | |
mod = &mods->module[i]; | |
if ( mod->kind == kind && !mod->domU ) | |
@@ -292,13 +290,13 @@ void __init add_boot_cmdline(const char *name, const char *cmdline, | |
* XSM, DTB) or Dom0 modules. This is not suitable for looking up guest | |
* modules. | |
*/ | |
-struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind) | |
+struct bootcmdline *__init boot_cmdline_find_by_kind(bootmodule_kind kind) | |
{ | |
struct bootcmdlines *cmds = &bootinfo.cmdlines; | |
struct bootcmdline *cmd; | |
int i; | |
- for ( i = 0 ; i < cmds->nr_mods ; i++ ) | |
+ for ( i = 0; i < cmds->nr_mods; i++ ) | |
{ | |
cmd = &cmds->cmdline[i]; | |
if ( cmd->kind == kind && !cmd->domU ) | |
@@ -307,13 +305,13 @@ struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind) | |
return NULL; | |
} | |
-struct bootcmdline * __init boot_cmdline_find_by_name(const char *name) | |
+struct bootcmdline *__init boot_cmdline_find_by_name(const char *name) | |
{ | |
struct bootcmdlines *mods = &bootinfo.cmdlines; | |
struct bootcmdline *mod; | |
unsigned int i; | |
- for (i = 0 ; i < mods->nr_mods ; i++ ) | |
+ for ( i = 0; i < mods->nr_mods; i++ ) | |
{ | |
mod = &mods->cmdline[i]; | |
if ( strcmp(mod->dt_name, name) == 0 ) | |
@@ -322,14 +320,14 @@ struct bootcmdline * __init boot_cmdline_find_by_name(const char *name) | |
return NULL; | |
} | |
-struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kind, | |
- paddr_t start) | |
+struct bootmodule *__init | |
+boot_module_find_by_addr_and_kind(bootmodule_kind kind, paddr_t start) | |
{ | |
struct bootmodules *mods = &bootinfo.modules; | |
struct bootmodule *mod; | |
unsigned int i; | |
- for (i = 0 ; i < mods->nr_mods ; i++ ) | |
+ for ( i = 0; i < mods->nr_mods; i++ ) | |
{ | |
mod = &mods->module[i]; | |
if ( mod->kind == kind && mod->start == start ) | |
@@ -338,17 +336,24 @@ struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kin | |
return NULL; | |
} | |
-const char * __init boot_module_kind_as_string(bootmodule_kind kind) | |
+const char *__init boot_module_kind_as_string(bootmodule_kind kind) | |
{ | |
- switch ( kind ) | |
+ switch (kind) | |
{ | |
- case BOOTMOD_XEN: return "Xen"; | |
- case BOOTMOD_FDT: return "Device Tree"; | |
- case BOOTMOD_KERNEL: return "Kernel"; | |
- case BOOTMOD_RAMDISK: return "Ramdisk"; | |
- case BOOTMOD_XSM: return "XSM"; | |
- case BOOTMOD_UNKNOWN: return "Unknown"; | |
- default: BUG(); | |
+ case BOOTMOD_XEN: | |
+ return "Xen"; | |
+ case BOOTMOD_FDT: | |
+ return "Device Tree"; | |
+ case BOOTMOD_KERNEL: | |
+ return "Kernel"; | |
+ case BOOTMOD_RAMDISK: | |
+ return "Ramdisk"; | |
+ case BOOTMOD_XSM: | |
+ return "XSM"; | |
+ case BOOTMOD_UNKNOWN: | |
+ return "Unknown"; | |
+ default: | |
+ BUG(); | |
} | |
} | |
@@ -365,8 +370,7 @@ void __init discard_initial_modules(void) | |
if ( mi->module[i].kind == BOOTMOD_XEN ) | |
continue; | |
- if ( !mfn_valid(maddr_to_mfn(s)) || | |
- !mfn_valid(maddr_to_mfn(e)) ) | |
+ if ( !mfn_valid(maddr_to_mfn(s)) || !mfn_valid(maddr_to_mfn(e)) ) | |
continue; | |
dt_unreserved_regions(s, e, init_domheap_pages, 0); | |
@@ -386,18 +390,17 @@ void __init discard_initial_modules(void) | |
* For non-recursive callers first_mod should normally be 0 (all | |
* modules and Xen itself) or 1 (all modules but not Xen). | |
*/ | |
-static paddr_t __init consider_modules(paddr_t s, paddr_t e, | |
- uint32_t size, paddr_t align, | |
- int first_mod) | |
+static paddr_t __init consider_modules(paddr_t s, paddr_t e, uint32_t size, | |
+ paddr_t align, int first_mod) | |
{ | |
const struct bootmodules *mi = &bootinfo.modules; | |
int i; | |
int nr_rsvd; | |
- s = (s+align-1) & ~(align-1); | |
- e = e & ~(align-1); | |
+ s = (s + align - 1) & ~(align - 1); | |
+ e = e & ~(align - 1); | |
- if ( s > e || e - s < size ) | |
+ if ( s > e || e - s < size ) | |
return 0; | |
/* First check the boot modules */ | |
@@ -408,11 +411,11 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, | |
if ( s < mod_e && mod_s < e ) | |
{ | |
- mod_e = consider_modules(mod_e, e, size, align, i+1); | |
+ mod_e = consider_modules(mod_e, e, size, align, i + 1); | |
if ( mod_e ) | |
return mod_e; | |
- return consider_modules(s, mod_s, size, align, i+1); | |
+ return consider_modules(s, mod_s, size, align, i + 1); | |
} | |
} | |
@@ -424,9 +427,8 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, | |
{ | |
paddr_t mod_s, mod_e; | |
- if ( fdt_get_mem_rsv(device_tree_flattened, | |
- i - mi->nr_mods, | |
- &mod_s, &mod_e ) < 0 ) | |
+ if ( fdt_get_mem_rsv(device_tree_flattened, i - mi->nr_mods, &mod_s, | |
+ &mod_e) < 0 ) | |
/* If we can't read it, pretend it doesn't exist... */ | |
continue; | |
@@ -435,11 +437,11 @@ static paddr_t __init consider_modules(paddr_t s, paddr_t e, | |
if ( s < mod_e && mod_s < e ) | |
{ | |
- mod_e = consider_modules(mod_e, e, size, align, i+1); | |
+ mod_e = consider_modules(mod_e, e, size, align, i + 1); | |
if ( mod_e ) | |
return mod_e; | |
- return consider_modules(s, mod_s, size, align, i+1); | |
+ return consider_modules(s, mod_s, size, align, i + 1); | |
} | |
} | |
return e; | |
@@ -487,7 +489,7 @@ static void __init init_pdx(void) | |
uint64_t mask = pdx_init_mask(bootinfo.mem.bank[0].start); | |
int bank; | |
- for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) | |
+ for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) | |
{ | |
bank_start = bootinfo.mem.bank[bank].start; | |
bank_size = bootinfo.mem.bank[bank].size; | |
@@ -495,25 +497,24 @@ static void __init init_pdx(void) | |
mask |= bank_start | pdx_region_mask(bank_start, bank_size); | |
} | |
- for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) | |
+ for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) | |
{ | |
bank_start = bootinfo.mem.bank[bank].start; | |
bank_size = bootinfo.mem.bank[bank].size; | |
- if (~mask & pdx_region_mask(bank_start, bank_size)) | |
+ if ( ~mask & pdx_region_mask(bank_start, bank_size) ) | |
mask = 0; | |
} | |
pfn_pdx_hole_setup(mask >> PAGE_SHIFT); | |
- for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) | |
+ for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) | |
{ | |
bank_start = bootinfo.mem.bank[bank].start; | |
bank_size = bootinfo.mem.bank[bank].size; | |
bank_end = bank_start + bank_size; | |
- set_pdx_range(paddr_to_pfn(bank_start), | |
- paddr_to_pfn(bank_end)); | |
+ set_pdx_range(paddr_to_pfn(bank_start), paddr_to_pfn(bank_end)); | |
} | |
} | |
@@ -540,8 +541,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
init_pdx(); | |
ram_start = bootinfo.mem.bank[0].start; | |
- ram_size = bootinfo.mem.bank[0].size; | |
- ram_end = ram_start + ram_size; | |
+ ram_size = bootinfo.mem.bank[0].size; | |
+ ram_end = ram_start + ram_size; | |
for ( i = 1; i < bootinfo.mem.nr_banks; i++ ) | |
{ | |
@@ -549,9 +550,9 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
paddr_t bank_size = bootinfo.mem.bank[i].size; | |
paddr_t bank_end = bank_start + bank_size; | |
- ram_size = ram_size + bank_size; | |
- ram_start = min(ram_start,bank_start); | |
- ram_end = max(ram_end,bank_end); | |
+ ram_size = ram_size + bank_size; | |
+ ram_start = min(ram_start, bank_start); | |
+ ram_end = max(ram_end, bank_end); | |
} | |
total_pages = ram_pages = ram_size >> PAGE_SHIFT; | |
@@ -570,31 +571,30 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
*/ | |
heap_pages = ram_pages; | |
if ( opt_xenheap_megabytes ) | |
- xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT); | |
+ xenheap_pages = opt_xenheap_megabytes << (20 - PAGE_SHIFT); | |
else | |
{ | |
- xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL; | |
- xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT)); | |
- xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT)); | |
+ xenheap_pages = (heap_pages / 32 + 0x1fffUL) & ~0x1fffUL; | |
+ xenheap_pages = max(xenheap_pages, 32UL << (20 - PAGE_SHIFT)); | |
+ xenheap_pages = min(xenheap_pages, 1UL << (30 - PAGE_SHIFT)); | |
} | |
- do | |
- { | |
- e = consider_modules(ram_start, ram_end, | |
- pfn_to_paddr(xenheap_pages), | |
- 32<<20, 0); | |
+ do { | |
+ e = consider_modules(ram_start, ram_end, pfn_to_paddr(xenheap_pages), | |
+ 32 << 20, 0); | |
if ( e ) | |
break; | |
xenheap_pages >>= 1; | |
- } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) ); | |
+ } while ( !opt_xenheap_megabytes && | |
+ xenheap_pages > 32 << (20 - PAGE_SHIFT) ); | |
- if ( ! e ) | |
+ if ( !e ) | |
panic("Not not enough space for xenheap\n"); | |
domheap_pages = heap_pages - xenheap_pages; | |
- printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n", | |
+ printk("Xen heap: %" PRIpaddr "-%" PRIpaddr " (%lu pages%s)\n", | |
e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages, | |
opt_xenheap_megabytes ? ", from command-line" : ""); | |
printk("Dom heap: %lu pages\n", domheap_pages); | |
@@ -605,7 +605,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
* Need a single mapped page for populating bootmem_region_list | |
* and enough mapped pages for copying the DTB. | |
*/ | |
- dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT; | |
+ dtb_pages = (dtb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1; | |
boot_mfn_end = mfn_x(xenheap_mfn_end); | |
@@ -642,8 +642,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
e = bank_end; | |
/* Avoid the xenheap */ | |
- if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)) | |
- && mfn_to_maddr(xenheap_mfn_start) < e ) | |
+ if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)) && | |
+ mfn_to_maddr(xenheap_mfn_start) < e ) | |
{ | |
e = mfn_to_maddr(xenheap_mfn_start); | |
n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages)); | |
@@ -677,7 +677,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
init_pdx(); | |
total_pages = 0; | |
- for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ ) | |
+ for ( bank = 0; bank < bootinfo.mem.nr_banks; bank++ ) | |
{ | |
paddr_t bank_start = bootinfo.mem.bank[bank].start; | |
paddr_t bank_size = bootinfo.mem.bank[bank].size; | |
@@ -685,10 +685,11 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
paddr_t s, e; | |
ram_size = ram_size + bank_size; | |
- ram_start = min(ram_start,bank_start); | |
- ram_end = max(ram_end,bank_end); | |
+ ram_start = min(ram_start, bank_start); | |
+ ram_end = max(ram_end, bank_end); | |
- setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT); | |
+ setup_xenheap_mappings(bank_start >> PAGE_SHIFT, | |
+ bank_size >> PAGE_SHIFT); | |
s = bank_start; | |
while ( s < bank_end ) | |
@@ -719,7 +720,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
/* | |
* Need enough mapped pages for copying the DTB. | |
*/ | |
- dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT; | |
+ dtb_pages = (dtb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
/* Copy the DTB. */ | |
fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1))); | |
@@ -734,8 +735,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) | |
size_t __read_mostly dcache_line_bytes; | |
/* C entry point for boot CPU */ | |
-void __init start_xen(unsigned long boot_phys_offset, | |
- unsigned long fdt_paddr) | |
+void __init start_xen(unsigned long boot_phys_offset, unsigned long fdt_paddr) | |
{ | |
size_t fdt_size; | |
int cpus, i; | |
@@ -758,7 +758,8 @@ void __init start_xen(unsigned long boot_phys_offset, | |
idle_vcpu[0] = current; | |
setup_virtual_regions(NULL, NULL); | |
- /* Initialize traps early allow us to get backtrace when an error occurred */ | |
+    /* Initialize traps early to allow us to get a backtrace when an error | |
+     * occurs */ | |
init_traps(); | |
setup_pagetables(boot_phys_offset); | |
@@ -767,10 +768,11 @@ void __init start_xen(unsigned long boot_phys_offset, | |
device_tree_flattened = early_fdt_map(fdt_paddr); | |
if ( !device_tree_flattened ) | |
- panic("Invalid device tree blob at physical address %#lx.\n" | |
- "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n" | |
- "Please check your bootloader.\n", | |
- fdt_paddr); | |
+ panic( | |
+ "Invalid device tree blob at physical address %#lx.\n" | |
+ "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n" | |
+ "Please check your bootloader.\n", | |
+ fdt_paddr); | |
fdt_size = boot_fdt_info(device_tree_flattened, fdt_paddr); | |
@@ -779,9 +781,9 @@ void __init start_xen(unsigned long boot_phys_offset, | |
cmdline_parse(cmdline); | |
/* Register Xen's load address as a boot module. */ | |
- xen_bootmodule = add_boot_module(BOOTMOD_XEN, | |
- (paddr_t)(uintptr_t)(_start + boot_phys_offset), | |
- (paddr_t)(uintptr_t)(_end - _start + 1), false); | |
+ xen_bootmodule = add_boot_module( | |
+ BOOTMOD_XEN, (paddr_t)(uintptr_t)(_start + boot_phys_offset), | |
+ (paddr_t)(uintptr_t)(_end - _start + 1), false); | |
BUG_ON(!xen_bootmodule); | |
setup_mm(fdt_paddr, fdt_size); | |
@@ -868,7 +870,7 @@ void __init start_xen(unsigned long boot_phys_offset, | |
do_presmp_initcalls(); | |
- for_each_present_cpu ( i ) | |
+ for_each_present_cpu (i) | |
{ | |
if ( (num_online_cpus() < cpus) && !cpu_online(i) ) | |
{ | |
@@ -901,7 +903,7 @@ void __init start_xen(unsigned long boot_phys_offset, | |
* Xen vGIC supports a maximum of 992 interrupt lines. | |
* 32 are subtracted to cover local IRQs. | |
*/ | |
- dom0_cfg.arch.nr_spis = min(gic_number_lines(), (unsigned int) 992) - 32; | |
+ dom0_cfg.arch.nr_spis = min(gic_number_lines(), (unsigned int)992) - 32; | |
if ( gic_number_lines() > 992 ) | |
printk(XENLOG_WARNING "Maximum number of vGIC IRQs exceeded.\n"); | |
dom0_cfg.arch.tee_type = tee_get_type(); | |
@@ -911,7 +913,7 @@ void __init start_xen(unsigned long boot_phys_offset, | |
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) ) | |
panic("Error creating domain 0\n"); | |
- if ( construct_dom0(dom0) != 0) | |
+ if ( construct_dom0(dom0) != 0 ) | |
panic("Could not set up DOM0 guest OS\n"); | |
heap_init_late(); | |
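Annotation (not part of the diff): most of the setup.c churn is whitespace, but the xenheap-sizing hunk is easier to sanity-check outside the hypervisor. Absent a command-line override, the heap is 1/32 of RAM rounded up to a 32 MiB multiple of pages, clamped between 32 MiB and 1 GiB, and later halved until a free range fits. A standalone sketch of just that arithmetic, assuming PAGE_SHIFT = 12 as on this port; the RAM size is an arbitrary example:

#include <stdio.h>

#define PAGE_SHIFT 12UL

static unsigned long xenheap_size_pages(unsigned long ram_pages)
{
    /* Same arithmetic as the hunk above: RAM/32, rounded up to 32 MiB
     * worth of pages (0x2000 pages), clamped to [32 MiB, 1 GiB]. */
    unsigned long pages = (ram_pages / 32 + 0x1fffUL) & ~0x1fffUL;

    if ( pages < (32UL << (20 - PAGE_SHIFT)) )
        pages = 32UL << (20 - PAGE_SHIFT);
    if ( pages > (1UL << (30 - PAGE_SHIFT)) )
        pages = 1UL << (30 - PAGE_SHIFT);
    return pages;
}

int main(void)
{
    unsigned long ram_pages = (2UL << 30) >> PAGE_SHIFT; /* 2 GiB example */

    printf("xenheap: %lu pages (%lu MiB)\n", xenheap_size_pages(ram_pages),
           xenheap_size_pages(ram_pages) >> (20 - PAGE_SHIFT));
    return 0;
}

For the 2 GiB example this yields 16384 pages (64 MiB), i.e. exactly RAM/32, since that is already a 32 MiB multiple and inside the clamp.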
diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c | |
index ce1fcc8ef9..f35cebe111 100644 | |
--- a/xen/arch/arm/smp.c | |
+++ b/xen/arch/arm/smp.c | |
@@ -7,7 +7,8 @@ | |
void flush_tlb_mask(const cpumask_t *mask) | |
{ | |
- /* No need to IPI other processors on ARM, the processor takes care of it. */ | |
+ /* No need to IPI other processors on ARM, the processor takes care of it. | |
+ */ | |
flush_all_guests_tlb(); | |
} | |
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c | |
index 00b64c3322..b9064d905a 100644 | |
--- a/xen/arch/arm/smpboot.c | |
+++ b/xen/arch/arm/smpboot.c | |
@@ -43,18 +43,17 @@ cpumask_t cpu_possible_map; | |
struct cpuinfo_arm cpu_data[NR_CPUS]; | |
/* CPU logical map: map xen cpuid to an MPIDR */ | |
-register_t __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; | |
+register_t __cpu_logical_map[NR_CPUS] = {[0 ... NR_CPUS - 1] = MPIDR_INVALID}; | |
/* Fake one node for now. See also include/asm-arm/numa.h */ | |
-nodemask_t __read_mostly node_online_map = { { [0] = 1UL } }; | |
+nodemask_t __read_mostly node_online_map = {{[0] = 1UL}}; | |
/* Xen stack for bringing up the first CPU. */ | |
static unsigned char __initdata cpu0_boot_stack[STACK_SIZE] | |
- __attribute__((__aligned__(STACK_SIZE))); | |
+ __attribute__((__aligned__(STACK_SIZE))); | |
/* Boot cpu data */ | |
-struct init_info init_data = | |
-{ | |
+struct init_info init_data = { | |
.stack = cpu0_boot_stack, | |
}; | |
@@ -95,8 +94,7 @@ static void remove_cpu_sibling_map(int cpu) | |
free_cpumask_var(per_cpu(cpu_core_mask, cpu)); | |
} | |
-void __init | |
-smp_clear_cpu_maps (void) | |
+void __init smp_clear_cpu_maps(void) | |
{ | |
cpumask_clear(&cpu_possible_map); | |
cpumask_clear(&cpu_online_map); | |
@@ -116,10 +114,8 @@ static void __init dt_smp_init_cpus(void) | |
struct dt_device_node *cpu; | |
unsigned int i, j; | |
unsigned int cpuidx = 1; | |
- static register_t tmp_map[NR_CPUS] __initdata = | |
- { | |
- [0 ... NR_CPUS - 1] = MPIDR_INVALID | |
- }; | |
+ static register_t tmp_map[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = | |
+ MPIDR_INVALID}; | |
bool bootcpu_valid = false; | |
int rc; | |
@@ -128,11 +124,11 @@ static void __init dt_smp_init_cpus(void) | |
if ( !cpus ) | |
{ | |
printk(XENLOG_WARNING "WARNING: Can't find /cpus in the device tree.\n" | |
- "Using only 1 CPU\n"); | |
+ "Using only 1 CPU\n"); | |
return; | |
} | |
- dt_for_each_child_node( cpus, cpu ) | |
+ dt_for_each_child_node(cpus, cpu) | |
{ | |
const __be32 *prop; | |
u64 addr; | |
@@ -166,7 +162,7 @@ static void __init dt_smp_init_cpus(void) | |
hwid = addr; | |
if ( hwid != addr ) | |
{ | |
- printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %"PRIx64"\n", | |
+ printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %" PRIx64 "\n", | |
dt_node_full_name(cpu), addr); | |
continue; | |
} | |
@@ -177,7 +173,8 @@ static void __init dt_smp_init_cpus(void) | |
*/ | |
if ( hwid & ~MPIDR_HWID_MASK ) | |
{ | |
- printk(XENLOG_WARNING "cpu node `%s`: invalid hwid value (0x%"PRIregister")\n", | |
+ printk(XENLOG_WARNING | |
+ "cpu node `%s`: invalid hwid value (0x%" PRIregister ")\n", | |
dt_node_full_name(cpu), hwid); | |
continue; | |
} | |
@@ -192,9 +189,11 @@ static void __init dt_smp_init_cpus(void) | |
{ | |
if ( tmp_map[j] == hwid ) | |
{ | |
- printk(XENLOG_WARNING | |
- "cpu node `%s`: duplicate /cpu reg properties %"PRIregister" in the DT\n", | |
- dt_node_full_name(cpu), hwid); | |
+ printk( | |
+ XENLOG_WARNING | |
+ "cpu node `%s`: duplicate /cpu reg properties %" PRIregister | |
+ " in the DT\n", | |
+ dt_node_full_name(cpu), hwid); | |
break; | |
} | |
} | |
@@ -228,7 +227,8 @@ static void __init dt_smp_init_cpus(void) | |
if ( (rc = arch_cpu_init(i, cpu)) < 0 ) | |
{ | |
- printk("cpu%d init failed (hwid %"PRIregister"): %d\n", i, hwid, rc); | |
+ printk("cpu%d init failed (hwid %" PRIregister "): %d\n", i, hwid, | |
+ rc); | |
tmp_map[i] = MPIDR_INVALID; | |
} | |
else | |
@@ -238,7 +238,7 @@ static void __init dt_smp_init_cpus(void) | |
if ( !bootcpu_valid ) | |
{ | |
printk(XENLOG_WARNING "DT missing boot CPU MPIDR[23:0]\n" | |
- "Using only 1 CPU\n"); | |
+ "Using only 1 CPU\n"); | |
return; | |
} | |
@@ -261,7 +261,8 @@ void __init smp_init_cpus(void) | |
if ( (rc = arch_smp_init()) < 0 ) | |
{ | |
printk(XENLOG_WARNING "SMP init failed (%d)\n" | |
- "Using only 1 CPU\n", rc); | |
+ "Using only 1 CPU\n", | |
+ rc); | |
return; | |
} | |
@@ -271,13 +272,13 @@ void __init smp_init_cpus(void) | |
acpi_smp_init_cpus(); | |
if ( opt_hmp_unsafe ) | |
- warning_add("WARNING: HMP COMPUTING HAS BEEN ENABLED.\n" | |
- "It has implications on the security and stability of the system,\n" | |
- "unless the cpu affinity of all domains is specified.\n"); | |
+ warning_add( | |
+ "WARNING: HMP COMPUTING HAS BEEN ENABLED.\n" | |
+ "It has implications on the security and stability of the system,\n" | |
+ "unless the cpu affinity of all domains is specified.\n"); | |
} | |
-int __init | |
-smp_get_max_cpus (void) | |
+int __init smp_get_max_cpus(void) | |
{ | |
int i, max_cpus = 0; | |
@@ -288,8 +289,7 @@ smp_get_max_cpus (void) | |
return max_cpus; | |
} | |
-void __init | |
-smp_prepare_cpus(void) | |
+void __init smp_prepare_cpus(void) | |
{ | |
cpumask_copy(&cpu_present_map, &cpu_possible_map); | |
@@ -301,7 +301,7 @@ void start_secondary(void) | |
{ | |
unsigned int cpuid = init_data.cpuid; | |
- memset(get_cpu_info(), 0, sizeof (struct cpu_info)); | |
+ memset(get_cpu_info(), 0, sizeof(struct cpu_info)); | |
set_processor_id(cpuid); | |
@@ -320,7 +320,8 @@ void start_secondary(void) | |
if ( !opt_hmp_unsafe && | |
current_cpu_data.midr.bits != boot_cpu_data.midr.bits ) | |
{ | |
- printk(XENLOG_ERR "CPU%u MIDR (0x%x) does not match boot CPU MIDR (0x%x),\n" | |
+ printk(XENLOG_ERR | |
+ "CPU%u MIDR (0x%x) does not match boot CPU MIDR (0x%x),\n" | |
"disable cpu (see big.LITTLE.txt under docs/).\n", | |
smp_processor_id(), current_cpu_data.midr.bits, | |
boot_cpu_data.midr.bits); | |
@@ -329,9 +330,10 @@ void start_secondary(void) | |
if ( dcache_line_bytes != read_dcache_line_bytes() ) | |
{ | |
- printk(XENLOG_ERR "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n", | |
- smp_processor_id(), read_dcache_line_bytes(), | |
- dcache_line_bytes); | |
+ printk( | |
+ XENLOG_ERR | |
+ "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n", | |
+ smp_processor_id(), read_dcache_line_bytes(), dcache_line_bytes); | |
stop_cpu(); | |
} | |
@@ -386,7 +388,7 @@ void __cpu_disable(void) | |
smp_mb(); | |
- /* Return to caller; eventually the IPI mechanism will unwind and the | |
+ /* Return to caller; eventually the IPI mechanism will unwind and the | |
* scheduler will drop to the idle loop, which will call stop_cpu(). */ | |
} | |
@@ -502,12 +504,11 @@ void __cpu_die(unsigned int cpu) | |
} | |
static int cpu_smpboot_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
- void *hcpu) | |
+ unsigned long action, void *hcpu) | |
{ | |
unsigned int cpu = (unsigned long)hcpu; | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_DEAD: | |
remove_cpu_sibling_map(cpu); | |
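Annotation (not part of the diff): one construct in the smpboot.c hunks that the formatter merely re-spaced is the GNU range designated initializer, [0 ... NR_CPUS - 1] = MPIDR_INVALID, which fills the whole logical map with a poison value before any CPU is enumerated. A minimal sketch of the same extension (GCC/Clang only; the sizes and values below are placeholders, not Xen's):

#include <stdio.h>

#define NR_CPUS 8
#define MPIDR_INVALID (~0UL)

/* GNU extension: every slot starts as "invalid" until a CPU claims it. */
static unsigned long cpu_logical_map[NR_CPUS] = {
    [0 ... NR_CPUS - 1] = MPIDR_INVALID
};

int main(void)
{
    cpu_logical_map[0] = 0x0; /* boot CPU's MPIDR affinity bits */

    for ( int i = 0; i < NR_CPUS; i++ )
        printf("cpu%d -> %#lx\n", i, cpu_logical_map[i]);
    return 0;
}

The poison value is what lets the duplicate-hwid scan in dt_smp_init_cpus() distinguish "slot never filled" from a real MPIDR.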
diff --git a/xen/arch/arm/sysctl.c b/xen/arch/arm/sysctl.c | |
index fbfdb44eff..3a950ef827 100644 | |
--- a/xen/arch/arm/sysctl.c | |
+++ b/xen/arch/arm/sysctl.c | |
@@ -12,7 +12,9 @@ | |
#include <xen/hypercall.h> | |
#include <public/sysctl.h> | |
-void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { } | |
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi) | |
+{ | |
+} | |
long arch_do_sysctl(struct xen_sysctl *sysctl, | |
XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) | |
diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c | |
index ec5402e89b..09f80f93c1 100644 | |
--- a/xen/arch/arm/tee/optee.c | |
+++ b/xen/arch/arm/tee/optee.c | |
@@ -39,7 +39,7 @@ | |
#include <asm/tee/optee_rpc_cmd.h> | |
/* Number of SMCs known to the mediator */ | |
-#define OPTEE_MEDIATOR_SMC_COUNT 11 | |
+#define OPTEE_MEDIATOR_SMC_COUNT 11 | |
/* | |
* "The return code is an error that originated within the underlying | |
@@ -70,12 +70,12 @@ | |
* assumptions about OP-TEE heap usage, we limit the number of pages | |
* arbitrarily. | |
*/ | |
-#define MAX_TOTAL_SMH_BUF_PG 16384 | |
+#define MAX_TOTAL_SMH_BUF_PG 16384 | |
#define OPTEE_KNOWN_NSEC_CAPS OPTEE_SMC_NSEC_CAP_UNIPROCESSOR | |
-#define OPTEE_KNOWN_SEC_CAPS (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \ | |
- OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | \ | |
- OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) | |
+#define OPTEE_KNOWN_SEC_CAPS \ | |
+ (OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | \ | |
+ OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) | |
static unsigned int __read_mostly max_optee_threads; | |
@@ -83,7 +83,8 @@ static unsigned int __read_mostly max_optee_threads; | |
* Call context. OP-TEE can issue multiple RPC returns during one call. | |
* We need to preserve context during them. | |
*/ | |
-struct optee_std_call { | |
+struct optee_std_call | |
+{ | |
struct list_head list; | |
/* Page where shadowed copy of call arguments is stored */ | |
struct page_info *xen_arg_pg; | |
@@ -99,7 +100,8 @@ struct optee_std_call { | |
}; | |
/* Pre-allocated SHM buffer for RPC commands */ | |
-struct shm_rpc { | |
+struct shm_rpc | |
+{ | |
struct list_head list; | |
struct page_info *guest_page; | |
struct page_info *xen_arg_pg; | |
@@ -109,7 +111,8 @@ struct shm_rpc { | |
}; | |
/* Shared memory buffer for arbitrary data */ | |
-struct optee_shm_buf { | |
+struct optee_shm_buf | |
+{ | |
struct list_head list; | |
uint64_t cookie; | |
unsigned int page_cnt; | |
@@ -127,7 +130,8 @@ struct optee_shm_buf { | |
}; | |
/* Domain context */ | |
-struct optee_domain { | |
+struct optee_domain | |
+{ | |
struct list_head call_list; | |
struct list_head shm_rpc_list; | |
struct list_head optee_shm_buf_list; | |
@@ -207,7 +211,8 @@ static int optee_domain_init(struct domain *d) | |
&resp); | |
if ( resp.a0 != OPTEE_SMC_RETURN_OK ) | |
{ | |
- printk(XENLOG_WARNING "%pd: Unable to create OPTEE client: rc = 0x%X\n", | |
+ printk(XENLOG_WARNING | |
+ "%pd: Unable to create OPTEE client: rc = 0x%X\n", | |
d, (uint32_t)resp.a0); | |
xfree(ctx); | |
@@ -326,14 +331,15 @@ static struct optee_std_call *get_std_call(struct optee_domain *ctx, | |
struct optee_std_call *call; | |
spin_lock(&ctx->lock); | |
- list_for_each_entry( call, &ctx->call_list, list ) | |
+ list_for_each_entry (call, &ctx->call_list, list) | |
{ | |
if ( call->optee_thread_id == thread_id ) | |
{ | |
if ( call->in_flight ) | |
{ | |
- gdprintk(XENLOG_WARNING, | |
- "Guest tries to execute call which is already in flight.\n"); | |
+ gdprintk( | |
+ XENLOG_WARNING, | |
+ "Guest tries to execute call which is already in flight.\n"); | |
goto out; | |
} | |
call->in_flight = true; | |
@@ -385,13 +391,14 @@ static struct shm_rpc *allocate_and_pin_shm_rpc(struct optee_domain *ctx, | |
spin_lock(&ctx->lock); | |
/* Check if there is existing SHM with the same cookie. */ | |
- list_for_each_entry( shm_rpc_tmp, &ctx->shm_rpc_list, list ) | |
+ list_for_each_entry (shm_rpc_tmp, &ctx->shm_rpc_list, list) | |
{ | |
if ( shm_rpc_tmp->cookie == cookie ) | |
{ | |
spin_unlock(&ctx->lock); | |
gdprintk(XENLOG_WARNING, | |
- "Guest tries to use the same RPC SHM cookie %"PRIx64"\n", | |
+ "Guest tries to use the same RPC SHM cookie %" PRIx64 | |
+ "\n", | |
cookie); | |
goto err; | |
} | |
@@ -419,7 +426,7 @@ static void free_shm_rpc(struct optee_domain *ctx, uint64_t cookie) | |
spin_lock(&ctx->lock); | |
- list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list ) | |
+ list_for_each_entry (shm_rpc, &ctx->shm_rpc_list, list) | |
{ | |
if ( shm_rpc->cookie == cookie ) | |
{ | |
@@ -446,12 +453,12 @@ static struct shm_rpc *find_shm_rpc(struct optee_domain *ctx, uint64_t cookie) | |
struct shm_rpc *shm_rpc; | |
spin_lock(&ctx->lock); | |
- list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list ) | |
+ list_for_each_entry (shm_rpc, &ctx->shm_rpc_list, list) | |
{ | |
if ( shm_rpc->cookie == cookie ) | |
{ | |
- spin_unlock(&ctx->lock); | |
- return shm_rpc; | |
+ spin_unlock(&ctx->lock); | |
+ return shm_rpc; | |
} | |
} | |
spin_unlock(&ctx->lock); | |
@@ -469,15 +476,13 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx, | |
int old, new; | |
int err_code; | |
- do | |
- { | |
+ do { | |
old = atomic_read(&ctx->optee_shm_buf_pages); | |
new = old + pages_cnt; | |
if ( new >= MAX_TOTAL_SMH_BUF_PG ) | |
return ERR_PTR(-ENOMEM); | |
- } | |
- while ( unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages, | |
- old, new)) ); | |
+ } while ( | |
+ unlikely(old != atomic_cmpxchg(&ctx->optee_shm_buf_pages, old, new)) ); | |
/* | |
* TODO: Guest can try to register many small buffers, thus, forcing | |
@@ -499,13 +504,14 @@ static struct optee_shm_buf *allocate_optee_shm_buf(struct optee_domain *ctx, | |
spin_lock(&ctx->lock); | |
/* Check if there is already SHM with the same cookie */ | |
- list_for_each_entry( optee_shm_buf_tmp, &ctx->optee_shm_buf_list, list ) | |
+ list_for_each_entry (optee_shm_buf_tmp, &ctx->optee_shm_buf_list, list) | |
{ | |
if ( optee_shm_buf_tmp->cookie == cookie ) | |
{ | |
spin_unlock(&ctx->lock); | |
gdprintk(XENLOG_WARNING, | |
- "Guest tries to use the same SHM buffer cookie %"PRIx64"\n", | |
+ "Guest tries to use the same SHM buffer cookie %" PRIx64 | |
+ "\n", | |
cookie); | |
err_code = -EINVAL; | |
goto err; | |
@@ -541,7 +547,7 @@ static void free_optee_shm_buf(struct optee_domain *ctx, uint64_t cookie) | |
bool found = false; | |
spin_lock(&ctx->lock); | |
- list_for_each_entry( optee_shm_buf, &ctx->optee_shm_buf_list, list ) | |
+ list_for_each_entry (optee_shm_buf, &ctx->optee_shm_buf_list, list) | |
{ | |
if ( optee_shm_buf->cookie == cookie ) | |
{ | |
@@ -573,7 +579,7 @@ static void free_optee_shm_buf_pg_list(struct optee_domain *ctx, | |
bool found = false; | |
spin_lock(&ctx->lock); | |
- list_for_each_entry( optee_shm_buf, &ctx->optee_shm_buf_list, list ) | |
+ list_for_each_entry (optee_shm_buf, &ctx->optee_shm_buf_list, list) | |
{ | |
if ( optee_shm_buf->cookie == cookie ) | |
{ | |
@@ -587,7 +593,8 @@ static void free_optee_shm_buf_pg_list(struct optee_domain *ctx, | |
free_pg_list(optee_shm_buf); | |
else | |
gdprintk(XENLOG_ERR, | |
- "Can't find pagelist for SHM buffer with cookie %"PRIx64" to free it\n", | |
+ "Can't find pagelist for SHM buffer with cookie %" PRIx64 | |
+ " to free it\n", | |
cookie); | |
} | |
@@ -607,7 +614,7 @@ static int optee_relinquish_resources(struct domain *d) | |
* no more than 8-16 calls. But it depends on OP-TEE configuration | |
* (CFG_NUM_THREADS option). | |
*/ | |
- list_for_each_entry_safe( call, call_tmp, &ctx->call_list, list ) | |
+ list_for_each_entry_safe(call, call_tmp, &ctx->call_list, list) | |
free_std_call(ctx, call); | |
if ( hypercall_preempt_check() ) | |
@@ -617,7 +624,7 @@ static int optee_relinquish_resources(struct domain *d) | |
* Number of this buffers also depends on max_optee_threads, so | |
* check the comment above. | |
*/ | |
- list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list ) | |
+ list_for_each_entry_safe(shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list) | |
free_shm_rpc(ctx, shm_rpc->cookie); | |
if ( hypercall_preempt_check() ) | |
@@ -628,8 +635,8 @@ static int optee_relinquish_resources(struct domain *d) | |
* them will be put in this loop. It is worth considering to | |
* check for preemption inside the loop. | |
*/ | |
- list_for_each_entry_safe( optee_shm_buf, optee_shm_buf_tmp, | |
- &ctx->optee_shm_buf_list, list ) | |
+ list_for_each_entry_safe(optee_shm_buf, optee_shm_buf_tmp, | |
+ &ctx->optee_shm_buf_list, list) | |
free_optee_shm_buf(ctx, optee_shm_buf->cookie); | |
if ( hypercall_preempt_check() ) | |
@@ -656,7 +663,7 @@ static int optee_relinquish_resources(struct domain *d) | |
return 0; | |
} | |
-#define PAGELIST_ENTRIES_PER_PAGE \ | |
+#define PAGELIST_ENTRIES_PER_PAGE \ | |
((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) | |
static size_t get_pages_list_size(size_t num_entries) | |
@@ -679,18 +686,21 @@ static int translate_noncontig(struct optee_domain *ctx, | |
struct page_info *guest_pg, *xen_pgs; | |
struct optee_shm_buf *optee_shm_buf; | |
/* | |
- * This is memory layout for page list. Basically list consists of 4k pages, | |
- * every page store 511 page addresses of user buffer and page address of | |
- * the next page of list. | |
+     * This is the memory layout for the page list. Basically, the list | |
+     * consists of 4k pages; every page stores 511 page addresses of the user | |
+     * buffer plus the address of the next page of the list. | |
* | |
- * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for details. | |
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h for | |
+ * details. | |
*/ | |
- struct { | |
+ struct | |
+ { | |
uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE]; | |
uint64_t next_page_data; | |
- } *guest_data, *xen_data; | |
+ } * guest_data, *xen_data; | |
- /* Offset of user buffer withing OPTEE_MSG_NONCONTIG_PAGE_SIZE-sized page */ | |
+    /* Offset of user buffer within OPTEE_MSG_NONCONTIG_PAGE_SIZE-sized page | |
+ */ | |
offset = param->u.tmem.buf_ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1); | |
/* Size of the user buffer in bytes */ | |
@@ -810,14 +820,15 @@ static int translate_params(struct optee_domain *ctx, | |
{ | |
attr = call->xen_arg->params[i].attr; | |
- switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) | |
+ switch (attr & OPTEE_MSG_ATTR_TYPE_MASK) | |
{ | |
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: | |
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: | |
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: | |
if ( attr & OPTEE_MSG_ATTR_NONCONTIG ) | |
{ | |
- ret = translate_noncontig(ctx, call, call->xen_arg->params + i); | |
+ ret = | |
+ translate_noncontig(ctx, call, call->xen_arg->params + i); | |
if ( ret ) | |
goto out; | |
} | |
@@ -860,8 +871,8 @@ out: | |
static bool copy_std_request(struct cpu_user_regs *regs, | |
struct optee_std_call *call) | |
{ | |
- call->guest_arg_ipa = regpair_to_uint64(get_user_reg(regs, 1), | |
- get_user_reg(regs, 2)); | |
+ call->guest_arg_ipa = | |
+ regpair_to_uint64(get_user_reg(regs, 1), get_user_reg(regs, 2)); | |
/* | |
* Command buffer should start at page boundary. | |
@@ -941,7 +952,7 @@ static void copy_std_request_back(struct optee_domain *ctx, | |
{ | |
attr = call->xen_arg->params[i].attr; | |
- switch ( attr & OPTEE_MSG_ATTR_TYPE_MASK ) | |
+ switch (attr & OPTEE_MSG_ATTR_TYPE_MASK) | |
{ | |
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: | |
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: | |
@@ -973,15 +984,14 @@ static void copy_std_request_back(struct optee_domain *ctx, | |
put_page(page); | |
} | |
- | |
static void free_shm_buffers(struct optee_domain *ctx, | |
struct optee_msg_arg *arg) | |
{ | |
unsigned int i; | |
- for ( i = 0; i < arg->num_params; i ++ ) | |
+ for ( i = 0; i < arg->num_params; i++ ) | |
{ | |
- switch ( arg->params[i].attr & OPTEE_MSG_ATTR_TYPE_MASK ) | |
+ switch (arg->params[i].attr & OPTEE_MSG_ATTR_TYPE_MASK) | |
{ | |
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: | |
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: | |
@@ -1015,8 +1025,8 @@ static int handle_rpc_return(struct optee_domain *ctx, | |
if ( call->rpc_op == OPTEE_SMC_RPC_FUNC_CMD ) | |
{ | |
/* Copy RPC request from shadowed buffer to guest */ | |
- uint64_t cookie = regpair_to_uint64(get_user_reg(regs, 1), | |
- get_user_reg(regs, 2)); | |
+ uint64_t cookie = | |
+ regpair_to_uint64(get_user_reg(regs, 1), get_user_reg(regs, 2)); | |
struct shm_rpc *shm_rpc = find_shm_rpc(ctx, cookie); | |
if ( !shm_rpc ) | |
@@ -1031,7 +1041,7 @@ static int handle_rpc_return(struct optee_domain *ctx, | |
* will overwrite it with actual result. So we can just | |
* continue the call. | |
*/ | |
- gprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %"PRIx64"\n", | |
+ gprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %" PRIx64 "\n", | |
cookie); | |
return -ERESTART; | |
@@ -1039,11 +1049,9 @@ static int handle_rpc_return(struct optee_domain *ctx, | |
shm_rpc->xen_arg = __map_domain_page(shm_rpc->xen_arg_pg); | |
- if ( access_guest_memory_by_ipa(current->domain, | |
- gfn_to_gaddr(shm_rpc->gfn), | |
- shm_rpc->xen_arg, | |
- OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params), | |
- true) ) | |
+ if ( access_guest_memory_by_ipa( | |
+ current->domain, gfn_to_gaddr(shm_rpc->gfn), shm_rpc->xen_arg, | |
+ OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params), true) ) | |
{ | |
/* | |
* We were unable to propagate request to guest, so let's return | |
@@ -1076,9 +1084,9 @@ static int handle_rpc_return(struct optee_domain *ctx, | |
*/ | |
static void do_call_with_arg(struct optee_domain *ctx, | |
struct optee_std_call *call, | |
- struct cpu_user_regs *regs, | |
- register_t a0, register_t a1, register_t a2, | |
- register_t a3, register_t a4, register_t a5) | |
+ struct cpu_user_regs *regs, register_t a0, | |
+ register_t a1, register_t a2, register_t a3, | |
+ register_t a4, register_t a5) | |
{ | |
struct arm_smccc_res res; | |
@@ -1087,14 +1095,13 @@ static void do_call_with_arg(struct optee_domain *ctx, | |
if ( OPTEE_SMC_RETURN_IS_RPC(res.a0) ) | |
{ | |
- while ( handle_rpc_return(ctx, &res, regs, call) == -ERESTART ) | |
+ while ( handle_rpc_return(ctx, &res, regs, call) == -ERESTART ) | |
{ | |
arm_smccc_smc(res.a0, res.a1, res.a2, res.a3, 0, 0, 0, | |
OPTEE_CLIENT_ID(current->domain), &res); | |
if ( !OPTEE_SMC_RETURN_IS_RPC(res.a0) ) | |
break; | |
- | |
} | |
put_std_call(ctx, call); | |
@@ -1105,15 +1112,16 @@ static void do_call_with_arg(struct optee_domain *ctx, | |
copy_std_request_back(ctx, regs, call); | |
set_user_reg(regs, 0, res.a0); | |
- switch ( call->xen_arg->cmd ) | |
+ switch (call->xen_arg->cmd) | |
{ | |
case OPTEE_MSG_CMD_REGISTER_SHM: | |
if ( call->xen_arg->ret == 0 ) | |
/* OP-TEE registered buffer, we don't need pg_list anymore */ | |
- free_optee_shm_buf_pg_list(ctx, | |
- call->xen_arg->params[0].u.tmem.shm_ref); | |
+ free_optee_shm_buf_pg_list( | |
+ ctx, call->xen_arg->params[0].u.tmem.shm_ref); | |
else | |
- /* OP-TEE failed to register buffer, we need to unpin guest pages */ | |
+ /* OP-TEE failed to register buffer, we need to unpin guest pages | |
+ */ | |
free_optee_shm_buf(ctx, call->xen_arg->params[0].u.tmem.shm_ref); | |
break; | |
case OPTEE_MSG_CMD_UNREGISTER_SHM: | |
@@ -1171,7 +1179,8 @@ static void handle_std_call(struct optee_domain *ctx, | |
{ | |
call->xen_arg->ret = TEEC_ERROR_BAD_PARAMETERS; | |
call->xen_arg->ret_origin = TEEC_ORIGIN_COMMS; | |
- /* Make sure that copy_std_request_back() will stay within the buffer */ | |
+ /* Make sure that copy_std_request_back() will stay within the buffer | |
+ */ | |
call->xen_arg->num_params = 0; | |
copy_std_request_back(ctx, regs, call); | |
@@ -1179,7 +1188,7 @@ static void handle_std_call(struct optee_domain *ctx, | |
goto err; | |
} | |
- switch ( call->xen_arg->cmd ) | |
+ switch (call->xen_arg->cmd) | |
{ | |
case OPTEE_MSG_CMD_OPEN_SESSION: | |
case OPTEE_MSG_CMD_CLOSE_SESSION: | |
@@ -1187,7 +1196,7 @@ static void handle_std_call(struct optee_domain *ctx, | |
case OPTEE_MSG_CMD_CANCEL: | |
case OPTEE_MSG_CMD_REGISTER_SHM: | |
case OPTEE_MSG_CMD_UNREGISTER_SHM: | |
- if( translate_params(ctx, call) ) | |
+ if ( translate_params(ctx, call) ) | |
{ | |
/* | |
* translate_params() sets xen_arg->ret value to non-zero. | |
@@ -1230,11 +1239,11 @@ static void handle_rpc_cmd_alloc(struct optee_domain *ctx, | |
if ( shm_rpc->xen_arg->ret || shm_rpc->xen_arg->num_params != 1 ) | |
return; | |
- if ( shm_rpc->xen_arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | | |
- OPTEE_MSG_ATTR_NONCONTIG) ) | |
+ if ( shm_rpc->xen_arg->params[0].attr != | |
+ (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG) ) | |
{ | |
gdprintk(XENLOG_WARNING, | |
- "Invalid attrs for shared mem buffer: %"PRIx64"\n", | |
+ "Invalid attrs for shared mem buffer: %" PRIx64 "\n", | |
shm_rpc->xen_arg->params[0].attr); | |
return; | |
} | |
@@ -1245,8 +1254,7 @@ static void handle_rpc_cmd_alloc(struct optee_domain *ctx, | |
if ( !translate_noncontig(ctx, call, &shm_rpc->xen_arg->params[0]) ) | |
{ | |
- call->rpc_data_cookie = | |
- shm_rpc->xen_arg->params[0].u.tmem.shm_ref; | |
+ call->rpc_data_cookie = shm_rpc->xen_arg->params[0].u.tmem.shm_ref; | |
} | |
else | |
{ | |
@@ -1264,26 +1272,27 @@ static void handle_rpc_cmd_alloc(struct optee_domain *ctx, | |
* while OP-TEE thinks of opposite. Ideally, we need to | |
* emulate RPC with OPTEE_MSG_RPC_CMD_SHM_FREE command. | |
*/ | |
- gprintk(XENLOG_WARNING, | |
- "translate_noncontig() failed, OP-TEE/guest state is out of sync.\n"); | |
+ gprintk( | |
+ XENLOG_WARNING, | |
+ "translate_noncontig() failed, OP-TEE/guest state is out of sync.\n"); | |
} | |
} | |
-static void handle_rpc_cmd(struct optee_domain *ctx, struct cpu_user_regs *regs, | |
+static void handle_rpc_cmd(struct optee_domain *ctx, | |
+ struct cpu_user_regs *regs, | |
struct optee_std_call *call) | |
{ | |
struct shm_rpc *shm_rpc; | |
uint64_t cookie; | |
size_t arg_size; | |
- cookie = regpair_to_uint64(get_user_reg(regs, 1), | |
- get_user_reg(regs, 2)); | |
+ cookie = regpair_to_uint64(get_user_reg(regs, 1), get_user_reg(regs, 2)); | |
shm_rpc = find_shm_rpc(ctx, cookie); | |
if ( !shm_rpc ) | |
{ | |
- gdprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %"PRIx64"\n", | |
+ gdprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %" PRIx64 "\n", | |
cookie); | |
return; | |
} | |
@@ -1291,11 +1300,9 @@ static void handle_rpc_cmd(struct optee_domain *ctx, struct cpu_user_regs *regs, | |
shm_rpc->xen_arg = __map_domain_page(shm_rpc->xen_arg_pg); | |
/* First, copy only header to read number of arguments */ | |
- if ( access_guest_memory_by_ipa(current->domain, | |
- gfn_to_gaddr(shm_rpc->gfn), | |
- shm_rpc->xen_arg, | |
- sizeof(struct optee_msg_arg), | |
- false) ) | |
+ if ( access_guest_memory_by_ipa( | |
+ current->domain, gfn_to_gaddr(shm_rpc->gfn), shm_rpc->xen_arg, | |
+ sizeof(struct optee_msg_arg), false) ) | |
{ | |
shm_rpc->xen_arg->ret = TEEC_ERROR_GENERIC; | |
goto out; | |
@@ -1309,7 +1316,8 @@ static void handle_rpc_cmd(struct optee_domain *ctx, struct cpu_user_regs *regs, | |
} | |
/* Read the whole command structure */ | |
- if ( access_guest_memory_by_ipa(current->domain, gfn_to_gaddr(shm_rpc->gfn), | |
+ if ( access_guest_memory_by_ipa(current->domain, | |
+ gfn_to_gaddr(shm_rpc->gfn), | |
shm_rpc->xen_arg, arg_size, false) ) | |
{ | |
shm_rpc->xen_arg->ret = TEEC_ERROR_GENERIC; | |
@@ -1339,7 +1347,6 @@ out: | |
do_call_with_arg(ctx, call, regs, OPTEE_SMC_CALL_RETURN_FROM_RPC, 0, 0, | |
get_user_reg(regs, 3), 0, 0); | |
- | |
} | |
static void handle_rpc_func_alloc(struct optee_domain *ctx, | |
@@ -1348,14 +1355,15 @@ static void handle_rpc_func_alloc(struct optee_domain *ctx, | |
{ | |
struct shm_rpc *shm_rpc; | |
register_t r1, r2; | |
- paddr_t ptr = regpair_to_uint64(get_user_reg(regs, 1), | |
- get_user_reg(regs, 2)); | |
- uint64_t cookie = regpair_to_uint64(get_user_reg(regs, 4), | |
- get_user_reg(regs, 5)); | |
+ paddr_t ptr = | |
+ regpair_to_uint64(get_user_reg(regs, 1), get_user_reg(regs, 2)); | |
+ uint64_t cookie = | |
+ regpair_to_uint64(get_user_reg(regs, 4), get_user_reg(regs, 5)); | |
if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) ) | |
{ | |
- gdprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n"); | |
+ gdprintk(XENLOG_WARNING, | |
+ "Domain returned invalid RPC command buffer\n"); | |
/* | |
* OP-TEE is waiting for a response to the RPC. We can't just | |
* return error to the guest. We need to provide some invalid | |
@@ -1379,8 +1387,7 @@ out: | |
uint64_to_regpair(&r1, &r2, ptr); | |
do_call_with_arg(ctx, call, regs, OPTEE_SMC_CALL_RETURN_FROM_RPC, r1, r2, | |
- get_user_reg(regs, 3), | |
- get_user_reg(regs, 4), | |
+ get_user_reg(regs, 3), get_user_reg(regs, 4), | |
get_user_reg(regs, 5)); | |
} | |
@@ -1404,15 +1411,15 @@ static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs) | |
*/ | |
call->optee_thread_id = -1; | |
- switch ( call->rpc_op ) | |
+ switch (call->rpc_op) | |
{ | |
case OPTEE_SMC_RPC_FUNC_ALLOC: | |
handle_rpc_func_alloc(ctx, regs, call); | |
return; | |
case OPTEE_SMC_RPC_FUNC_FREE: | |
{ | |
- uint64_t cookie = regpair_to_uint64(call->rpc_params[0], | |
- call->rpc_params[1]); | |
+ uint64_t cookie = | |
+ regpair_to_uint64(call->rpc_params[0], call->rpc_params[1]); | |
free_shm_rpc(ctx, cookie); | |
break; | |
} | |
@@ -1424,8 +1431,8 @@ static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs) | |
} | |
do_call_with_arg(ctx, call, regs, OPTEE_SMC_CALL_RETURN_FROM_RPC, | |
- call->rpc_params[0], call->rpc_params[1], | |
- optee_thread_id, 0, 0); | |
+ call->rpc_params[0], call->rpc_params[1], optee_thread_id, | |
+ 0, 0); | |
return; | |
} | |
@@ -1440,7 +1447,8 @@ static void handle_exchange_capabilities(struct cpu_user_regs *regs) | |
arm_smccc_smc(OPTEE_SMC_EXCHANGE_CAPABILITIES, caps, 0, 0, 0, 0, 0, | |
OPTEE_CLIENT_ID(current->domain), &resp); | |
- if ( resp.a0 != OPTEE_SMC_RETURN_OK ) { | |
+ if ( resp.a0 != OPTEE_SMC_RETURN_OK ) | |
+ { | |
set_user_reg(regs, 0, resp.a0); | |
return; | |
} | |
@@ -1472,7 +1480,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs) | |
if ( !ctx ) | |
return false; | |
- switch ( get_user_reg(regs, 0) ) | |
+ switch (get_user_reg(regs, 0)) | |
{ | |
case OPTEE_SMC_CALLS_COUNT: | |
set_user_reg(regs, 0, OPTEE_MEDIATOR_SMC_COUNT); | |
@@ -1496,7 +1504,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs) | |
case OPTEE_SMC_CALL_GET_OS_UUID: | |
arm_smccc_smc(OPTEE_SMC_CALL_GET_OS_UUID, 0, 0, 0, 0, 0, 0, | |
- OPTEE_CLIENT_ID(current->domain),&resp); | |
+ OPTEE_CLIENT_ID(current->domain), &resp); | |
set_user_reg(regs, 0, resp.a0); | |
set_user_reg(regs, 1, resp.a1); | |
set_user_reg(regs, 2, resp.a2); | |
@@ -1520,8 +1528,9 @@ static bool optee_handle_call(struct cpu_user_regs *regs) | |
arm_smccc_smc(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, | |
OPTEE_CLIENT_ID(current->domain), &resp); | |
set_user_reg(regs, 0, resp.a0); | |
- if ( resp.a0 == OPTEE_SMC_RETURN_OK ) { | |
- free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2)); | |
+ if ( resp.a0 == OPTEE_SMC_RETURN_OK ) | |
+ { | |
+ free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2)); | |
set_user_reg(regs, 1, resp.a1); | |
set_user_reg(regs, 2, resp.a2); | |
} | |
@@ -1549,15 +1558,15 @@ static bool optee_handle_call(struct cpu_user_regs *regs) | |
} | |
} | |
-static const struct tee_mediator_ops optee_ops = | |
-{ | |
+static const struct tee_mediator_ops optee_ops = { | |
.probe = optee_probe, | |
.domain_init = optee_domain_init, | |
.relinquish_resources = optee_relinquish_resources, | |
.handle_call = optee_handle_call, | |
}; | |
-REGISTER_TEE_MEDIATOR(optee, "OP-TEE", XEN_DOMCTL_CONFIG_TEE_OPTEE, &optee_ops); | |
+REGISTER_TEE_MEDIATOR(optee, "OP-TEE", XEN_DOMCTL_CONFIG_TEE_OPTEE, | |
+ &optee_ops); | |
/* | |
* Local variables: | |
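Annotation (not part of the diff): the heaviest functional code in this section is the translate_noncontig() path, and the struct the formatter mangled into "} * guest_data, *xen_data;" is the on-wire page-list format: each 4 KiB list page carries 511 buffer-page addresses plus a link to the next list page. A hedged standalone sketch of the size computation and layout; the constants mirror the OPTEE_MSG_NONCONTIG convention, and get_pages_list_size() below is a plausible reconstruction (its body is not shown in the diff):

#include <stdint.h>
#include <stdio.h>

#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096U
#define PAGELIST_ENTRIES_PER_PAGE \
    ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(uint64_t)) - 1)

/* One 4 KiB element of the list: 511 data-page addresses + a link. */
struct page_list_page {
    uint64_t pages_list[PAGELIST_ENTRIES_PER_PAGE];
    uint64_t next_page_data;
};

/* Likely shape of get_pages_list_size(): round entries up to whole pages. */
static size_t get_pages_list_size(size_t num_entries)
{
    size_t pages = (num_entries + PAGELIST_ENTRIES_PER_PAGE - 1) /
                   PAGELIST_ENTRIES_PER_PAGE;

    return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

int main(void)
{
    _Static_assert(sizeof(struct page_list_page) ==
                   OPTEE_MSG_NONCONTIG_PAGE_SIZE, "layout must be 4 KiB");

    printf("512 entries need %zu bytes of list\n", get_pages_list_size(512));
    return 0;
}

The 512th entry spills into a second list page, so the example prints 8192 bytes; the mediator shadows exactly this structure when translating guest buffers.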
diff --git a/xen/arch/arm/tee/tee.c b/xen/arch/arm/tee/tee.c | |
index 3964a8a5cd..57b6a74ac9 100644 | |
--- a/xen/arch/arm/tee/tee.c | |
+++ b/xen/arch/arm/tee/tee.c | |
@@ -68,7 +68,6 @@ uint16_t tee_get_type(void) | |
return cur_mediator->tee_type; | |
} | |
- | |
static int __init tee_init(void) | |
{ | |
const struct tee_mediator_desc *desc; | |
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c | |
index 739bcf186c..ac0a565460 100644 | |
--- a/xen/arch/arm/time.c | |
+++ b/xen/arch/arm/time.c | |
@@ -41,7 +41,7 @@ uint64_t __read_mostly boot_count; | |
/* For fine-grained timekeeping, we use the ARM "Generic Timer", a | |
* register-mapped time source in the SoC. */ | |
-unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. */ | |
+unsigned long __read_mostly cpu_khz; /* CPU clock frequency in kHz. */ | |
uint32_t __read_mostly timer_dt_clock_frequency; | |
@@ -106,16 +106,17 @@ static void __init preinit_acpi_xen_time(void) | |
acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init); | |
} | |
#else | |
-static void __init preinit_acpi_xen_time(void) { } | |
+static void __init preinit_acpi_xen_time(void) | |
+{ | |
+} | |
#endif | |
/* Set up the timer on the boot CPU (early init function) */ | |
static void __init preinit_dt_xen_time(void) | |
{ | |
- static const struct dt_device_match timer_ids[] __initconst = | |
- { | |
+ static const struct dt_device_match timer_ids[] __initconst = { | |
DT_MATCH_TIMER, | |
- { /* sentinel */ }, | |
+ {/* sentinel */}, | |
}; | |
int res; | |
u32 rate; | |
@@ -165,7 +166,8 @@ static void __init init_dt_xen_time(void) | |
res = platform_get_irq(timer, i); | |
if ( res < 0 ) | |
- panic("Timer: Unable to retrieve IRQ %u from the device tree\n", i); | |
+ panic("Timer: Unable to retrieve IRQ %u from the device tree\n", | |
+ i); | |
timer_irq[i] = res; | |
} | |
} | |
@@ -181,10 +183,8 @@ int __init init_xen_time(void) | |
panic("CPU does not support the Generic Timer v1 interface\n"); | |
printk("Generic Timer IRQ: phys=%u hyp=%u virt=%u Freq: %lu KHz\n", | |
- timer_irq[TIMER_PHYS_NONSECURE_PPI], | |
- timer_irq[TIMER_HYP_PPI], | |
- timer_irq[TIMER_VIRT_PPI], | |
- cpu_khz); | |
+ timer_irq[TIMER_PHYS_NONSECURE_PPI], timer_irq[TIMER_HYP_PPI], | |
+ timer_irq[TIMER_VIRT_PPI], cpu_khz); | |
return 0; | |
} | |
@@ -262,7 +262,8 @@ static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) | |
current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0); | |
WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0); | |
- vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, true); | |
+ vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, | |
+ true); | |
} | |
/* | |
@@ -287,25 +288,26 @@ static void check_timer_irq_cfg(unsigned int irq, const char *which) | |
if ( desc->arch.type & IRQ_TYPE_LEVEL_MASK ) | |
return; | |
- printk(XENLOG_WARNING | |
- "WARNING: %s-timer IRQ%u is not level triggered.\n", which, irq); | |
+ printk(XENLOG_WARNING "WARNING: %s-timer IRQ%u is not level triggered.\n", | |
+ which, irq); | |
} | |
/* Set up the timer interrupt on this CPU */ | |
void init_timer_interrupt(void) | |
{ | |
/* Sensible defaults */ | |
- WRITE_SYSREG64(0, CNTVOFF_EL2); /* No VM-specific offset */ | |
- /* Do not let the VMs program the physical timer, only read the physical counter */ | |
+ WRITE_SYSREG64(0, CNTVOFF_EL2); /* No VM-specific offset */ | |
+ /* Do not let the VMs program the physical timer, only read the physical | |
+ * counter */ | |
WRITE_SYSREG32(CNTHCTL_EL2_EL1PCTEN, CNTHCTL_EL2); | |
- WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Physical timer disabled */ | |
- WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Hypervisor's timer disabled */ | |
+ WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Physical timer disabled */ | |
+ WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Hypervisor's timer disabled */ | |
isb(); | |
- request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt, | |
- "hyptimer", NULL); | |
- request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt, | |
- "virtimer", NULL); | |
+ request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt, "hyptimer", | |
+ NULL); | |
+ request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt, "virtimer", | |
+ NULL); | |
request_irq(timer_irq[TIMER_PHYS_NONSECURE_PPI], 0, timer_interrupt, | |
"phytimer", NULL); | |
@@ -320,8 +322,8 @@ void init_timer_interrupt(void) | |
*/ | |
static void deinit_timer_interrupt(void) | |
{ | |
- WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Disable physical timer */ | |
- WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Disable hypervisor's timer */ | |
+ WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Disable physical timer */ | |
+ WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Disable hypervisor's timer */ | |
isb(); | |
release_irq(timer_irq[TIMER_HYP_PPI], NULL); | |
@@ -332,7 +334,7 @@ static void deinit_timer_interrupt(void) | |
/* Wait a set number of microseconds */ | |
void udelay(unsigned long usecs) | |
{ | |
- s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs; | |
+ s_time_t deadline = get_s_time() + 1000 * (s_time_t)usecs; | |
while ( get_s_time() - deadline < 0 ) | |
; | |
dsb(sy); | |
@@ -357,11 +359,10 @@ void domain_set_time_offset(struct domain *d, int64_t time_offset_seconds) | |
/* XXX update guest visible wallclock time */ | |
} | |
-static int cpu_time_callback(struct notifier_block *nfb, | |
- unsigned long action, | |
+static int cpu_time_callback(struct notifier_block *nfb, unsigned long action, | |
void *hcpu) | |
{ | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_DYING: | |
deinit_timer_interrupt(); | |
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c | |
index 3103620323..551facaf04 100644 | |
--- a/xen/arch/arm/traps.c | |
+++ b/xen/arch/arm/traps.c | |
@@ -54,16 +54,17 @@ | |
* that both the kernel half of struct cpu_user_regs (which is pushed in | |
* entry.S) and struct cpu_info (which lives at the bottom of a Xen | |
* stack) must be doubleword-aligned in size. */ | |
-static inline void check_stack_alignment_constraints(void) { | |
+static inline void check_stack_alignment_constraints(void) | |
+{ | |
#ifdef CONFIG_ARM_64 | |
- BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0xf); | |
+ BUILD_BUG_ON((sizeof(struct cpu_user_regs)) & 0xf); | |
BUILD_BUG_ON((offsetof(struct cpu_user_regs, spsr_el1)) & 0xf); | |
BUILD_BUG_ON((offsetof(struct cpu_user_regs, lr)) & 0xf); | |
- BUILD_BUG_ON((sizeof (struct cpu_info)) & 0xf); | |
+ BUILD_BUG_ON((sizeof(struct cpu_info)) & 0xf); | |
#else | |
- BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0x7); | |
+ BUILD_BUG_ON((sizeof(struct cpu_user_regs)) & 0x7); | |
BUILD_BUG_ON((offsetof(struct cpu_user_regs, sp_usr)) & 0x7); | |
- BUILD_BUG_ON((sizeof (struct cpu_info)) & 0x7); | |
+ BUILD_BUG_ON((sizeof(struct cpu_info)) & 0x7); | |
#endif | |
} | |
@@ -78,26 +79,26 @@ static int debug_stack_lines = 40; | |
integer_param("debug_stack_lines", debug_stack_lines); | |
static enum { | |
- TRAP, | |
- NATIVE, | |
+ TRAP, | |
+ NATIVE, | |
} vwfi; | |
static int __init parse_vwfi(const char *s) | |
{ | |
- if ( !strcmp(s, "native") ) | |
- vwfi = NATIVE; | |
- else | |
- vwfi = TRAP; | |
+ if ( !strcmp(s, "native") ) | |
+ vwfi = NATIVE; | |
+ else | |
+ vwfi = TRAP; | |
- return 0; | |
+ return 0; | |
} | |
custom_param("vwfi", parse_vwfi); | |
register_t get_default_hcr_flags(void) | |
{ | |
- return (HCR_PTW|HCR_BSU_INNER|HCR_AMO|HCR_IMO|HCR_FMO|HCR_VM| | |
- (vwfi != NATIVE ? (HCR_TWI|HCR_TWE) : 0) | | |
- HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP|HCR_FB|HCR_TSW); | |
+ return (HCR_PTW | HCR_BSU_INNER | HCR_AMO | HCR_IMO | HCR_FMO | HCR_VM | | |
+ (vwfi != NATIVE ? (HCR_TWI | HCR_TWE) : 0) | HCR_TSC | HCR_TAC | | |
+ HCR_SWIO | HCR_TIDCP | HCR_FB | HCR_TSW); | |
} | |
static enum { | |
@@ -140,7 +141,7 @@ void init_traps(void) | |
WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); | |
/* Trap Debug and Performance Monitor accesses */ | |
- WRITE_SYSREG(HDCR_TDRA|HDCR_TDOSA|HDCR_TDA|HDCR_TPM|HDCR_TPMCR, | |
+ WRITE_SYSREG(HDCR_TDRA | HDCR_TDOSA | HDCR_TDA | HDCR_TPM | HDCR_TPMCR, | |
MDCR_EL2); | |
/* Trap CP15 c15 used for implementation defined registers */ | |
@@ -213,7 +214,7 @@ static inline bool is_zero_register(int reg) | |
*/ | |
static register_t *select_user_reg(struct cpu_user_regs *regs, int reg) | |
{ | |
- BUG_ON( !guest_mode(regs) ); | |
+ BUG_ON(!guest_mode(regs)); | |
#ifdef CONFIG_ARM_32 | |
/* | |
@@ -223,24 +224,30 @@ static register_t *select_user_reg(struct cpu_user_regs *regs, int reg) | |
*/ | |
#define REGOFFS(R) offsetof(struct cpu_user_regs, R) | |
- switch ( reg ) | |
+ switch (reg) | |
{ | |
case 0 ... 7: /* Unbanked registers */ | |
- BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); | |
+ BUILD_BUG_ON(REGOFFS(r0) + 7 * sizeof(register_t) != REGOFFS(r7)); | |
return ®s->r0 + reg; | |
case 8 ... 12: /* Register banked in FIQ mode */ | |
- BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); | |
+ BUILD_BUG_ON(REGOFFS(r8_fiq) + 4 * sizeof(register_t) != | |
+ REGOFFS(r12_fiq)); | |
if ( fiq_mode(regs) ) | |
return ®s->r8_fiq + reg - 8; | |
else | |
return ®s->r8 + reg - 8; | |
case 13 ... 14: /* Banked SP + LR registers */ | |
- BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); | |
- BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); | |
- BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); | |
- BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); | |
- BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); | |
- switch ( regs->cpsr & PSR_MODE_MASK ) | |
+ BUILD_BUG_ON(REGOFFS(sp_fiq) + 1 * sizeof(register_t) != | |
+ REGOFFS(lr_fiq)); | |
+ BUILD_BUG_ON(REGOFFS(sp_irq) + 1 * sizeof(register_t) != | |
+ REGOFFS(lr_irq)); | |
+ BUILD_BUG_ON(REGOFFS(sp_svc) + 1 * sizeof(register_t) != | |
+ REGOFFS(lr_svc)); | |
+ BUILD_BUG_ON(REGOFFS(sp_abt) + 1 * sizeof(register_t) != | |
+ REGOFFS(lr_abt)); | |
+ BUILD_BUG_ON(REGOFFS(sp_und) + 1 * sizeof(register_t) != | |
+ REGOFFS(lr_und)); | |
+ switch (regs->cpsr & PSR_MODE_MASK) | |
{ | |
case PSR_MODE_USR: | |
case PSR_MODE_SYS: /* Sys regs are the usr regs */ | |
@@ -299,7 +306,7 @@ static const char *decode_fsc(uint32_t fsc, int *level) | |
{ | |
const char *msg = NULL; | |
- switch ( fsc & 0x3f ) | |
+ switch (fsc & 0x3f) | |
{ | |
case FSC_FLT_TRANS ... FSC_FLT_TRANS + 3: | |
msg = "Translation fault"; | |
@@ -354,13 +361,18 @@ static const char *decode_fsc(uint32_t fsc, int *level) | |
static const char *fsc_level_str(int level) | |
{ | |
- switch ( level ) | |
+ switch (level) | |
{ | |
- case -1: return ""; | |
- case 1: return " at level 1"; | |
- case 2: return " at level 2"; | |
- case 3: return " at level 3"; | |
- default: return " (level invalid)"; | |
+ case -1: | |
+ return ""; | |
+ case 1: | |
+ return " at level 1"; | |
+ case 2: | |
+ return " at level 2"; | |
+ case 3: | |
+ return " at level 3"; | |
+ default: | |
+ return " (level invalid)"; | |
} | |
} | |
@@ -371,11 +383,9 @@ void panic_PAR(uint64_t par) | |
int stage = par & PAR_STAGE2 ? 2 : 1; | |
int second_in_first = !!(par & PAR_STAGE21); | |
- msg = decode_fsc( (par&PAR_FSC_MASK) >> PAR_FSC_SHIFT, &level); | |
+ msg = decode_fsc((par & PAR_FSC_MASK) >> PAR_FSC_SHIFT, &level); | |
- printk("PAR: %016"PRIx64": %s stage %d%s%s\n", | |
- par, msg, | |
- stage, | |
+ printk("PAR: %016" PRIx64 ": %s stage %d%s%s\n", par, msg, stage, | |
second_in_first ? " during second stage lookup" : "", | |
fsc_level_str(level)); | |
@@ -386,7 +396,8 @@ static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode) | |
{ | |
uint32_t sctlr = READ_SYSREG32(SCTLR_EL1); | |
- regs->cpsr &= ~(PSR_MODE_MASK|PSR_IT_MASK|PSR_JAZELLE|PSR_BIG_ENDIAN|PSR_THUMB); | |
+ regs->cpsr &= ~(PSR_MODE_MASK | PSR_IT_MASK | PSR_JAZELLE | | |
+ PSR_BIG_ENDIAN | PSR_THUMB); | |
regs->cpsr |= mode; | |
regs->cpsr |= PSR_IRQ_MASK; | |
@@ -420,7 +431,7 @@ static void inject_undef32_exception(struct cpu_user_regs *regs) | |
/* Saved PC points to the instruction past the faulting instruction. */ | |
uint32_t return_offset = is_thumb ? 2 : 4; | |
- BUG_ON( !is_32bit_domain(current->domain) ); | |
+ BUG_ON(!is_32bit_domain(current->domain)); | |
/* Update processor mode */ | |
cpsr_switch_mode(regs, PSR_MODE_UND); | |
@@ -438,8 +449,7 @@ static void inject_undef32_exception(struct cpu_user_regs *regs) | |
* adjustments). See TakePrefetchAbortException and | |
* TakeDataAbortException pseudocode in ARM ARM. | |
*/ | |
-static void inject_abt32_exception(struct cpu_user_regs *regs, | |
- int prefetch, | |
+static void inject_abt32_exception(struct cpu_user_regs *regs, int prefetch, | |
register_t addr) | |
{ | |
uint32_t spsr = regs->cpsr; | |
@@ -448,7 +458,7 @@ static void inject_abt32_exception(struct cpu_user_regs *regs, | |
uint32_t return_offset = is_thumb ? 4 : 0; | |
register_t fsr; | |
- BUG_ON( !is_32bit_domain(current->domain) ); | |
+ BUG_ON(!is_32bit_domain(current->domain)); | |
cpsr_switch_mode(regs, PSR_MODE_ABT); | |
@@ -518,7 +528,7 @@ static vaddr_t exception_handler64(struct cpu_user_regs *regs, vaddr_t offset) | |
if ( usr_mode(regs) ) | |
base += VECTOR64_LOWER32_BASE; | |
- else if ( psr_mode(regs->cpsr,PSR_MODE_EL0t) ) | |
+ else if ( psr_mode(regs->cpsr, PSR_MODE_EL0t) ) | |
base += VECTOR64_LOWER64_BASE; | |
else /* Otherwise must be from kernel mode */ | |
base += VECTOR64_CURRENT_SPx_BASE; | |
@@ -536,25 +546,23 @@ void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len) | |
.ec = HSR_EC_UNKNOWN, | |
}; | |
- BUG_ON( is_32bit_domain(current->domain) ); | |
+ BUG_ON(is_32bit_domain(current->domain)); | |
handler = exception_handler64(regs, VECTOR64_SYNC_OFFSET); | |
regs->spsr_el1 = regs->cpsr; | |
regs->elr_el1 = regs->pc; | |
- regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \ | |
- PSR_IRQ_MASK | PSR_DBG_MASK; | |
+ regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | PSR_IRQ_MASK | | |
+ PSR_DBG_MASK; | |
regs->pc = handler; | |
WRITE_SYSREG32(esr.bits, ESR_EL1); | |
} | |
/* Inject an abort exception into a 64 bit guest */ | |
-static void inject_abt64_exception(struct cpu_user_regs *regs, | |
- int prefetch, | |
- register_t addr, | |
- int instr_len) | |
+static void inject_abt64_exception(struct cpu_user_regs *regs, int prefetch, | |
+ register_t addr, int instr_len) | |
{ | |
vaddr_t handler; | |
union hsr esr = { | |
@@ -563,21 +571,21 @@ static void inject_abt64_exception(struct cpu_user_regs *regs, | |
}; | |
if ( psr_mode_is_user(regs) ) | |
- esr.ec = prefetch | |
- ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL; | |
+ esr.ec = prefetch ? HSR_EC_INSTR_ABORT_LOWER_EL | |
+ : HSR_EC_DATA_ABORT_LOWER_EL; | |
else | |
- esr.ec = prefetch | |
- ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL; | |
+ esr.ec = | |
+ prefetch ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL; | |
- BUG_ON( is_32bit_domain(current->domain) ); | |
+ BUG_ON(is_32bit_domain(current->domain)); | |
handler = exception_handler64(regs, VECTOR64_SYNC_OFFSET); | |
regs->spsr_el1 = regs->cpsr; | |
regs->elr_el1 = regs->pc; | |
- regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \ | |
- PSR_IRQ_MASK | PSR_DBG_MASK; | |
+ regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | PSR_IRQ_MASK | | |
+ PSR_DBG_MASK; | |
regs->pc = handler; | |
WRITE_SYSREG(addr, FAR_EL1); | |
@@ -585,15 +593,13 @@ static void inject_abt64_exception(struct cpu_user_regs *regs, | |
} | |
static void inject_dabt64_exception(struct cpu_user_regs *regs, | |
- register_t addr, | |
- int instr_len) | |
+ register_t addr, int instr_len) | |
{ | |
inject_abt64_exception(regs, 0, addr, instr_len); | |
} | |
static void inject_iabt64_exception(struct cpu_user_regs *regs, | |
- register_t addr, | |
- int instr_len) | |
+ register_t addr, int instr_len) | |
{ | |
inject_abt64_exception(regs, 1, addr, instr_len); | |
} | |
@@ -602,49 +608,47 @@ static void inject_iabt64_exception(struct cpu_user_regs *regs, | |
void inject_undef_exception(struct cpu_user_regs *regs, const union hsr hsr) | |
{ | |
- if ( is_32bit_domain(current->domain) ) | |
- inject_undef32_exception(regs); | |
+ if ( is_32bit_domain(current->domain) ) | |
+ inject_undef32_exception(regs); | |
#ifdef CONFIG_ARM_64 | |
- else | |
- inject_undef64_exception(regs, hsr.len); | |
+ else | |
+ inject_undef64_exception(regs, hsr.len); | |
#endif | |
} | |
-static void inject_iabt_exception(struct cpu_user_regs *regs, | |
- register_t addr, | |
+static void inject_iabt_exception(struct cpu_user_regs *regs, register_t addr, | |
int instr_len) | |
{ | |
- if ( is_32bit_domain(current->domain) ) | |
- inject_pabt32_exception(regs, addr); | |
+ if ( is_32bit_domain(current->domain) ) | |
+ inject_pabt32_exception(regs, addr); | |
#ifdef CONFIG_ARM_64 | |
- else | |
- inject_iabt64_exception(regs, addr, instr_len); | |
+ else | |
+ inject_iabt64_exception(regs, addr, instr_len); | |
#endif | |
} | |
-static void inject_dabt_exception(struct cpu_user_regs *regs, | |
- register_t addr, | |
+static void inject_dabt_exception(struct cpu_user_regs *regs, register_t addr, | |
int instr_len) | |
{ | |
- if ( is_32bit_domain(current->domain) ) | |
- inject_dabt32_exception(regs, addr); | |
+ if ( is_32bit_domain(current->domain) ) | |
+ inject_dabt32_exception(regs, addr); | |
#ifdef CONFIG_ARM_64 | |
- else | |
- inject_dabt64_exception(regs, addr, instr_len); | |
+ else | |
+ inject_dabt64_exception(regs, addr, instr_len); | |
#endif | |
} | |
/* Inject a virtual Abort/SError into the guest. */ | |
static void inject_vabt_exception(struct cpu_user_regs *regs) | |
{ | |
- const union hsr hsr = { .bits = regs->hsr }; | |
+ const union hsr hsr = {.bits = regs->hsr}; | |
/* | |
* SVC/HVC/SMC already have an adjusted PC (See ARM ARM DDI 0487A.j | |
* D1.10.1 for more details), which we need to correct in order to | |
* return to after having injected the SError. | |
*/ | |
- switch ( hsr.ec ) | |
+ switch (hsr.ec) | |
{ | |
case HSR_EC_SVC32: | |
case HSR_EC_HVC32: | |
@@ -717,7 +721,8 @@ crash_system: | |
do_unexpected_trap("SError", regs); | |
} | |
-struct reg_ctxt { | |
+struct reg_ctxt | |
+{ | |
/* Guest-side state */ | |
uint32_t sctlr_el1; | |
register_t tcr_el1; | |
@@ -739,54 +744,55 @@ static const char *mode_string(uint32_t cpsr) | |
{ | |
uint32_t mode; | |
static const char *mode_strings[] = { | |
- [PSR_MODE_USR] = "32-bit Guest USR", | |
- [PSR_MODE_FIQ] = "32-bit Guest FIQ", | |
- [PSR_MODE_IRQ] = "32-bit Guest IRQ", | |
- [PSR_MODE_SVC] = "32-bit Guest SVC", | |
- [PSR_MODE_MON] = "32-bit Monitor", | |
- [PSR_MODE_ABT] = "32-bit Guest ABT", | |
- [PSR_MODE_HYP] = "Hypervisor", | |
- [PSR_MODE_UND] = "32-bit Guest UND", | |
- [PSR_MODE_SYS] = "32-bit Guest SYS", | |
+ [PSR_MODE_USR] = "32-bit Guest USR", | |
+ [PSR_MODE_FIQ] = "32-bit Guest FIQ", | |
+ [PSR_MODE_IRQ] = "32-bit Guest IRQ", | |
+ [PSR_MODE_SVC] = "32-bit Guest SVC", | |
+ [PSR_MODE_MON] = "32-bit Monitor", | |
+ [PSR_MODE_ABT] = "32-bit Guest ABT", | |
+ [PSR_MODE_HYP] = "Hypervisor", | |
+ [PSR_MODE_UND] = "32-bit Guest UND", | |
+ [PSR_MODE_SYS] = "32-bit Guest SYS", | |
#ifdef CONFIG_ARM_64 | |
- [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)", | |
- [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)", | |
- [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)", | |
- [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)", | |
- [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)", | |
- [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)", | |
- [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)", | |
+ [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)", | |
+ [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)", | |
+ [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)", | |
+ [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)", | |
+ [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)", | |
+ [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)", | |
+ [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)", | |
#endif | |
}; | |
mode = cpsr & PSR_MODE_MASK; | |
if ( mode >= ARRAY_SIZE(mode_strings) ) | |
return "Unknown"; | |
- return mode_strings[mode] ? : "Unknown"; | |
+ return mode_strings[mode] ?: "Unknown"; | |
} | |
static void show_registers_32(const struct cpu_user_regs *regs, | |
- const struct reg_ctxt *ctxt, | |
- bool guest_mode, | |
+ const struct reg_ctxt *ctxt, bool guest_mode, | |
const struct vcpu *v) | |
{ | |
- | |
#ifdef CONFIG_ARM_64 | |
- BUG_ON( ! (regs->cpsr & PSR_MODE_BIT) ); | |
- printk("PC: %08"PRIx32"\n", regs->pc32); | |
+ BUG_ON(!(regs->cpsr & PSR_MODE_BIT)); | |
+ printk("PC: %08" PRIx32 "\n", regs->pc32); | |
#else | |
- printk("PC: %08"PRIx32, regs->pc); | |
+ printk("PC: %08" PRIx32, regs->pc); | |
if ( !guest_mode ) | |
printk(" %pS", _p(regs->pc)); | |
printk("\n"); | |
#endif | |
- printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, | |
+ printk("CPSR: %08" PRIx32 " MODE:%s\n", regs->cpsr, | |
mode_string(regs->cpsr)); | |
- printk(" R0: %08"PRIx32" R1: %08"PRIx32" R2: %08"PRIx32" R3: %08"PRIx32"\n", | |
+ printk(" R0: %08" PRIx32 " R1: %08" PRIx32 " R2: %08" PRIx32 | |
+ " R3: %08" PRIx32 "\n", | |
regs->r0, regs->r1, regs->r2, regs->r3); | |
- printk(" R4: %08"PRIx32" R5: %08"PRIx32" R6: %08"PRIx32" R7: %08"PRIx32"\n", | |
+ printk(" R4: %08" PRIx32 " R5: %08" PRIx32 " R6: %08" PRIx32 | |
+ " R7: %08" PRIx32 "\n", | |
regs->r4, regs->r5, regs->r6, regs->r7); | |
- printk(" R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", | |
+ printk(" R8: %08" PRIx32 " R9: %08" PRIx32 " R10:%08" PRIx32 | |
+ " R11:%08" PRIx32 " R12:%08" PRIx32 "\n", | |
regs->r8, regs->r9, regs->r10, | |
#ifdef CONFIG_ARM_64 | |
regs->r11, | |
@@ -797,115 +803,113 @@ static void show_registers_32(const struct cpu_user_regs *regs, | |
if ( guest_mode ) | |
{ | |
- printk("USR: SP: %08"PRIx32" LR: %08"PRIregister"\n", | |
- regs->sp_usr, regs->lr); | |
- printk("SVC: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", | |
+ printk("USR: SP: %08" PRIx32 " LR: %08" PRIregister "\n", regs->sp_usr, | |
+ regs->lr); | |
+ printk("SVC: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", | |
regs->sp_svc, regs->lr_svc, regs->spsr_svc); | |
- printk("ABT: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", | |
+ printk("ABT: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", | |
regs->sp_abt, regs->lr_abt, regs->spsr_abt); | |
- printk("UND: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", | |
+ printk("UND: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", | |
regs->sp_und, regs->lr_und, regs->spsr_und); | |
- printk("IRQ: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", | |
+ printk("IRQ: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", | |
regs->sp_irq, regs->lr_irq, regs->spsr_irq); | |
- printk("FIQ: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", | |
+ printk("FIQ: SP: %08" PRIx32 " LR: %08" PRIx32 " SPSR:%08" PRIx32 "\n", | |
regs->sp_fiq, regs->lr_fiq, regs->spsr_fiq); | |
- printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", | |
- regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq); | |
+ printk("FIQ: R8: %08" PRIx32 " R9: %08" PRIx32 " R10:%08" PRIx32 | |
+ " R11:%08" PRIx32 " R12:%08" PRIx32 "\n", | |
+ regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, | |
+ regs->r12_fiq); | |
} | |
#ifndef CONFIG_ARM_64 | |
else | |
{ | |
- printk("HYP: SP: %08"PRIx32" LR: %08"PRIregister"\n", regs->sp, regs->lr); | |
+ printk("HYP: SP: %08" PRIx32 " LR: %08" PRIregister "\n", regs->sp, | |
+ regs->lr); | |
} | |
#endif | |
printk("\n"); | |
if ( guest_mode ) | |
{ | |
- printk(" SCTLR: %08"PRIx32"\n", ctxt->sctlr_el1); | |
- printk(" TCR: %08"PRIregister"\n", ctxt->tcr_el1); | |
- printk(" TTBR0: %016"PRIx64"\n", ctxt->ttbr0_el1); | |
- printk(" TTBR1: %016"PRIx64"\n", ctxt->ttbr1_el1); | |
- printk(" IFAR: %08"PRIx32", IFSR: %08"PRIx32"\n" | |
- " DFAR: %08"PRIx32", DFSR: %08"PRIx32"\n", | |
+ printk(" SCTLR: %08" PRIx32 "\n", ctxt->sctlr_el1); | |
+ printk(" TCR: %08" PRIregister "\n", ctxt->tcr_el1); | |
+ printk(" TTBR0: %016" PRIx64 "\n", ctxt->ttbr0_el1); | |
+ printk(" TTBR1: %016" PRIx64 "\n", ctxt->ttbr1_el1); | |
+ printk(" IFAR: %08" PRIx32 ", IFSR: %08" PRIx32 "\n" | |
+ " DFAR: %08" PRIx32 ", DFSR: %08" PRIx32 "\n", | |
#ifdef CONFIG_ARM_64 | |
- (uint32_t)(ctxt->far >> 32), | |
- ctxt->ifsr32_el2, | |
- (uint32_t)(ctxt->far & 0xffffffff), | |
- ctxt->esr_el1 | |
+ (uint32_t)(ctxt->far >> 32), ctxt->ifsr32_el2, | |
+ (uint32_t)(ctxt->far & 0xffffffff), ctxt->esr_el1 | |
#else | |
ctxt->ifar, ctxt->ifsr, ctxt->dfar, ctxt->dfsr | |
#endif | |
- ); | |
+ ); | |
printk("\n"); | |
} | |
} | |
#ifdef CONFIG_ARM_64 | |
static void show_registers_64(const struct cpu_user_regs *regs, | |
- const struct reg_ctxt *ctxt, | |
- bool guest_mode, | |
+ const struct reg_ctxt *ctxt, bool guest_mode, | |
const struct vcpu *v) | |
{ | |
-    BUG_ON( (regs->cpsr & PSR_MODE_BIT) ); | |
- | |
+    BUG_ON((regs->cpsr & PSR_MODE_BIT)); | |
- printk("PC: %016"PRIx64, regs->pc); | |
+ printk("PC: %016" PRIx64, regs->pc); | |
if ( !guest_mode ) | |
printk(" %pS", _p(regs->pc)); | |
printk("\n"); | |
- printk("LR: %016"PRIx64"\n", regs->lr); | |
+ printk("LR: %016" PRIx64 "\n", regs->lr); | |
if ( guest_mode ) | |
{ | |
- printk("SP_EL0: %016"PRIx64"\n", regs->sp_el0); | |
- printk("SP_EL1: %016"PRIx64"\n", regs->sp_el1); | |
+ printk("SP_EL0: %016" PRIx64 "\n", regs->sp_el0); | |
+ printk("SP_EL1: %016" PRIx64 "\n", regs->sp_el1); | |
} | |
else | |
{ | |
- printk("SP: %016"PRIx64"\n", regs->sp); | |
+ printk("SP: %016" PRIx64 "\n", regs->sp); | |
} | |
- printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, | |
+ printk("CPSR: %08" PRIx32 " MODE:%s\n", regs->cpsr, | |
mode_string(regs->cpsr)); | |
- printk(" X0: %016"PRIx64" X1: %016"PRIx64" X2: %016"PRIx64"\n", | |
+ printk(" X0: %016" PRIx64 " X1: %016" PRIx64 " X2: %016" PRIx64 "\n", | |
regs->x0, regs->x1, regs->x2); | |
- printk(" X3: %016"PRIx64" X4: %016"PRIx64" X5: %016"PRIx64"\n", | |
+ printk(" X3: %016" PRIx64 " X4: %016" PRIx64 " X5: %016" PRIx64 "\n", | |
regs->x3, regs->x4, regs->x5); | |
- printk(" X6: %016"PRIx64" X7: %016"PRIx64" X8: %016"PRIx64"\n", | |
+ printk(" X6: %016" PRIx64 " X7: %016" PRIx64 " X8: %016" PRIx64 "\n", | |
regs->x6, regs->x7, regs->x8); | |
- printk(" X9: %016"PRIx64" X10: %016"PRIx64" X11: %016"PRIx64"\n", | |
+ printk(" X9: %016" PRIx64 " X10: %016" PRIx64 " X11: %016" PRIx64 "\n", | |
regs->x9, regs->x10, regs->x11); | |
- printk(" X12: %016"PRIx64" X13: %016"PRIx64" X14: %016"PRIx64"\n", | |
+ printk(" X12: %016" PRIx64 " X13: %016" PRIx64 " X14: %016" PRIx64 "\n", | |
regs->x12, regs->x13, regs->x14); | |
- printk(" X15: %016"PRIx64" X16: %016"PRIx64" X17: %016"PRIx64"\n", | |
+ printk(" X15: %016" PRIx64 " X16: %016" PRIx64 " X17: %016" PRIx64 "\n", | |
regs->x15, regs->x16, regs->x17); | |
- printk(" X18: %016"PRIx64" X19: %016"PRIx64" X20: %016"PRIx64"\n", | |
+ printk(" X18: %016" PRIx64 " X19: %016" PRIx64 " X20: %016" PRIx64 "\n", | |
regs->x18, regs->x19, regs->x20); | |
- printk(" X21: %016"PRIx64" X22: %016"PRIx64" X23: %016"PRIx64"\n", | |
+ printk(" X21: %016" PRIx64 " X22: %016" PRIx64 " X23: %016" PRIx64 "\n", | |
regs->x21, regs->x22, regs->x23); | |
- printk(" X24: %016"PRIx64" X25: %016"PRIx64" X26: %016"PRIx64"\n", | |
+ printk(" X24: %016" PRIx64 " X25: %016" PRIx64 " X26: %016" PRIx64 "\n", | |
regs->x24, regs->x25, regs->x26); | |
- printk(" X27: %016"PRIx64" X28: %016"PRIx64" FP: %016"PRIx64"\n", | |
+ printk(" X27: %016" PRIx64 " X28: %016" PRIx64 " FP: %016" PRIx64 "\n", | |
regs->x27, regs->x28, regs->fp); | |
printk("\n"); | |
if ( guest_mode ) | |
{ | |
- printk(" ELR_EL1: %016"PRIx64"\n", regs->elr_el1); | |
- printk(" ESR_EL1: %08"PRIx32"\n", ctxt->esr_el1); | |
- printk(" FAR_EL1: %016"PRIx64"\n", ctxt->far); | |
+ printk(" ELR_EL1: %016" PRIx64 "\n", regs->elr_el1); | |
+ printk(" ESR_EL1: %08" PRIx32 "\n", ctxt->esr_el1); | |
+ printk(" FAR_EL1: %016" PRIx64 "\n", ctxt->far); | |
printk("\n"); | |
- printk(" SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr_el1); | |
- printk(" TCR_EL1: %08"PRIregister"\n", ctxt->tcr_el1); | |
- printk(" TTBR0_EL1: %016"PRIx64"\n", ctxt->ttbr0_el1); | |
- printk(" TTBR1_EL1: %016"PRIx64"\n", ctxt->ttbr1_el1); | |
+ printk(" SCTLR_EL1: %08" PRIx32 "\n", ctxt->sctlr_el1); | |
+ printk(" TCR_EL1: %08" PRIregister "\n", ctxt->tcr_el1); | |
+ printk(" TTBR0_EL1: %016" PRIx64 "\n", ctxt->ttbr0_el1); | |
+ printk(" TTBR1_EL1: %016" PRIx64 "\n", ctxt->ttbr1_el1); | |
printk("\n"); | |
} | |
} | |
#endif | |
static void _show_registers(const struct cpu_user_regs *regs, | |
- const struct reg_ctxt *ctxt, | |
- bool guest_mode, | |
+ const struct reg_ctxt *ctxt, bool guest_mode, | |
const struct vcpu *v) | |
{ | |
print_xen_info(); | |
@@ -939,22 +943,22 @@ static void _show_registers(const struct cpu_user_regs *regs, | |
show_registers_32(regs, ctxt, guest_mode, v); | |
#endif | |
} | |
- printk(" VTCR_EL2: %08"PRIx32"\n", READ_SYSREG32(VTCR_EL2)); | |
- printk(" VTTBR_EL2: %016"PRIx64"\n", ctxt->vttbr_el2); | |
+ printk(" VTCR_EL2: %08" PRIx32 "\n", READ_SYSREG32(VTCR_EL2)); | |
+ printk(" VTTBR_EL2: %016" PRIx64 "\n", ctxt->vttbr_el2); | |
printk("\n"); | |
- printk(" SCTLR_EL2: %08"PRIx32"\n", READ_SYSREG32(SCTLR_EL2)); | |
- printk(" HCR_EL2: %016"PRIregister"\n", READ_SYSREG(HCR_EL2)); | |
- printk(" TTBR0_EL2: %016"PRIx64"\n", READ_SYSREG64(TTBR0_EL2)); | |
+ printk(" SCTLR_EL2: %08" PRIx32 "\n", READ_SYSREG32(SCTLR_EL2)); | |
+ printk(" HCR_EL2: %016" PRIregister "\n", READ_SYSREG(HCR_EL2)); | |
+ printk(" TTBR0_EL2: %016" PRIx64 "\n", READ_SYSREG64(TTBR0_EL2)); | |
printk("\n"); | |
- printk(" ESR_EL2: %08"PRIx32"\n", regs->hsr); | |
- printk(" HPFAR_EL2: %016"PRIregister"\n", READ_SYSREG(HPFAR_EL2)); | |
+ printk(" ESR_EL2: %08" PRIx32 "\n", regs->hsr); | |
+ printk(" HPFAR_EL2: %016" PRIregister "\n", READ_SYSREG(HPFAR_EL2)); | |
#ifdef CONFIG_ARM_32 | |
- printk(" HDFAR: %08"PRIx32"\n", READ_CP32(HDFAR)); | |
- printk(" HIFAR: %08"PRIx32"\n", READ_CP32(HIFAR)); | |
+ printk(" HDFAR: %08" PRIx32 "\n", READ_CP32(HDFAR)); | |
+ printk(" HIFAR: %08" PRIx32 "\n", READ_CP32(HIFAR)); | |
#else | |
- printk(" FAR_EL2: %016"PRIx64"\n", READ_SYSREG64(FAR_EL2)); | |
+ printk(" FAR_EL2: %016" PRIx64 "\n", READ_SYSREG64(FAR_EL2)); | |
#endif | |
printk("\n"); | |
} | |
@@ -1019,7 +1023,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
return; | |
} | |
- switch ( regs->cpsr & PSR_MODE_MASK ) | |
+ switch (regs->cpsr & PSR_MODE_MASK) | |
{ | |
case PSR_MODE_USR: | |
case PSR_MODE_SYS: | |
@@ -1067,9 +1071,9 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
return; | |
} | |
- printk("Guest stack trace from sp=%"PRIvaddr":\n ", sp); | |
+ printk("Guest stack trace from sp=%" PRIvaddr ":\n ", sp); | |
- if ( sp & ( sizeof(long) - 1 ) ) | |
+ if ( sp & (sizeof(long) - 1) ) | |
{ | |
printk("Stack is misaligned\n"); | |
return; | |
@@ -1086,7 +1090,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
stack = mapped + (sp & ~PAGE_MASK); | |
- for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) | |
+ for ( i = 0; i < (debug_stack_lines * stack_words_per_line); i++ ) | |
{ | |
if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & PAGE_SIZE ) | |
break; | |
@@ -1103,7 +1107,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
put_page(page); | |
} | |
-#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) | |
+#define STACK_BEFORE_EXCEPTION(regs) ((register_t *)(regs)->sp) | |
#ifdef CONFIG_ARM_32 | |
/* Frame pointer points to the return address: | |
* (largest address) | |
@@ -1120,7 +1124,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
* | | |
* v (smallest address, sp) | |
*/ | |
-#define STACK_FRAME_BASE(fp) ((register_t*)(fp) - 1) | |
+#define STACK_FRAME_BASE(fp) ((register_t *)(fp)-1) | |
#else | |
/* Frame pointer points to the next frame: | |
* (largest address) | |
@@ -1137,7 +1141,7 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs) | |
* | | |
* v (smallest address, sp) | |
*/ | |
-#define STACK_FRAME_BASE(fp) ((register_t*)(fp)) | |
+#define STACK_FRAME_BASE(fp) ((register_t *)(fp)) | |
#endif | |
static void show_trace(const struct cpu_user_regs *regs) | |
{ | |
@@ -1149,22 +1153,21 @@ static void show_trace(const struct cpu_user_regs *regs) | |
printk(" [<%p>] %pS (LR)\n", _p(regs->lr), _p(regs->lr)); | |
/* Bounds for range of valid frame pointer. */ | |
- low = (register_t)(STACK_BEFORE_EXCEPTION(regs)); | |
- high = (low & ~(STACK_SIZE - 1)) + | |
- (STACK_SIZE - sizeof(struct cpu_info)); | |
+ low = (register_t)(STACK_BEFORE_EXCEPTION(regs)); | |
+ high = (low & ~(STACK_SIZE - 1)) + (STACK_SIZE - sizeof(struct cpu_info)); | |
/* The initial frame pointer. */ | |
next = regs->fp; | |
- for ( ; ; ) | |
+ for ( ;; ) | |
{ | |
if ( (next < low) || (next >= high) ) | |
break; | |
/* Ordinary stack frame. */ | |
frame = STACK_FRAME_BASE(next); | |
- next = frame[0]; | |
- addr = frame[1]; | |
+ next = frame[0]; | |
+ addr = frame[1]; | |
printk(" [<%p>] %pS\n", _p(addr), _p(addr)); | |
@@ -1184,9 +1187,9 @@ void show_stack(const struct cpu_user_regs *regs) | |
printk("Xen stack trace from sp=%p:\n ", stack); | |
- for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) | |
+ for ( i = 0; i < (debug_stack_lines * stack_words_per_line); i++ ) | |
{ | |
- if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 ) | |
+ if ( ((long)stack & (STACK_SIZE - BYTES_PER_LONG)) == 0 ) | |
break; | |
if ( (i != 0) && ((i % stack_words_per_line) == 0) ) | |
printk("\n "); | |
@@ -1209,8 +1212,8 @@ void show_execution_state(const struct cpu_user_regs *regs) | |
void vcpu_show_execution_state(struct vcpu *v) | |
{ | |
- printk("*** Dumping Dom%d vcpu#%d state: ***\n", | |
- v->domain->domain_id, v->vcpu_id); | |
+ printk("*** Dumping Dom%d vcpu#%d state: ***\n", v->domain->domain_id, | |
+ v->vcpu_id); | |
if ( v == current ) | |
{ | |
@@ -1261,7 +1264,7 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) | |
} | |
} | |
} | |
- found: | |
+found: | |
if ( !bug ) | |
return -ENOENT; | |
@@ -1277,7 +1280,7 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) | |
} | |
lineno = bug_line(bug); | |
- switch ( id ) | |
+ switch (id) | |
{ | |
case BUGFRAME_warn: | |
printk("Xen WARN at %s%s:%d\n", prefix, filename, lineno); | |
@@ -1299,13 +1302,13 @@ int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc) | |
if ( !is_kernel(predicate) ) | |
predicate = "<unknown>"; | |
- printk("Assertion '%s' failed at %s%s:%d\n", | |
- predicate, prefix, filename, lineno); | |
+ printk("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, | |
+ filename, lineno); | |
if ( debugger_trap_fatal(TRAP_invalid_op, regs) ) | |
return 0; | |
show_execution_state(regs); | |
- panic("Assertion '%s' failed at %s%s:%d\n", | |
- predicate, prefix, filename, lineno); | |
+ panic("Assertion '%s' failed at %s%s:%d\n", predicate, prefix, | |
+ filename, lineno); | |
} | |
return -EINVAL; | |
@@ -1319,7 +1322,7 @@ static void do_trap_brk(struct cpu_user_regs *regs, const union hsr hsr) | |
*/ | |
BUG_ON(!hyp_mode(regs)); | |
- switch ( hsr.brk.comment ) | |
+ switch (hsr.brk.comment) | |
{ | |
case BRK_BUG_FRAME_IMM: | |
if ( do_bug_frame(regs, regs->pc) ) | |
@@ -1330,7 +1333,7 @@ static void do_trap_brk(struct cpu_user_regs *regs, const union hsr hsr) | |
break; | |
default: | |
-die: | |
+ die: | |
do_unexpected_trap("Undefined Breakpoint Value", regs); | |
} | |
} | |
@@ -1341,45 +1344,44 @@ static register_t do_deprecated_hypercall(void) | |
struct cpu_user_regs *regs = guest_cpu_user_regs(); | |
const register_t op = | |
#ifdef CONFIG_ARM_64 | |
- !is_32bit_domain(current->domain) ? | |
- regs->x16 | |
- : | |
+ !is_32bit_domain(current->domain) ? regs->x16 : | |
#endif | |
- regs->r12; | |
+ regs->r12; | |
- gdprintk(XENLOG_DEBUG, "%pv: deprecated hypercall %lu\n", | |
- current, (unsigned long)op); | |
+ gdprintk(XENLOG_DEBUG, "%pv: deprecated hypercall %lu\n", current, | |
+ (unsigned long)op); | |
return -ENOSYS; | |
} | |
-typedef register_t (*arm_hypercall_fn_t)( | |
- register_t, register_t, register_t, register_t, register_t); | |
+typedef register_t (*arm_hypercall_fn_t)(register_t, register_t, register_t, | |
+ register_t, register_t); | |
-typedef struct { | |
+typedef struct | |
+{ | |
arm_hypercall_fn_t fn; | |
int nr_args; | |
} arm_hypercall_t; | |
-#define HYPERCALL(_name, _nr_args) \ | |
- [ __HYPERVISOR_ ## _name ] = { \ | |
- .fn = (arm_hypercall_fn_t) &do_ ## _name, \ | |
- .nr_args = _nr_args, \ | |
+#define HYPERCALL(_name, _nr_args) \ | |
+ [__HYPERVISOR_##_name] = { \ | |
+ .fn = (arm_hypercall_fn_t)&do_##_name, \ | |
+ .nr_args = _nr_args, \ | |
} | |
-#define HYPERCALL_ARM(_name, _nr_args) \ | |
- [ __HYPERVISOR_ ## _name ] = { \ | |
- .fn = (arm_hypercall_fn_t) &do_arm_ ## _name, \ | |
- .nr_args = _nr_args, \ | |
+#define HYPERCALL_ARM(_name, _nr_args) \ | |
+ [__HYPERVISOR_##_name] = { \ | |
+ .fn = (arm_hypercall_fn_t)&do_arm_##_name, \ | |
+ .nr_args = _nr_args, \ | |
} | |
/* | |
* Only use this for hypercalls which were deprecated (i.e. replaced | |
* by something else) before Xen on ARM was created, i.e. *not* for | |
* hypercalls which are simply not yet used on ARM. | |
*/ | |
-#define HYPERCALL_DEPRECATED(_name, _nr_args) \ | |
- [ __HYPERVISOR_##_name ] = { \ | |
- .fn = (arm_hypercall_fn_t) &do_deprecated_hypercall, \ | |
- .nr_args = _nr_args, \ | |
+#define HYPERCALL_DEPRECATED(_name, _nr_args) \ | |
+ [__HYPERVISOR_##_name] = { \ | |
+ .fn = (arm_hypercall_fn_t)&do_deprecated_hypercall, \ | |
+ .nr_args = _nr_args, \ | |
} | |
static arm_hypercall_t arm_hypercall_table[] = { | |
@@ -1414,15 +1416,15 @@ static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) | |
uint32_t reg; | |
uint32_t domid = current->domain->domain_id; | |
- switch ( code ) | |
+ switch (code) | |
{ | |
case 0xe0 ... 0xef: | |
reg = code - 0xe0; | |
- printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", | |
- domid, reg, get_user_reg(regs, reg), regs->pc); | |
+ printk("DOM%d: R%d = 0x%" PRIregister " at 0x%" PRIvaddr "\n", domid, | |
+ reg, get_user_reg(regs, reg), regs->pc); | |
break; | |
case 0xfd: | |
- printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); | |
+ printk("DOM%d: Reached %" PRIvaddr "\n", domid, regs->pc); | |
break; | |
case 0xfe: | |
printk("%c", (char)(get_user_reg(regs, 0) & 0xff)); | |
@@ -1461,7 +1463,7 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, | |
{ | |
arm_hypercall_fn_t call = NULL; | |
- BUILD_BUG_ON(NR_hypercalls < ARRAY_SIZE(arm_hypercall_table) ); | |
+ BUILD_BUG_ON(NR_hypercalls < ARRAY_SIZE(arm_hypercall_table)); | |
if ( hsr.iss != XEN_HYPERCALL_TAG ) | |
{ | |
@@ -1492,14 +1494,20 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, | |
if ( !current->hcall_preempted ) | |
{ | |
/* Deliberately corrupt parameter regs used by this hypercall. */ | |
- switch ( arm_hypercall_table[*nr].nr_args ) { | |
- case 5: HYPERCALL_ARG5(regs) = 0xDEADBEEF; | |
- case 4: HYPERCALL_ARG4(regs) = 0xDEADBEEF; | |
- case 3: HYPERCALL_ARG3(regs) = 0xDEADBEEF; | |
- case 2: HYPERCALL_ARG2(regs) = 0xDEADBEEF; | |
+ switch (arm_hypercall_table[*nr].nr_args) | |
+ { | |
+ case 5: | |
+ HYPERCALL_ARG5(regs) = 0xDEADBEEF; | |
+ case 4: | |
+ HYPERCALL_ARG4(regs) = 0xDEADBEEF; | |
+ case 3: | |
+ HYPERCALL_ARG3(regs) = 0xDEADBEEF; | |
+ case 2: | |
+ HYPERCALL_ARG2(regs) = 0xDEADBEEF; | |
case 1: /* Don't clobber x0/r0 -- it's the return value */ | |
break; | |
- default: BUG(); | |
+ default: | |
+ BUG(); | |
} | |
*nr = 0xDEADBEEF; | |
} | |
@@ -1507,7 +1515,7 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, | |
/* Ensure the hypercall trap instruction is re-executed. */ | |
if ( current->hcall_preempted ) | |
- regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */ | |
+ regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */ | |
} | |
static bool check_multicall_32bit_clean(struct multicall_entry *multi) | |
@@ -1518,7 +1526,8 @@ static bool check_multicall_32bit_clean(struct multicall_entry *multi) | |
{ | |
if ( unlikely(multi->args[i] & 0xffffffff00000000ULL) ) | |
{ | |
- printk("%pv: multicall argument %d is not 32-bit clean %"PRIx64"\n", | |
+ printk("%pv: multicall argument %d is not 32-bit clean %" PRIx64 | |
+ "\n", | |
current, i, multi->args[i]); | |
domain_crash(current->domain); | |
return false; | |
@@ -1550,12 +1559,11 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) | |
!check_multicall_32bit_clean(multi) ) | |
return mc_continue; | |
- multi->result = call(multi->args[0], multi->args[1], | |
- multi->args[2], multi->args[3], | |
- multi->args[4]); | |
+ multi->result = call(multi->args[0], multi->args[1], multi->args[2], | |
+ multi->args[3], multi->args[4]); | |
- return likely(!psr_mode_is_user(guest_cpu_user_regs())) | |
- ? mc_continue : mc_preempt; | |
+ return likely(!psr_mode_is_user(guest_cpu_user_regs())) ? mc_continue | |
+ : mc_preempt; | |
} | |
/* | |
@@ -1567,22 +1575,22 @@ enum mc_disposition arch_do_multicall_call(struct mc_state *state) | |
* bit position in short is condition code: NZCV | |
*/ | |
static const unsigned short cc_map[16] = { | |
- 0xF0F0, /* EQ == Z set */ | |
- 0x0F0F, /* NE */ | |
- 0xCCCC, /* CS == C set */ | |
- 0x3333, /* CC */ | |
- 0xFF00, /* MI == N set */ | |
- 0x00FF, /* PL */ | |
- 0xAAAA, /* VS == V set */ | |
- 0x5555, /* VC */ | |
- 0x0C0C, /* HI == C set && Z clear */ | |
- 0xF3F3, /* LS == C clear || Z set */ | |
- 0xAA55, /* GE == (N==V) */ | |
- 0x55AA, /* LT == (N!=V) */ | |
- 0x0A05, /* GT == (!Z && (N==V)) */ | |
- 0xF5FA, /* LE == (Z || (N!=V)) */ | |
- 0xFFFF, /* AL always */ | |
- 0 /* NV */ | |
+ 0xF0F0, /* EQ == Z set */ | |
+ 0x0F0F, /* NE */ | |
+ 0xCCCC, /* CS == C set */ | |
+ 0x3333, /* CC */ | |
+ 0xFF00, /* MI == N set */ | |
+ 0x00FF, /* PL */ | |
+ 0xAAAA, /* VS == V set */ | |
+ 0x5555, /* VC */ | |
+ 0x0C0C, /* HI == C set && Z clear */ | |
+ 0xF3F3, /* LS == C clear || Z set */ | |
+ 0xAA55, /* GE == (N==V) */ | |
+ 0x55AA, /* LT == (N!=V) */ | |
+ 0x0A05, /* GT == (!Z && (N==V)) */ | |
+ 0xF5FA, /* LE == (Z || (N!=V)) */ | |
+ 0xFFFF, /* AL always */ | |
+ 0 /* NV */ | |
}; | |
int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr) | |
@@ -1625,16 +1633,16 @@ int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr) | |
{ | |
unsigned long it; | |
- BUG_ON( !psr_mode_is_32bit(regs->cpsr) || !(cpsr&PSR_THUMB) ); | |
+ BUG_ON(!psr_mode_is_32bit(regs->cpsr) || !(cpsr & PSR_THUMB)); | |
- it = ( (cpsr >> (10-2)) & 0xfc) | ((cpsr >> 25) & 0x3 ); | |
+ it = ((cpsr >> (10 - 2)) & 0xfc) | ((cpsr >> 25) & 0x3); | |
/* it == 0 => unconditional. */ | |
if ( it == 0 ) | |
return 1; | |
/* The cond for this instruction works out as the top 4 bits. */ | |
- cond = ( it >> 4 ); | |
+ cond = (it >> 4); | |
} | |
cpsr_cond = cpsr >> 28; | |
@@ -1685,11 +1693,8 @@ void advance_pc(struct cpu_user_regs *regs, const union hsr hsr) | |
} | |
/* Read as zero and write ignore */ | |
-void handle_raz_wi(struct cpu_user_regs *regs, | |
- int regidx, | |
- bool read, | |
- const union hsr hsr, | |
- int min_el) | |
+void handle_raz_wi(struct cpu_user_regs *regs, int regidx, bool read, | |
+ const union hsr hsr, int min_el) | |
{ | |
ASSERT((min_el == 0) || (min_el == 1)); | |
@@ -1704,11 +1709,8 @@ void handle_raz_wi(struct cpu_user_regs *regs, | |
} | |
/* write only as write ignore */ | |
-void handle_wo_wi(struct cpu_user_regs *regs, | |
- int regidx, | |
- bool read, | |
- const union hsr hsr, | |
- int min_el) | |
+void handle_wo_wi(struct cpu_user_regs *regs, int regidx, bool read, | |
+ const union hsr hsr, int min_el) | |
{ | |
ASSERT((min_el == 0) || (min_el == 1)); | |
@@ -1723,12 +1725,8 @@ void handle_wo_wi(struct cpu_user_regs *regs, | |
} | |
/* Read only as value provided with 'val' argument of this function */ | |
-void handle_ro_read_val(struct cpu_user_regs *regs, | |
- int regidx, | |
- bool read, | |
- const union hsr hsr, | |
- int min_el, | |
- register_t val) | |
+void handle_ro_read_val(struct cpu_user_regs *regs, int regidx, bool read, | |
+ const union hsr hsr, int min_el, register_t val) | |
{ | |
ASSERT((min_el == 0) || (min_el == 1)); | |
@@ -1744,11 +1742,8 @@ void handle_ro_read_val(struct cpu_user_regs *regs, | |
} | |
/* Read only as read as zero */ | |
-inline void handle_ro_raz(struct cpu_user_regs *regs, | |
- int regidx, | |
- bool read, | |
- const union hsr hsr, | |
- int min_el) | |
+inline void handle_ro_raz(struct cpu_user_regs *regs, int regidx, bool read, | |
+ const union hsr hsr, int min_el) | |
{ | |
handle_ro_read_val(regs, regidx, read, hsr, min_el, 0); | |
} | |
@@ -1763,10 +1758,10 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) | |
mfn = gfn_to_mfn(d, gaddr_to_gfn(ttbr0)); | |
- printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr); | |
- printk(" TTBCR: 0x%08"PRIregister"\n", ttbcr); | |
- printk(" TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n", | |
- ttbr0, mfn_to_maddr(mfn)); | |
+ printk("dom%d VA 0x%08" PRIvaddr "\n", d->domain_id, addr); | |
+ printk(" TTBCR: 0x%08" PRIregister "\n", ttbcr); | |
+ printk(" TTBR0: 0x%016" PRIx64 " = 0x%" PRIpaddr "\n", ttbr0, | |
+ mfn_to_maddr(mfn)); | |
if ( ttbcr & TTBCR_EAE ) | |
{ | |
@@ -1786,11 +1781,10 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) | |
} | |
first = map_domain_page(mfn); | |
- offset = addr >> (12+8); | |
- printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n", | |
- offset, mfn_to_maddr(mfn), first[offset]); | |
- if ( !(first[offset] & 0x1) || | |
- (first[offset] & 0x2) ) | |
+ offset = addr >> (12 + 8); | |
+ printk("1ST[0x%" PRIx32 "] (0x%" PRIpaddr ") = 0x%08" PRIx32 "\n", offset, | |
+ mfn_to_maddr(mfn), first[offset]); | |
+ if ( !(first[offset] & 0x1) || (first[offset] & 0x2) ) | |
goto done; | |
mfn = gfn_to_mfn(d, gaddr_to_gfn(first[offset])); | |
@@ -1802,12 +1796,14 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) | |
} | |
second = map_domain_page(mfn); | |
offset = (addr >> 12) & 0x3FF; | |
- printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n", | |
- offset, mfn_to_maddr(mfn), second[offset]); | |
+ printk("2ND[0x%" PRIx32 "] (0x%" PRIpaddr ") = 0x%08" PRIx32 "\n", offset, | |
+ mfn_to_maddr(mfn), second[offset]); | |
done: | |
- if ( second ) unmap_domain_page(second); | |
- if ( first ) unmap_domain_page(first); | |
+ if ( second ) | |
+ unmap_domain_page(second); | |
+ if ( first ) | |
+ unmap_domain_page(first); | |
} | |
/* | |
@@ -1826,7 +1822,7 @@ static inline vaddr_t get_hfar(bool is_data) | |
else | |
gva = READ_CP32(HIFAR); | |
#else | |
- gva = READ_SYSREG(FAR_EL2); | |
+ gva = READ_SYSREG(FAR_EL2); | |
#endif | |
return gva; | |
@@ -1933,17 +1929,16 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, | |
return; /* Try again */ | |
} | |
- switch ( fsc ) | |
+ switch (fsc) | |
{ | |
case FSC_FLT_PERM: | |
{ | |
- const struct npfec npfec = { | |
- .insn_fetch = !is_data, | |
- .read_access = is_data && !hsr.dabt.write, | |
- .write_access = is_data && hsr.dabt.write, | |
- .gla_valid = 1, | |
- .kind = xabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla | |
- }; | |
+ const struct npfec npfec = {.insn_fetch = !is_data, | |
+ .read_access = is_data && !hsr.dabt.write, | |
+ .write_access = is_data && hsr.dabt.write, | |
+ .gla_valid = 1, | |
+ .kind = xabt.s1ptw ? npfec_kind_in_gpt | |
+ : npfec_kind_with_gla}; | |
p2m_mem_access_check(gpa, gva, npfec); | |
/* | |
@@ -1963,7 +1958,7 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, | |
{ | |
enum io_state state = try_handle_mmio(regs, hsr, gpa); | |
- switch ( state ) | |
+ switch (state) | |
{ | |
case IO_ABORT: | |
goto inject_abt; | |
@@ -1994,8 +1989,10 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs, | |
} | |
inject_abt: | |
- gdprintk(XENLOG_DEBUG, "HSR=0x%x pc=%#"PRIregister" gva=%#"PRIvaddr | |
- " gpa=%#"PRIpaddr"\n", hsr.bits, regs->pc, gva, gpa); | |
+ gdprintk(XENLOG_DEBUG, | |
+ "HSR=0x%x pc=%#" PRIregister " gva=%#" PRIvaddr " gpa=%#" PRIpaddr | |
+ "\n", | |
+ hsr.bits, regs->pc, gva, gpa); | |
if ( is_data ) | |
inject_dabt_exception(regs, gva, hsr.len); | |
else | |
@@ -2008,7 +2005,7 @@ static inline bool needs_ssbd_flip(struct vcpu *v) | |
return false; | |
return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) && | |
- cpu_require_ssbd_mitigation(); | |
+ cpu_require_ssbd_mitigation(); | |
} | |
static void enter_hypervisor_head(struct cpu_user_regs *regs) | |
@@ -2048,11 +2045,11 @@ static void enter_hypervisor_head(struct cpu_user_regs *regs) | |
void do_trap_guest_sync(struct cpu_user_regs *regs) | |
{ | |
- const union hsr hsr = { .bits = regs->hsr }; | |
+ const union hsr hsr = {.bits = regs->hsr}; | |
enter_hypervisor_head(regs); | |
- switch ( hsr.ec ) | |
+ switch (hsr.ec) | |
{ | |
case HSR_EC_WFI_WFE: | |
/* | |
@@ -2066,11 +2063,14 @@ void do_trap_guest_sync(struct cpu_user_regs *regs) | |
advance_pc(regs, hsr); | |
return; | |
} | |
- if ( hsr.wfi_wfe.ti ) { | |
+ if ( hsr.wfi_wfe.ti ) | |
+ { | |
/* Yield the VCPU for WFE */ | |
perfc_incr(trap_wfe); | |
vcpu_yield(); | |
- } else { | |
+ } | |
+ else | |
+ { | |
/* Block the VCPU for WFI */ | |
perfc_incr(trap_wfi); | |
vcpu_block_unless_event_pending(current); | |
@@ -2174,20 +2174,22 @@ void do_trap_guest_sync(struct cpu_user_regs *regs) | |
break; | |
default: | |
- gprintk(XENLOG_WARNING, | |
- "Unknown Guest Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%"PRIx32"\n", | |
- hsr.bits, hsr.ec, hsr.len, hsr.iss); | |
+ gprintk( | |
+ XENLOG_WARNING, | |
+ "Unknown Guest Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%" PRIx32 | |
+ "\n", | |
+ hsr.bits, hsr.ec, hsr.len, hsr.iss); | |
inject_undef_exception(regs, hsr); | |
} | |
} | |
void do_trap_hyp_sync(struct cpu_user_regs *regs) | |
{ | |
- const union hsr hsr = { .bits = regs->hsr }; | |
+ const union hsr hsr = {.bits = regs->hsr}; | |
enter_hypervisor_head(regs); | |
- switch ( hsr.ec ) | |
+ switch (hsr.ec) | |
{ | |
#ifdef CONFIG_ARM_64 | |
case HSR_EC_BRK: | |
@@ -2215,7 +2217,8 @@ void do_trap_hyp_sync(struct cpu_user_regs *regs) | |
break; | |
} | |
default: | |
- printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%"PRIx32"\n", | |
+ printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=0x%" PRIx32 | |
+ "\n", | |
hsr.bits, hsr.ec, hsr.len, hsr.iss); | |
do_unexpected_trap("Hypervisor", regs); | |
} | |
diff --git a/xen/arch/arm/vcpreg.c b/xen/arch/arm/vcpreg.c | |
index cdc91cdf5b..2c0190cefb 100644 | |
--- a/xen/arch/arm/vcpreg.c | |
+++ b/xen/arch/arm/vcpreg.c | |
@@ -48,67 +48,67 @@ | |
*/ | |
/* The name is passed from the upper macro to workaround macro expansion. */ | |
-#define TVM_REG(sz, func, reg...) \ | |
-static bool func(struct cpu_user_regs *regs, uint##sz##_t *r, bool read) \ | |
-{ \ | |
- struct vcpu *v = current; \ | |
- bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
- \ | |
- GUEST_BUG_ON(read); \ | |
- WRITE_SYSREG##sz(*r, reg); \ | |
- \ | |
- p2m_toggle_cache(v, cache_enabled); \ | |
- \ | |
- return true; \ | |
-} | |
+#define TVM_REG(sz, func, reg...) \ | |
+ static bool func(struct cpu_user_regs *regs, uint##sz##_t *r, bool read) \ | |
+ { \ | |
+ struct vcpu *v = current; \ | |
+ bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
+ \ | |
+ GUEST_BUG_ON(read); \ | |
+ WRITE_SYSREG##sz(*r, reg); \ | |
+ \ | |
+ p2m_toggle_cache(v, cache_enabled); \ | |
+ \ | |
+ return true; \ | |
+ } | |
#define TVM_REG32(regname, xreg) TVM_REG(32, vreg_emulate_##regname, xreg) | |
#define TVM_REG64(regname, xreg) TVM_REG(64, vreg_emulate_##regname, xreg) | |
#ifdef CONFIG_ARM_32 | |
-#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ | |
- /* Use TVM_REG directly to workaround macro expansion. */ \ | |
- TVM_REG(32, vreg_emulate_##lowreg, lowreg) \ | |
+#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ | |
+ /* Use TVM_REG directly to workaround macro expansion. */ \ | |
+ TVM_REG(32, vreg_emulate_##lowreg, lowreg) \ | |
TVM_REG(32, vreg_emulate_##hireg, hireg) | |
#else /* CONFIG_ARM_64 */ | |
-#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ | |
-static bool vreg_emulate_##xreg(struct cpu_user_regs *regs, uint32_t *r, \ | |
- bool read, bool hi) \ | |
-{ \ | |
- struct vcpu *v = current; \ | |
- bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
- register_t reg = READ_SYSREG(xreg); \ | |
- \ | |
- GUEST_BUG_ON(read); \ | |
- if ( hi ) /* reg[63:32] is AArch32 register hireg */ \ | |
- { \ | |
- reg &= GENMASK(31, 0); \ | |
- reg |= ((uint64_t)*r) << 32; \ | |
- } \ | |
- else /* reg[31:0] is AArch32 register lowreg. */ \ | |
- { \ | |
- reg &= GENMASK(63, 32); \ | |
- reg |= *r; \ | |
- } \ | |
- WRITE_SYSREG(reg, xreg); \ | |
- \ | |
- p2m_toggle_cache(v, cache_enabled); \ | |
- \ | |
- return true; \ | |
-} \ | |
- \ | |
-static bool vreg_emulate_##lowreg(struct cpu_user_regs *regs, uint32_t *r, \ | |
- bool read) \ | |
-{ \ | |
- return vreg_emulate_##xreg(regs, r, read, false); \ | |
-} \ | |
- \ | |
-static bool vreg_emulate_##hireg(struct cpu_user_regs *regs, uint32_t *r, \ | |
- bool read) \ | |
-{ \ | |
- return vreg_emulate_##xreg(regs, r, read, true); \ | |
-} | |
+#define TVM_REG32_COMBINED(lowreg, hireg, xreg) \ | |
+ static bool vreg_emulate_##xreg(struct cpu_user_regs *regs, uint32_t *r, \ | |
+ bool read, bool hi) \ | |
+ { \ | |
+ struct vcpu *v = current; \ | |
+ bool cache_enabled = vcpu_has_cache_enabled(v); \ | |
+ register_t reg = READ_SYSREG(xreg); \ | |
+ \ | |
+ GUEST_BUG_ON(read); \ | |
+ if ( hi ) /* reg[63:32] is AArch32 register hireg */ \ | |
+ { \ | |
+ reg &= GENMASK(31, 0); \ | |
+ reg |= ((uint64_t)*r) << 32; \ | |
+ } \ | |
+ else /* reg[31:0] is AArch32 register lowreg. */ \ | |
+ { \ | |
+ reg &= GENMASK(63, 32); \ | |
+ reg |= *r; \ | |
+ } \ | |
+ WRITE_SYSREG(reg, xreg); \ | |
+ \ | |
+ p2m_toggle_cache(v, cache_enabled); \ | |
+ \ | |
+ return true; \ | |
+ } \ | |
+ \ | |
+ static bool vreg_emulate_##lowreg(struct cpu_user_regs *regs, \ | |
+ uint32_t *r, bool read) \ | |
+ { \ | |
+ return vreg_emulate_##xreg(regs, r, read, false); \ | |
+ } \ | |
+ \ | |
+ static bool vreg_emulate_##hireg(struct cpu_user_regs *regs, uint32_t *r, \ | |
+ bool read) \ | |
+ { \ | |
+ return vreg_emulate_##xreg(regs, r, read, true); \ | |
+ } | |
#endif | |
/* Defining helpers for emulating co-processor registers. */ | |
@@ -145,14 +145,14 @@ TVM_REG32_COMBINED(AMAIR0, AMAIR1, AMAIR_EL1) | |
TVM_REG32(CONTEXTIDR, CONTEXTIDR_EL1) | |
/* Macro to generate easily case for co-processor emulation. */ | |
-#define GENERATE_CASE(reg, sz) \ | |
- case HSR_CPREG##sz(reg): \ | |
- { \ | |
- bool res; \ | |
- \ | |
- res = vreg_emulate_cp##sz(regs, hsr, vreg_emulate_##reg); \ | |
- ASSERT(res); \ | |
- break; \ | |
+#define GENERATE_CASE(reg, sz) \ | |
+ case HSR_CPREG##sz(reg): \ | |
+ { \ | |
+ bool res; \ | |
+ \ | |
+ res = vreg_emulate_cp##sz(regs, hsr, vreg_emulate_##reg); \ | |
+ ASSERT(res); \ | |
+ break; \ | |
} | |
void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) | |
@@ -167,7 +167,7 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) | |
return; | |
} | |
- switch ( hsr.bits & HSR_CP32_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP32_REGS_MASK) | |
{ | |
/* | |
* !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN | |
@@ -207,30 +207,30 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) | |
p2m_set_way_flush(current); | |
break; | |
- /* | |
- * HCR_EL2.TVM | |
- * | |
- * ARMv8 (DDI 0487D.a): Table D1-38 | |
- */ | |
- GENERATE_CASE(SCTLR, 32) | |
- GENERATE_CASE(TTBR0_32, 32) | |
- GENERATE_CASE(TTBR1_32, 32) | |
- GENERATE_CASE(TTBCR, 32) | |
- GENERATE_CASE(TTBCR2, 32) | |
- GENERATE_CASE(DACR, 32) | |
- GENERATE_CASE(DFSR, 32) | |
- GENERATE_CASE(IFSR, 32) | |
- GENERATE_CASE(DFAR, 32) | |
- GENERATE_CASE(IFAR, 32) | |
- GENERATE_CASE(ADFSR, 32) | |
- GENERATE_CASE(AIFSR, 32) | |
- /* AKA PRRR */ | |
- GENERATE_CASE(MAIR0, 32) | |
- /* AKA NMRR */ | |
- GENERATE_CASE(MAIR1, 32) | |
- GENERATE_CASE(AMAIR0, 32) | |
- GENERATE_CASE(AMAIR1, 32) | |
- GENERATE_CASE(CONTEXTIDR, 32) | |
+ /* | |
+ * HCR_EL2.TVM | |
+ * | |
+ * ARMv8 (DDI 0487D.a): Table D1-38 | |
+ */ | |
+ GENERATE_CASE(SCTLR, 32) | |
+ GENERATE_CASE(TTBR0_32, 32) | |
+ GENERATE_CASE(TTBR1_32, 32) | |
+ GENERATE_CASE(TTBCR, 32) | |
+ GENERATE_CASE(TTBCR2, 32) | |
+ GENERATE_CASE(DACR, 32) | |
+ GENERATE_CASE(DFSR, 32) | |
+ GENERATE_CASE(IFSR, 32) | |
+ GENERATE_CASE(DFAR, 32) | |
+ GENERATE_CASE(IFAR, 32) | |
+ GENERATE_CASE(ADFSR, 32) | |
+ GENERATE_CASE(AIFSR, 32) | |
+ /* AKA PRRR */ | |
+ GENERATE_CASE(MAIR0, 32) | |
+ /* AKA NMRR */ | |
+ GENERATE_CASE(MAIR1, 32) | |
+ GENERATE_CASE(AMAIR0, 32) | |
+ GENERATE_CASE(AMAIR1, 32) | |
+ GENERATE_CASE(CONTEXTIDR, 32) | |
/* | |
* MDCR_EL2.TPM | |
@@ -317,9 +317,9 @@ void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr) | |
*/ | |
default: | |
gdprintk(XENLOG_ERR, | |
- "%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", | |
- cp32.read ? "mrc" : "mcr", | |
- cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); | |
+ "%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%" PRIregister "\n", | |
+ cp32.read ? "mrc" : "mcr", cp32.op1, cp32.reg, cp32.crn, | |
+ cp32.crm, cp32.op2, regs->pc); | |
gdprintk(XENLOG_ERR, "unhandled 32-bit CP15 access %#x\n", | |
hsr.bits & HSR_CP32_REGS_MASK); | |
inject_undef_exception(regs, hsr); | |
@@ -336,7 +336,7 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) | |
return; | |
} | |
- switch ( hsr.bits & HSR_CP64_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP64_REGS_MASK) | |
{ | |
/* | |
* !CNTHCTL_EL2.EL1PCEN / !CNTHCTL.PL1PCEN | |
@@ -361,8 +361,8 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) | |
return inject_undef_exception(regs, hsr); | |
break; | |
- GENERATE_CASE(TTBR0, 64) | |
- GENERATE_CASE(TTBR1, 64) | |
+ GENERATE_CASE(TTBR0, 64) | |
+ GENERATE_CASE(TTBR1, 64) | |
/* | |
* CPTR_EL2.T{0..9,12..13} | |
@@ -382,18 +382,18 @@ void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr) | |
* And all other unknown registers. | |
*/ | |
default: | |
- { | |
- const struct hsr_cp64 cp64 = hsr.cp64; | |
- | |
- gdprintk(XENLOG_ERR, | |
- "%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", | |
- cp64.read ? "mrrc" : "mcrr", | |
- cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); | |
- gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n", | |
- hsr.bits & HSR_CP64_REGS_MASK); | |
- inject_undef_exception(regs, hsr); | |
- return; | |
- } | |
+ { | |
+ const struct hsr_cp64 cp64 = hsr.cp64; | |
+ | |
+ gdprintk(XENLOG_ERR, | |
+ "%s p15, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", | |
+ cp64.read ? "mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, | |
+ cp64.crm, regs->pc); | |
+ gdprintk(XENLOG_ERR, "unhandled 64-bit CP15 access %#x\n", | |
+ hsr.bits & HSR_CP64_REGS_MASK); | |
+ inject_undef_exception(regs, hsr); | |
+ return; | |
+ } | |
} | |
advance_pc(regs, hsr); | |
} | |
@@ -409,7 +409,7 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) | |
return; | |
} | |
- switch ( hsr.bits & HSR_CP32_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP32_REGS_MASK) | |
{ | |
/* | |
* MDCR_EL2.TDOSA | |
@@ -467,7 +467,7 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) | |
*/ | |
val = (1 << 24) | (5 << 16); | |
val |= ((current_cpu_data.midr.bits >> 20) & 0xf) | | |
- (current_cpu_data.midr.bits & 0xf); | |
+ (current_cpu_data.midr.bits & 0xf); | |
set_user_reg(regs, regidx, val); | |
break; | |
@@ -517,9 +517,9 @@ void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr) | |
*/ | |
default: | |
gdprintk(XENLOG_ERR, | |
- "%s p14, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", | |
- cp32.read ? "mrc" : "mcr", | |
- cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); | |
+ "%s p14, %d, r%d, cr%d, cr%d, %d @ 0x%" PRIregister "\n", | |
+ cp32.read ? "mrc" : "mcr", cp32.op1, cp32.reg, cp32.crn, | |
+ cp32.crm, cp32.op2, regs->pc); | |
gdprintk(XENLOG_ERR, "unhandled 32-bit cp14 access %#x\n", | |
hsr.bits & HSR_CP32_REGS_MASK); | |
inject_undef_exception(regs, hsr); | |
@@ -558,10 +558,9 @@ void do_cp14_64(struct cpu_user_regs *regs, const union hsr hsr) | |
* | |
* And all other unknown registers. | |
*/ | |
- gdprintk(XENLOG_ERR, | |
- "%s p14, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", | |
- cp64.read ? "mrrc" : "mcrr", | |
- cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); | |
+ gdprintk(XENLOG_ERR, "%s p14, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", | |
+ cp64.read ? "mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, | |
+ cp64.crm, regs->pc); | |
gdprintk(XENLOG_ERR, "unhandled 64-bit CP14 access %#x\n", | |
hsr.bits & HSR_CP64_REGS_MASK); | |
inject_undef_exception(regs, hsr); | |
@@ -589,10 +588,9 @@ void do_cp14_dbg(struct cpu_user_regs *regs, const union hsr hsr) | |
* | |
* And all other unknown registers. | |
*/ | |
- gdprintk(XENLOG_ERR, | |
- "%s p14, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", | |
- cp64.read ? "mrrc" : "mcrr", | |
- cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); | |
+ gdprintk(XENLOG_ERR, "%s p14, %d, r%d, r%d, cr%d @ 0x%" PRIregister "\n", | |
+ cp64.read ? "mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, | |
+ cp64.crm, regs->pc); | |
gdprintk(XENLOG_ERR, "unhandled 64-bit CP14 DBG access %#x\n", | |
hsr.bits & HSR_CP64_REGS_MASK); | |
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c | |
index 64b141fea5..b56e548f3e 100644 | |
--- a/xen/arch/arm/vgic-v2.c | |
+++ b/xen/arch/arm/vgic-v2.c | |
@@ -33,7 +33,8 @@ | |
#include <asm/vgic-emul.h> | |
#include <asm/vreg.h> | |
-static struct { | |
+static struct | |
+{ | |
bool enabled; | |
/* Distributor interface address */ | |
paddr_t dbase; | |
@@ -58,8 +59,8 @@ void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize, | |
vgic_v2_hw.aliased_offset = aliased_offset; | |
} | |
-#define NR_TARGETS_PER_ITARGETSR 4U | |
-#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR) | |
+#define NR_TARGETS_PER_ITARGETSR 4U | |
+#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR) | |
/* | |
* Fetch an ITARGETSR register based on the offset from ITARGETSR0. Only | |
@@ -79,7 +80,8 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank, | |
offset &= ~(NR_TARGETS_PER_ITARGETSR - 1); | |
for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ ) | |
- reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET); | |
+ reg |= (1 << read_atomic(&rank->vcpu[offset])) | |
+ << (i * NR_BITS_PER_TARGET); | |
return reg; | |
} | |
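
For reference, the ITARGETSR packing that vgic_fetch_itargetsr() implements, as a standalone sketch: each 32-bit register covers four interrupts, one byte per interrupt, with a single bit set per target vCPU. The fetch_itargetsr() helper and vcpu[] array below are illustrative stand-ins for the rank state, not Xen's structures.

#include <stdint.h>
#include <stdio.h>

#define NR_TARGETS_PER_ITARGETSR 4U
#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR)

/* Pack the targets of four consecutive interrupts into one register. */
static uint32_t fetch_itargetsr(const uint8_t *vcpu, unsigned int offset)
{
    uint32_t reg = 0;
    unsigned int i;

    offset &= ~(NR_TARGETS_PER_ITARGETSR - 1); /* align to the register */
    for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
        reg |= (1U << vcpu[offset]) << (i * NR_BITS_PER_TARGET);

    return reg;
}

int main(void)
{
    const uint8_t vcpu[8] = { 0, 1, 2, 3, 0, 0, 1, 1 };

    /* IRQs 0..3 target vCPUs 0..3, so the register reads 0x08040201. */
    printf("ITARGETSR0 = %#010x\n", fetch_itargetsr(vcpu, 0));
    return 0;
}
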
@@ -121,7 +123,7 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, | |
* Don't need to mask as we rely on new_mask to fit for only one | |
* target. | |
*/ | |
- BUILD_BUG_ON((sizeof (new_mask) * 8) != NR_BITS_PER_TARGET); | |
+ BUILD_BUG_ON((sizeof(new_mask) * 8) != NR_BITS_PER_TARGET); | |
new_mask = itargetsr >> (i * NR_BITS_PER_TARGET); | |
@@ -143,9 +145,10 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, | |
*/ | |
if ( !new_target || (new_target > d->max_vcpus) ) | |
{ | |
- gprintk(XENLOG_WARNING, | |
- "No valid vCPU found for vIRQ%u in the target list (%#x). Skip it\n", | |
- virq, new_mask); | |
+ gprintk( | |
+ XENLOG_WARNING, | |
+ "No valid vCPU found for vIRQ%u in the target list (%#x). Skip it\n", | |
+ virq, new_mask); | |
continue; | |
} | |
@@ -157,9 +160,8 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank, | |
/* Only migrate the vIRQ if the target vCPU has changed */ | |
if ( new_target != old_target ) | |
{ | |
- if ( vgic_migrate_irq(d->vcpu[old_target], | |
- d->vcpu[new_target], | |
- virq) ) | |
+ if ( vgic_migrate_irq(d->vcpu[old_target], d->vcpu[new_target], | |
+ virq) ) | |
write_atomic(&rank->vcpu[offset], new_target); | |
} | |
} | |
@@ -175,10 +177,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
perfc_incr(vgicd_reads); | |
- switch ( gicd_reg ) | |
+ switch (gicd_reg) | |
{ | |
case VREG32(GICD_CTLR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
vgic_lock(v); | |
*r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info); | |
vgic_unlock(v); | |
@@ -188,11 +191,12 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint32_t typer; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* No secure world support for guests. */ | |
vgic_lock(v); | |
- typer = ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) | |
- | DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32); | |
+ typer = ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) | | |
+ DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32); | |
vgic_unlock(v); | |
*r = vreg_reg32_extract(typer, info); | |
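
The TYPER value assembled above packs two facts into one word: the vCPU count minus one in the CPUNumber field, and the number of 32-interrupt SPI blocks in ITLinesNumber. A minimal sketch, assuming the GICv2 CPUNumber position (bits 7:5); the vCPU and SPI counts are made up.

#include <stdio.h>

#define GICD_TYPE_CPUS_SHIFT 5 /* CPUNumber field position, GICv2 spec */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int max_vcpus = 4, nr_spis = 96;
    unsigned int typer = ((max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) |
                         DIV_ROUND_UP(nr_spis, 32);

    printf("GICD_TYPER = %#x\n", typer); /* 3 extra CPUs, 3 blocks: 0x63 */
    return 0;
}
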
@@ -201,7 +205,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
} | |
case VREG32(GICD_IIDR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* | |
* XXX Do we need a JEP106 manufacturer ID? | |
* Just use the physical h/w value for now | |
@@ -223,18 +228,22 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_as_zero_32; | |
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD); | |
- if ( rank == NULL) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
*r = vreg_reg32_extract(rank->ienable, info); | |
vgic_unlock_rank(v, rank, flags); | |
return 1; | |
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD); | |
- if ( rank == NULL) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
*r = vreg_reg32_extract(rank->ienable, info); | |
vgic_unlock_rank(v, rank, flags); | |
@@ -255,9 +264,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
uint32_t ipriorityr; | |
uint8_t rank_index; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
rank_index = REG_RANK_INDEX(8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); | |
vgic_lock_rank(v, rank, flags); | |
@@ -275,9 +286,11 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint32_t itargetsr; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD); | |
- if ( rank == NULL) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR); | |
vgic_unlock_rank(v, rank, flags); | |
@@ -293,11 +306,14 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint32_t icfgr; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD); | |
- if ( rank == NULL) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
- icfgr = rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)]; | |
+ icfgr = | |
+ rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)]; | |
vgic_unlock_rank(v, rank, flags); | |
*r = vreg_reg32_extract(icfgr, info); | |
@@ -313,7 +329,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_as_zero_32; | |
case VREG32(GICD_SGIR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* Write only -- read unknown */ | |
*r = 0xdeadbeef; | |
return 1; | |
@@ -333,7 +350,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_impl_defined; | |
case VREG32(GICD_ICPIDR2): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
printk(XENLOG_G_ERR "%pv: vGICD: unhandled read from ICPIDR2\n", v); | |
return 0; | |
@@ -341,18 +359,19 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", | |
- v, dabt.reg, gicd_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", v, | |
+ dabt.reg, gicd_reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", | |
- v, dabt.size, dabt.reg, gicd_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", v, | |
+ dabt.size, dabt.reg, gicd_reg); | |
return 0; | |
read_as_zero_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
read_as_zero: | |
*r = 0; | |
return 1; | |
@@ -374,7 +393,6 @@ read_reserved: | |
static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) | |
{ | |
- | |
int virq; | |
int irqmode; | |
enum gic_sgi_mode sgi_mode; | |
@@ -385,7 +403,7 @@ static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) | |
virq = (sgir & GICD_SGI_INTID_MASK); | |
/* Map GIC sgi value to enum value */ | |
- switch ( irqmode ) | |
+ switch (irqmode) | |
{ | |
case GICD_SGI_TARGET_LIST_VAL: | |
target.list = (sgir & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT; | |
@@ -399,7 +417,8 @@ static bool vgic_v2_to_sgi(struct vcpu *v, register_t sgir) | |
break; | |
default: | |
printk(XENLOG_G_DEBUG | |
- "%pv: vGICD: unhandled GICD_SGIR write %"PRIregister" with wrong mode\n", | |
+ "%pv: vGICD: unhandled GICD_SGIR write %" PRIregister | |
+ " with wrong mode\n", | |
v, sgir); | |
return false; | |
} | |
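
The SGIR decoding above, worked through standalone: the SGI number sits in the low bits, the target-list filter in bits 25:24 and the CPU target list in bits 23:16. The mask and shift values below follow the GICv2 GICD_SGIR layout but are redefined locally for illustration.

#include <stdint.h>
#include <stdio.h>

#define SGI_INTID_MASK   0x0fU
#define SGI_TARGET_SHIFT 16
#define SGI_TARGET_MASK  (0xffU << SGI_TARGET_SHIFT)
#define SGI_FILTER_SHIFT 24

int main(void)
{
    uint32_t sgir = (0U << SGI_FILTER_SHIFT) |   /* filter: use target list */
                    (0x5U << SGI_TARGET_SHIFT) | /* CPUs 0 and 2 */
                    3U;                          /* SGI 3 */

    printf("virq=%u filter=%u targets=%#x\n",
           sgir & SGI_INTID_MASK,
           (sgir >> SGI_FILTER_SHIFT) & 0x3U,
           (sgir & SGI_TARGET_MASK) >> SGI_TARGET_SHIFT);
    return 0;
}
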
@@ -418,10 +437,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
perfc_incr(vgicd_writes); | |
- switch ( gicd_reg ) | |
+ switch (gicd_reg) | |
{ | |
case VREG32(GICD_CTLR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* Ignore all but the enable bit */ | |
vgic_lock(v); | |
vreg_reg32_update(&v->domain->arch.vgic.ctlr, r, info); | |
@@ -449,9 +469,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
goto write_ignore_32; | |
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISENABLER, DABT_WORD); | |
- if ( rank == NULL) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
tr = rank->ienable; | |
vreg_reg32_setbits(&rank->ienable, r, info); | |
@@ -460,9 +482,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICENABLER, DABT_WORD); | |
- if ( rank == NULL) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
tr = rank->ienable; | |
vreg_reg32_clearbits(&rank->ienable, r, info); | |
@@ -471,31 +495,34 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled word write %#"PRIregister" to ISPENDR%d\n", | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister | |
+ " to ISPENDR%d\n", | |
v, r, gicd_reg - GICD_ISPENDR); | |
return 0; | |
case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled word write %#"PRIregister" to ICPENDR%d\n", | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister | |
+ " to ICPENDR%d\n", | |
v, r, gicd_reg - GICD_ICPENDR); | |
return 0; | |
case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
if ( r == 0 ) | |
goto write_ignore_32; | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled word write %#"PRIregister" to ISACTIVER%d\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister | |
+ " to ISACTIVER%d\n", | |
v, r, gicd_reg - GICD_ISACTIVER); | |
return 0; | |
case VRANGE32(GICD_ICACTIVER, GICD_ICACTIVERN): | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled word write %#"PRIregister" to ICACTIVER%d\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled word write %#" PRIregister | |
+ " to ICACTIVER%d\n", | |
v, r, gicd_reg - GICD_ICACTIVER); | |
goto write_ignore_32; | |
@@ -503,13 +530,14 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint32_t *ipriorityr, priority; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD); | |
- if ( rank == NULL) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
- ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8, | |
- gicd_reg - GICD_IPRIORITYR, | |
- DABT_WORD)]; | |
+ ipriorityr = &rank->ipriorityr[REG_RANK_INDEX( | |
+ 8, gicd_reg - GICD_IPRIORITYR, DABT_WORD)]; | |
priority = ACCESS_ONCE(*ipriorityr); | |
vreg_reg32_update(&priority, r, info); | |
ACCESS_ONCE(*ipriorityr) = priority; | |
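
The ACCESS_ONCE() pair above implements a read-modify-write with exactly one load and one store of the shared word, so concurrent readers never see a torn or re-read value. A sketch of the pattern, with ACCESS_ONCE redefined locally in the usual volatile-alias form (GCC __typeof__):

#include <stdint.h>
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static uint32_t ipriorityr[8]; /* stand-in for the shared rank state */

int main(void)
{
    uint32_t priority = ACCESS_ONCE(ipriorityr[0]); /* single load */

    priority |= 0xa0a0a0a0U;               /* update the local copy */
    ACCESS_ONCE(ipriorityr[0]) = priority; /* single store */

    printf("%#010x\n", ipriorityr[0]);
    return 0;
}
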
@@ -529,9 +557,11 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint32_t itargetsr; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD); | |
- if ( rank == NULL) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR); | |
vreg_reg32_update(&itargetsr, r, info); | |
@@ -552,13 +582,15 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
goto write_ignore_32; | |
case VRANGE32(GICD_ICFGR2, GICD_ICFGRN): /* SPIs */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD); | |
- if ( rank == NULL) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
- vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, | |
- DABT_WORD)], | |
- r, info); | |
+ vreg_reg32_update( | |
+ &rank->icfg[REG_RANK_INDEX(2, gicd_reg - GICD_ICFGR, DABT_WORD)], | |
+ r, info); | |
vgic_unlock_rank(v, rank, flags); | |
return 1; | |
@@ -570,23 +602,26 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
goto write_ignore_32; | |
case VREG32(GICD_SGIR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return vgic_v2_to_sgi(v, r); | |
case VRANGE32(0xF04, 0xF0C): | |
goto write_reserved; | |
case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN): | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled %s write %#" PRIregister | |
+ " to ICPENDSGIR%d\n", | |
v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_CPENDSGIR); | |
return 0; | |
case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN): | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled %s write %#" PRIregister | |
+ " to ISPENDSGIR%d\n", | |
v, dabt.size ? "word" : "byte", r, gicd_reg - GICD_SPENDSGIR); | |
return 0; | |
@@ -605,20 +640,21 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
/* Implementation defined identification registers */ | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled write r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, dabt.reg, r, gicd_reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: bad write width %d r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, dabt.size, dabt.reg, r, gicd_reg); | |
return 0; | |
write_ignore_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
write_ignore: | |
return 1; | |
@@ -636,7 +672,7 @@ write_reserved: | |
} | |
static const struct mmio_handler_ops vgic_v2_distr_mmio_handler = { | |
- .read = vgic_v2_distr_mmio_read, | |
+ .read = vgic_v2_distr_mmio_read, | |
.write = vgic_v2_distr_mmio_write, | |
}; | |
@@ -720,7 +756,7 @@ static int vgic_v2_lpi_get_priority(struct domain *d, unsigned int vlpi) | |
} | |
static const struct vgic_ops vgic_v2_ops = { | |
- .vcpu_init = vgic_v2_vcpu_init, | |
+ .vcpu_init = vgic_v2_vcpu_init, | |
.domain_init = vgic_v2_domain_init, | |
.domain_free = vgic_v2_domain_free, | |
.lpi_to_pending = vgic_v2_lpi_to_pending, | |
@@ -731,8 +767,7 @@ int vgic_v2_init(struct domain *d, int *mmio_count) | |
{ | |
if ( !vgic_v2_hw.enabled ) | |
{ | |
- printk(XENLOG_G_ERR | |
- "d%d: vGICv2 is not supported on this platform.\n", | |
+ printk(XENLOG_G_ERR "d%d: vGICv2 is not supported on this platform.\n", | |
d->domain_id); | |
return -ENODEV; | |
} | |
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c | |
index 6e153c698d..c09a0a0744 100644 | |
--- a/xen/arch/arm/vgic-v3-its.c | |
+++ b/xen/arch/arm/vgic-v3-its.c | |
@@ -52,19 +52,20 @@ | |
* If both the vcmd_lock and the its_lock are required, the vcmd_lock must | |
* be taken first. | |
*/ | |
-struct virt_its { | |
+struct virt_its | |
+{ | |
struct domain *d; | |
struct list_head vits_list; | |
paddr_t doorbell_address; | |
unsigned int devid_bits; | |
unsigned int evid_bits; | |
- spinlock_t vcmd_lock; /* Protects the virtual command buffer, which */ | |
- uint64_t cwriter; /* consists of CWRITER and CREADR and those */ | |
- uint64_t creadr; /* shadow variables cwriter and creadr. */ | |
+ spinlock_t vcmd_lock; /* Protects the virtual command buffer, which */ | |
+ uint64_t cwriter; /* consists of CWRITER and CREADR and those */ | |
+ uint64_t creadr; /* shadow variables cwriter and creadr. */ | |
/* Protects the rest of this structure, including the ITS tables. */ | |
spinlock_t its_lock; | |
uint64_t cbaser; | |
- uint64_t baser_dev, baser_coll; /* BASER0 and BASER1 for the guest */ | |
+ uint64_t baser_dev, baser_coll; /* BASER0 and BASER1 for the guest */ | |
unsigned int max_collections; | |
unsigned int max_devices; | |
/* changing "enabled" requires to hold *both* the vcmd_lock and its_lock */ | |
@@ -87,7 +88,7 @@ struct vits_itte | |
* Each entry just contains the VCPU ID of the respective vCPU. | |
*/ | |
typedef uint16_t coll_table_entry_t; | |
-#define UNMAPPED_COLLECTION ((coll_table_entry_t)~0) | |
+#define UNMAPPED_COLLECTION ((coll_table_entry_t)~0) | |
/* | |
* Our device table encodings: | |
@@ -96,13 +97,13 @@ typedef uint16_t coll_table_entry_t; | |
* in the lowest 5 bits of the word. | |
*/ | |
typedef uint64_t dev_table_entry_t; | |
-#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8)) | |
-#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1, UL)) | |
-#define DEV_TABLE_ENTRY(addr, bits) \ | |
- (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0))) | |
+#define DEV_TABLE_ITT_ADDR(x) ((x)&GENMASK(51, 8)) | |
+#define DEV_TABLE_ITT_SIZE(x) (BIT(((x)&GENMASK(4, 0)) + 1, UL)) | |
+#define DEV_TABLE_ENTRY(addr, bits) \ | |
+ (((addr)&GENMASK(51, 8)) | (((bits)-1) & GENMASK(4, 0))) | |
-#define GITS_BASER_RO_MASK (GITS_BASER_TYPE_MASK | \ | |
- (0x1fL << GITS_BASER_ENTRY_SIZE_SHIFT)) | |
+#define GITS_BASER_RO_MASK \ | |
+ (GITS_BASER_TYPE_MASK | (0x1fL << GITS_BASER_ENTRY_SIZE_SHIFT)) | |
/* | |
* The physical address is encoded slightly differently depending on | |
@@ -112,8 +113,7 @@ typedef uint64_t dev_table_entry_t; | |
static paddr_t get_baser_phys_addr(uint64_t reg) | |
{ | |
if ( reg & BIT(9, UL) ) | |
- return (reg & GENMASK(47, 16)) | | |
- ((reg & GENMASK(15, 12)) << 36); | |
+ return (reg & GENMASK(47, 16)) | ((reg & GENMASK(15, 12)) << 36); | |
else | |
return reg & GENMASK(47, 12); | |
} | |
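
Two of the encodings above, worked through in a standalone sketch: a device-table entry folds the ITT address (bits 51:8) together with the event-ID width (low 5 bits, stored minus one), and GITS_BASER relocates address bits 51:48 down into bits 15:12 when 64KiB pages are selected (bit 9 set). GENMASK is redefined locally; the sample addresses are made up.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define GENMASK(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
#define DEV_TABLE_ITT_SIZE(x) (1ULL << (((x) & GENMASK(4, 0)) + 1))
#define DEV_TABLE_ENTRY(addr, bits) \
    (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))

/* 64KiB page encoding (bit 9) moves address bits 51:48 into 15:12. */
static uint64_t baser_phys_addr(uint64_t reg)
{
    if ( reg & (1ULL << 9) )
        return (reg & GENMASK(47, 16)) | ((reg & GENMASK(15, 12)) << 36);
    return reg & GENMASK(47, 12);
}

int main(void)
{
    uint64_t entry = DEV_TABLE_ENTRY(0x8040000ULL, 16); /* 2^16 events */

    printf("ITT at %#" PRIx64 ", %" PRIu64 " events\n",
           DEV_TABLE_ITT_ADDR(entry), DEV_TABLE_ITT_SIZE(entry));
    printf("BASER addr %#" PRIx64 "\n",
           baser_phys_addr((1ULL << 9) | 0x40000ULL | (0xaULL << 12)));
    return 0;
}
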
@@ -132,9 +132,9 @@ static int its_set_collection(struct virt_its *its, uint16_t collid, | |
if ( collid >= its->max_collections ) | |
return -ENOENT; | |
- return access_guest_memory_by_ipa(its->d, | |
- addr + collid * sizeof(coll_table_entry_t), | |
- &vcpu_id, sizeof(vcpu_id), true); | |
+ return access_guest_memory_by_ipa( | |
+ its->d, addr + collid * sizeof(coll_table_entry_t), &vcpu_id, | |
+ sizeof(vcpu_id), true); | |
} | |
/* Must be called with the ITS lock held. */ | |
@@ -150,9 +150,9 @@ static struct vcpu *get_vcpu_from_collection(struct virt_its *its, | |
if ( collid >= its->max_collections ) | |
return NULL; | |
- ret = access_guest_memory_by_ipa(its->d, | |
- addr + collid * sizeof(coll_table_entry_t), | |
- &vcpu_id, sizeof(coll_table_entry_t), false); | |
+ ret = access_guest_memory_by_ipa( | |
+ its->d, addr + collid * sizeof(coll_table_entry_t), &vcpu_id, | |
+ sizeof(coll_table_entry_t), false); | |
if ( ret ) | |
return NULL; | |
@@ -200,8 +200,8 @@ static int its_get_itt(struct virt_its *its, uint32_t devid, | |
* a device ID and return the address of the ITTE belonging to the event ID | |
* (which is an index into that table). | |
*/ | |
-static paddr_t its_get_itte_address(struct virt_its *its, | |
- uint32_t devid, uint32_t evid) | |
+static paddr_t its_get_itte_address(struct virt_its *its, uint32_t devid, | |
+ uint32_t evid) | |
{ | |
dev_table_entry_t itt; | |
int ret; | |
@@ -256,8 +256,8 @@ static bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid, | |
* If vcpu_ptr is provided, returns the VCPU belonging to that collection. | |
* Must be called with the ITS lock held. | |
*/ | |
-static bool write_itte(struct virt_its *its, uint32_t devid, | |
- uint32_t evid, uint32_t collid, uint32_t vlpi) | |
+static bool write_itte(struct virt_its *its, uint32_t devid, uint32_t evid, | |
+ uint32_t collid, uint32_t vlpi) | |
{ | |
paddr_t addr; | |
struct vits_itte itte; | |
@@ -287,15 +287,15 @@ static uint64_t its_cmd_mask_field(uint64_t *its_cmd, unsigned int word, | |
return (its_cmd[word] >> shift) & GENMASK(size - 1, 0); | |
} | |
-#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) | |
-#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) | |
-#define its_cmd_get_size(cmd) its_cmd_mask_field(cmd, 1, 0, 5) | |
-#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) | |
-#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) | |
-#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) | |
-#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32) | |
-#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1) | |
-#define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8) | |
+#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) | |
+#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) | |
+#define its_cmd_get_size(cmd) its_cmd_mask_field(cmd, 1, 0, 5) | |
+#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) | |
+#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) | |
+#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) | |
+#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32) | |
+#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1) | |
+#define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8) | |
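
Each ITS command is four 64-bit words, and the accessors above are all thin wrappers around one shift-and-mask helper. A standalone sketch decoding a MAPTI-style command; the device/event/INTID/collection values are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t cmd_field(const uint64_t *cmd, unsigned int word,
                          unsigned int shift, unsigned int size)
{
    return (cmd[word] >> shift) & ((1ULL << size) - 1);
}

int main(void)
{
    /* word0 = devid << 32 | opcode, word1 = pINTID << 32 | eventid,
     * word2 = collection ID (MAPTI layout, opcode 0x0a). */
    uint64_t cmd[4] = {
        (23ULL << 32) | 0x0a,
        (8192ULL << 32) | 3,
        5,
        0,
    };

    printf("op=%#" PRIx64 " dev=%" PRIu64 " event=%" PRIu64
           " pintid=%" PRIu64 " coll=%" PRIu64 "\n",
           cmd_field(cmd, 0, 0, 8), cmd_field(cmd, 0, 32, 32),
           cmd_field(cmd, 1, 0, 32), cmd_field(cmd, 1, 32, 32),
           cmd_field(cmd, 2, 0, 16));
    return 0;
}
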
static int its_handle_int(struct virt_its *its, uint64_t *cmdptr) | |
{ | |
@@ -360,8 +360,8 @@ static int its_handle_clear(struct virt_its *its, uint64_t *cmdptr) | |
if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) ) | |
goto out_unlock; | |
- p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, | |
- devid, eventid); | |
+ p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, devid, | |
+ eventid); | |
/* Protect against an invalid LPI number. */ | |
if ( unlikely(!p) ) | |
goto out_unlock; | |
@@ -416,8 +416,8 @@ static int update_lpi_property(struct domain *d, struct pending_irq *p) | |
addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12); | |
- ret = access_guest_memory_by_ipa(d, addr + p->irq - LPI_OFFSET, | |
- &property, sizeof(property), false); | |
+ ret = access_guest_memory_by_ipa(d, addr + p->irq - LPI_OFFSET, &property, | |
+ sizeof(property), false); | |
if ( ret ) | |
return ret; | |
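
The offset arithmetic above in isolation: LPIs start at INTID 8192 (LPI_OFFSET), and each has exactly one property byte in the table at PROPBASER, so the byte for a given INTID lives at propbase + (intid - 8192). A small sketch with a hypothetical base address:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define LPI_OFFSET 8192U /* first LPI INTID, per the GICv3 spec */

int main(void)
{
    uint64_t propbase = 0x80200000ULL; /* made-up property table base */
    uint32_t intid = 8197;

    printf("property byte of LPI %u at %#" PRIx64 "\n",
           intid, propbase + (intid - LPI_OFFSET));
    return 0;
}
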
@@ -482,8 +482,8 @@ static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr) | |
if ( vlpi == INVALID_LPI ) | |
goto out_unlock_its; | |
- p = gicv3_its_get_event_pending_irq(d, its->doorbell_address, | |
- devid, eventid); | |
+ p = gicv3_its_get_event_pending_irq(d, its->doorbell_address, devid, | |
+ eventid); | |
if ( unlikely(!p) ) | |
goto out_unlock_its; | |
@@ -517,7 +517,7 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) | |
uint32_t collid = its_cmd_get_collection(cmdptr); | |
struct vcpu *vcpu; | |
struct pending_irq *pirqs[16]; | |
- uint64_t vlpi = 0; /* 64-bit to catch overflows */ | |
+ uint64_t vlpi = 0; /* 64-bit to catch overflows */ | |
unsigned int nr_lpis, i; | |
unsigned long flags; | |
int ret = 0; | |
@@ -547,13 +547,12 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) | |
spin_lock_irqsave(&vcpu->arch.vgic.lock, flags); | |
read_lock(&its->d->arch.vgic.pend_lpi_tree_lock); | |
- do | |
- { | |
+ do { | |
int err; | |
- nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree, | |
- (void **)pirqs, vlpi, | |
- ARRAY_SIZE(pirqs)); | |
+ nr_lpis = | |
+ radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree, | |
+ (void **)pirqs, vlpi, ARRAY_SIZE(pirqs)); | |
for ( i = 0; i < nr_lpis; i++ ) | |
{ | |
@@ -569,11 +568,11 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) | |
else | |
ret = err; | |
} | |
- /* | |
- * Loop over the next gang of pending_irqs until we reached the end of | |
- * a (fully populated) tree or the lookup function returns less LPIs than | |
- * it has been asked for. | |
- */ | |
+    /* | |
+     * Loop over the next gang of pending_irqs until we reach the end of | |
+     * a (fully populated) tree or the lookup function returns fewer LPIs | |
+     * than it was asked for. | |
+     */ | |
} while ( (++vlpi < its->d->arch.vgic.nr_lpis) && | |
(nr_lpis == ARRAY_SIZE(pirqs)) ); | |
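
The loop above is a batched-lookup idiom: keep asking the tree for the next gang of up to 16 entries, resuming just past the last hit, until a short batch signals the end. A self-contained sketch of the same pattern; gang_lookup() over a plain array is a hypothetical stand-in for radix_tree_gang_lookup().

#include <stddef.h>
#include <stdio.h>

#define BATCH 16

/* Copy up to 'max' values >= 'start' from set[] -- the stand-in lookup. */
static size_t gang_lookup(const unsigned int *set, size_t n,
                          unsigned int start, unsigned int *out, size_t max)
{
    size_t found = 0, i;

    for ( i = 0; i < n && found < max; i++ )
        if ( set[i] >= start )
            out[found++] = set[i];

    return found;
}

int main(void)
{
    unsigned int lpis[40], batch[BATCH], next = 0;
    size_t n, i;

    for ( i = 0; i < 40; i++ )
        lpis[i] = 8192 + 2 * i; /* sparse LPI numbers */

    do {
        n = gang_lookup(lpis, 40, next, batch, BATCH);
        if ( n )
            next = batch[n - 1] + 1; /* resume past the last hit */
        printf("batch of %zu LPIs\n", n);
    } while ( n == BATCH ); /* a short batch means we are done */

    return 0;
}
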
@@ -584,8 +583,8 @@ static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr) | |
} | |
/* Must be called with the ITS lock held. */ | |
-static int its_discard_event(struct virt_its *its, | |
- uint32_t vdevid, uint32_t vevid) | |
+static int its_discard_event(struct virt_its *its, uint32_t vdevid, | |
+ uint32_t vevid) | |
{ | |
struct pending_irq *p; | |
unsigned long flags; | |
@@ -626,8 +625,8 @@ static int its_discard_event(struct virt_its *its, | |
spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags); | |
/* Remove the corresponding host LPI entry */ | |
- return gicv3_remove_guest_event(its->d, its->doorbell_address, | |
- vdevid, vevid); | |
+ return gicv3_remove_guest_event(its->d, its->doorbell_address, vdevid, | |
+ vevid); | |
} | |
static void its_unmap_device(struct virt_its *its, uint32_t devid) | |
@@ -683,7 +682,6 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr) | |
*/ | |
if ( is_hardware_domain(its->d) ) | |
{ | |
- | |
/* | |
* Dom0's ITSes are mapped 1:1, so both addresses are the same. | |
* Also the device IDs are equal. | |
@@ -755,8 +753,8 @@ static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr) | |
* determined by the same device ID and event ID on the host side. | |
* This returns us the corresponding, still unused pending_irq. | |
*/ | |
- pirq = gicv3_assign_guest_event(its->d, its->doorbell_address, | |
- devid, eventid, intid); | |
+ pirq = gicv3_assign_guest_event(its->d, its->doorbell_address, devid, | |
+ eventid, intid); | |
if ( !pirq ) | |
goto out_remove_mapping; | |
@@ -830,8 +828,8 @@ static int its_handle_movi(struct virt_its *its, uint64_t *cmdptr) | |
if ( !nvcpu ) | |
goto out_unlock; | |
- p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, | |
- devid, eventid); | |
+ p = gicv3_its_get_event_pending_irq(its->d, its->doorbell_address, devid, | |
+ eventid); | |
if ( unlikely(!p) ) | |
goto out_unlock; | |
@@ -892,14 +890,14 @@ out_unlock: | |
return ret; | |
} | |
-#define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12) | |
-#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) | |
+#define ITS_CMD_BUFFER_SIZE(baser) ((((baser)&0xff) + 1) << 12) | |
+#define ITS_CMD_OFFSET(reg) ((reg)&GENMASK(19, 5)) | |
static void dump_its_command(uint64_t *command) | |
{ | |
gdprintk(XENLOG_WARNING, " cmd 0x%02lx: %016lx %016lx %016lx %016lx\n", | |
- its_cmd_get_command(command), | |
- command[0], command[1], command[2], command[3]); | |
+ its_cmd_get_command(command), command[0], command[1], command[2], | |
+ command[3]); | |
} | |
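
The command queue behind these macros is a ring: CBASER's low byte gives the size in 4KiB pages, each command is 32 bytes, and CREADR/CWRITER advance modulo the queue size. A minimal sketch of the wraparound arithmetic, with made-up register values:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define ITS_CMD_SIZE 32U /* bytes per ITS command (4 x 64-bit words) */
#define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12)

int main(void)
{
    uint64_t cbaser = 0x3; /* size field 3 -> 4 pages -> 16KiB queue */
    uint64_t creadr = ITS_CMD_BUFFER_SIZE(cbaser) - ITS_CMD_SIZE;

    /* Consuming the last slot wraps the read pointer back to zero. */
    creadr = (creadr + ITS_CMD_SIZE) % ITS_CMD_BUFFER_SIZE(cbaser);
    printf("queue=%" PRIu64 " bytes, creadr=%" PRIu64 "\n",
           ITS_CMD_BUFFER_SIZE(cbaser), creadr);
    return 0;
}
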
/* | |
@@ -921,12 +919,12 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) | |
{ | |
int ret; | |
- ret = access_guest_memory_by_ipa(d, addr + its->creadr, | |
- command, sizeof(command), false); | |
+ ret = access_guest_memory_by_ipa(d, addr + its->creadr, command, | |
+ sizeof(command), false); | |
if ( ret ) | |
return ret; | |
- switch ( its_cmd_get_command(command) ) | |
+ switch (its_cmd_get_command(command)) | |
{ | |
case GITS_CMD_CLEAR: | |
ret = its_handle_clear(its, command); | |
@@ -969,7 +967,7 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) | |
} | |
write_u64_atomic(&its->creadr, (its->creadr + ITS_CMD_SIZE) % | |
- ITS_CMD_BUFFER_SIZE(its->cbaser)); | |
+ ITS_CMD_BUFFER_SIZE(its->cbaser)); | |
if ( ret ) | |
{ | |
@@ -988,7 +986,7 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its) | |
*****************************/ | |
/* Identifying as an ARM IP, using "X" as the product ID. */ | |
-#define GITS_IIDR_VALUE 0x5800034c | |
+#define GITS_IIDR_VALUE 0x5800034c | |
static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
register_t *r, void *priv) | |
@@ -996,7 +994,7 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
struct virt_its *its = priv; | |
uint64_t reg; | |
- switch ( info->gpa & 0xffff ) | |
+ switch (info->gpa & 0xffff) | |
{ | |
case VREG32(GITS_CTLR): | |
{ | |
@@ -1006,7 +1004,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
*/ | |
bool have_cmd_lock; | |
- if ( info->dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( info->dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
have_cmd_lock = spin_trylock(&its->vcmd_lock); | |
reg = its->enabled ? GITS_CTLR_ENABLE : 0; | |
@@ -1022,12 +1021,14 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
} | |
case VREG32(GITS_IIDR): | |
- if ( info->dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( info->dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GITS_IIDR_VALUE, info); | |
break; | |
case VREG64(GITS_TYPER): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
reg = GITS_TYPER_PHYSICAL; | |
reg |= (sizeof(struct vits_itte) - 1) << GITS_TYPER_ITT_SIZE_SHIFT; | |
@@ -1044,14 +1045,16 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_reserved; | |
case VREG64(GITS_CBASER): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
*r = vreg_reg64_extract(its->cbaser, info); | |
spin_unlock(&its->its_lock); | |
break; | |
case VREG64(GITS_CWRITER): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
/* CWRITER is only written by the guest, so no extra locking here. */ | |
reg = its->cwriter; | |
@@ -1059,7 +1062,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
break; | |
case VREG64(GITS_CREADR): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
/* | |
* Lockless access, to avoid waiting for the whole command queue to be | |
@@ -1074,15 +1078,17 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
case VRANGE64(0x0098, 0x00F8): | |
goto read_reserved; | |
- case VREG64(GITS_BASER0): /* device table */ | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ case VREG64(GITS_BASER0): /* device table */ | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
*r = vreg_reg64_extract(its->baser_dev, info); | |
spin_unlock(&its->its_lock); | |
break; | |
- case VREG64(GITS_BASER1): /* collection table */ | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ case VREG64(GITS_BASER1): /* collection table */ | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
*r = vreg_reg64_extract(its->baser_coll, info); | |
spin_unlock(&its->its_lock); | |
@@ -1098,7 +1104,8 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_impl_defined; | |
case VREG32(GITS_PIDR2): | |
- if ( info->dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( info->dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GIC_PIDR2_ARCH_GICv3, info); | |
break; | |
@@ -1106,8 +1113,7 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGITS: unhandled read r%d offset %#04lx\n", | |
+ printk(XENLOG_G_ERR "%pv: vGITS: unhandled read r%d offset %#04lx\n", | |
v, info->dabt.reg, (unsigned long)info->gpa & 0xffff); | |
return 0; | |
} | |
@@ -1115,15 +1121,17 @@ static int vgic_v3_its_mmio_read(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
read_as_zero_64: | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
*r = 0; | |
return 1; | |
read_impl_defined: | |
- printk(XENLOG_G_DEBUG | |
- "%pv: vGITS: RAZ on implementation defined register offset %#04lx\n", | |
- v, info->gpa & 0xffff); | |
+ printk( | |
+ XENLOG_G_DEBUG | |
+ "%pv: vGITS: RAZ on implementation defined register offset %#04lx\n", | |
+ v, info->gpa & 0xffff); | |
*r = 0; | |
return 1; | |
@@ -1173,20 +1181,22 @@ static bool vgic_v3_verify_its_status(struct virt_its *its, bool status) | |
!(its->baser_dev & GITS_VALID_BIT) || | |
!(its->baser_coll & GITS_VALID_BIT) ) | |
{ | |
- printk(XENLOG_G_WARNING "d%d tried to enable ITS without having the tables configured.\n", | |
- its->d->domain_id); | |
+ printk( | |
+ XENLOG_G_WARNING | |
+ "d%d tried to enable ITS without having the tables configured.\n", | |
+ its->d->domain_id); | |
return false; | |
} | |
/* | |
* TODO: Protect against a guest crafting ITS tables. | |
- * The spec says that "at the time of the new allocation for use by the ITS" | |
- * all tables must contain zeroes. We could enforce this here by clearing | |
- * all the tables, but this would be moot since at the moment the guest | |
- * can change the tables at any point in time anyway. Right now there are | |
- * expectations about the tables being consistent (a VCPU lock protecting | |
- * an LPI), which should go away with proper per-IRQ locking. | |
- * So for now we ignore this issue and rely on Dom0 not doing bad things. | |
+ * The spec says that "at the time of the new allocation for use by the | |
+ * ITS" all tables must contain zeroes. We could enforce this here by | |
+ * clearing all the tables, but this would be moot since at the moment the | |
+ * guest can change the tables at any point in time anyway. Right now there | |
+ * are expectations about the tables being consistent (a VCPU lock | |
+ * protecting an LPI), which should go away with proper per-IRQ locking. So | |
+ * for now we ignore this issue and rely on Dom0 not doing bad things. | |
*/ | |
ASSERT(is_hardware_domain(its->d)); | |
@@ -1198,7 +1208,7 @@ static void sanitize_its_base_reg(uint64_t *reg) | |
uint64_t r = *reg; | |
/* Avoid outer shareable. */ | |
- switch ( (r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03 ) | |
+ switch ((r >> GITS_BASER_SHAREABILITY_SHIFT) & 0x03) | |
{ | |
case GIC_BASER_OuterShareable: | |
r &= ~GITS_BASER_SHAREABILITY_MASK; | |
@@ -1209,7 +1219,7 @@ static void sanitize_its_base_reg(uint64_t *reg) | |
} | |
/* Avoid any inner non-cacheable mapping. */ | |
- switch ( (r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07 ) | |
+ switch ((r >> GITS_BASER_INNER_CACHEABILITY_SHIFT) & 0x07) | |
{ | |
case GIC_BASER_CACHE_nCnB: | |
case GIC_BASER_CACHE_nC: | |
@@ -1221,7 +1231,7 @@ static void sanitize_its_base_reg(uint64_t *reg) | |
} | |
/* Only allow non-cacheable or same-as-inner. */ | |
- switch ( (r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07 ) | |
+ switch ((r >> GITS_BASER_OUTER_CACHEABILITY_SHIFT) & 0x07) | |
{ | |
case GIC_BASER_CACHE_SameAsInner: | |
case GIC_BASER_CACHE_nC: | |
@@ -1243,13 +1253,14 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
uint64_t reg; | |
uint32_t reg32; | |
- switch ( info->gpa & 0xffff ) | |
+ switch (info->gpa & 0xffff) | |
{ | |
case VREG32(GITS_CTLR): | |
{ | |
uint32_t ctlr; | |
- if ( info->dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( info->dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* | |
* We need to take the vcmd_lock to prevent a guest from disabling | |
@@ -1262,8 +1273,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
vreg_reg32_update(®32, r, info); | |
if ( ctlr ^ reg32 ) | |
- its->enabled = vgic_v3_verify_its_status(its, | |
- reg32 & GITS_CTLR_ENABLE); | |
+ its->enabled = | |
+ vgic_v3_verify_its_status(its, reg32 & GITS_CTLR_ENABLE); | |
spin_unlock(&its->its_lock); | |
spin_unlock(&its->vcmd_lock); | |
return 1; | |
@@ -1283,7 +1294,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
goto write_reserved; | |
case VREG64(GITS_CBASER): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
/* Changing base registers with the ITS enabled is UNPREDICTABLE. */ | |
@@ -1306,7 +1318,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
case VREG64(GITS_CWRITER): | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->vcmd_lock); | |
reg = ITS_CMD_OFFSET(its->cwriter); | |
@@ -1327,8 +1340,9 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VRANGE32(0x0098, 0x00FC): | |
goto write_reserved; | |
- case VREG64(GITS_BASER0): /* device table */ | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ case VREG64(GITS_BASER0): /* device table */ | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
@@ -1339,7 +1353,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
if ( its->enabled ) | |
{ | |
spin_unlock(&its->its_lock); | |
- gdprintk(XENLOG_WARNING, "vGITS: tried to change BASER with the ITS enabled.\n"); | |
+ gdprintk(XENLOG_WARNING, | |
+ "vGITS: tried to change BASER with the ITS enabled.\n"); | |
return 1; | |
} | |
@@ -1366,8 +1381,9 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
spin_unlock(&its->its_lock); | |
return 1; | |
- case VREG64(GITS_BASER1): /* collection table */ | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ case VREG64(GITS_BASER1): /* collection table */ | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
spin_lock(&its->its_lock); | |
/* | |
@@ -1377,7 +1393,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
if ( its->enabled ) | |
{ | |
spin_unlock(&its->its_lock); | |
- gdprintk(XENLOG_INFO, "vGITS: tried to change BASER with the ITS enabled.\n"); | |
+ gdprintk(XENLOG_INFO, | |
+ "vGITS: tried to change BASER with the ITS enabled.\n"); | |
return 1; | |
} | |
@@ -1404,18 +1421,17 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
goto write_reserved; | |
case VRANGE32(0xC000, 0xFFCC): | |
goto write_impl_defined; | |
- case VRANGE32(0xFFD0, 0xFFE4): /* IMPDEF identification registers */ | |
+ case VRANGE32(0xFFD0, 0xFFE4): /* IMPDEF identification registers */ | |
goto write_impl_defined; | |
case VREG32(GITS_PIDR2): | |
goto write_ignore_32; | |
- case VRANGE32(0xFFEC, 0xFFFC): /* IMPDEF identification registers */ | |
+ case VRANGE32(0xFFEC, 0xFFFC): /* IMPDEF identification registers */ | |
goto write_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGITS: unhandled write r%d offset %#04lx\n", | |
+ printk(XENLOG_G_ERR "%pv: vGITS: unhandled write r%d offset %#04lx\n", | |
v, info->dabt.reg, (unsigned long)info->gpa & 0xffff); | |
return 0; | |
} | |
@@ -1423,11 +1439,13 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
write_ignore_64: | |
- if ( !vgic_reg64_check_access(info->dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(info->dabt) ) | |
+ goto bad_width; | |
return 1; | |
write_ignore_32: | |
- if ( info->dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( info->dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 1; | |
write_impl_defined: | |
@@ -1450,7 +1468,7 @@ bad_width: | |
} | |
static const struct mmio_handler_ops vgic_its_mmio_handler = { | |
- .read = vgic_v3_its_mmio_read, | |
+ .read = vgic_v3_its_mmio_read, | |
.write = vgic_v3_its_mmio_write, | |
}; | |
@@ -1465,19 +1483,20 @@ static int vgic_v3_its_init_virtual(struct domain *d, paddr_t guest_addr, | |
if ( !its ) | |
return -ENOMEM; | |
- base_attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
- base_attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT; | |
+ base_attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT; | |
+ base_attr |= GIC_BASER_CACHE_SameAsInner | |
+ << GITS_BASER_OUTER_CACHEABILITY_SHIFT; | |
base_attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT; | |
- its->cbaser = base_attr; | |
- base_attr |= 0ULL << GITS_BASER_PAGE_SIZE_SHIFT; /* 4K pages */ | |
+ its->cbaser = base_attr; | |
+ base_attr |= 0ULL << GITS_BASER_PAGE_SIZE_SHIFT; /* 4K pages */ | |
its->baser_dev = GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT; | |
- its->baser_dev |= (sizeof(dev_table_entry_t) - 1) << | |
- GITS_BASER_ENTRY_SIZE_SHIFT; | |
+ its->baser_dev |= (sizeof(dev_table_entry_t) - 1) | |
+ << GITS_BASER_ENTRY_SIZE_SHIFT; | |
its->baser_dev |= base_attr; | |
- its->baser_coll = GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT; | |
- its->baser_coll |= (sizeof(coll_table_entry_t) - 1) << | |
- GITS_BASER_ENTRY_SIZE_SHIFT; | |
+ its->baser_coll = GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT; | |
+ its->baser_coll |= (sizeof(coll_table_entry_t) - 1) | |
+ << GITS_BASER_ENTRY_SIZE_SHIFT; | |
its->baser_coll |= base_attr; | |
its->d = d; | |
its->doorbell_address = guest_addr + ITS_DOORBELL_OFFSET; | |
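
How those attribute fields combine, sketched standalone: shareability, cacheability, table type and entry size are independent bit-fields OR-ed into one BASER value. The shift positions follow the GITS_BASER layout in the GICv3 spec and the encodings are abbreviated to the ones used here; treat the constants as assumptions, not Xen's headers.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define SHAREABILITY_SHIFT       10
#define OUTER_CACHEABILITY_SHIFT 53
#define TYPE_SHIFT               56
#define INNER_CACHEABILITY_SHIFT 59
#define ENTRY_SIZE_SHIFT         48

#define INNER_SHAREABLE     1ULL
#define CACHE_SAME_AS_INNER 0ULL
#define CACHE_RAWAWB        7ULL
#define TYPE_DEVICE         1ULL

int main(void)
{
    uint64_t base_attr, baser_dev;

    base_attr  = INNER_SHAREABLE << SHAREABILITY_SHIFT;
    base_attr |= CACHE_SAME_AS_INNER << OUTER_CACHEABILITY_SHIFT;
    base_attr |= CACHE_RAWAWB << INNER_CACHEABILITY_SHIFT;

    baser_dev  = TYPE_DEVICE << TYPE_SHIFT;
    baser_dev |= (uint64_t)(sizeof(uint64_t) - 1) << ENTRY_SIZE_SHIFT;
    baser_dev |= base_attr;

    printf("BASER0 (device table) = %#018" PRIx64 "\n", baser_dev);
    return 0;
}
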
@@ -1503,7 +1522,7 @@ unsigned int vgic_v3_its_count(const struct domain *d) | |
if ( !is_hardware_domain(d) ) | |
return 0; | |
- list_for_each_entry(hw_its, &host_its_list, entry) | |
+ list_for_each_entry (hw_its, &host_its_list, entry) | |
ret++; | |
return ret; | |
@@ -1525,15 +1544,14 @@ int vgic_v3_its_init_domain(struct domain *d) | |
{ | |
struct host_its *hw_its; | |
- list_for_each_entry(hw_its, &host_its_list, entry) | |
+ list_for_each_entry (hw_its, &host_its_list, entry) | |
{ | |
/* | |
* For each host ITS create a virtual ITS using the same | |
* base and thus doorbell address. | |
* Use the same number of device ID and event ID bits as the host. | |
*/ | |
- ret = vgic_v3_its_init_virtual(d, hw_its->addr, | |
- hw_its->devid_bits, | |
+ ret = vgic_v3_its_init_virtual(d, hw_its->addr, hw_its->devid_bits, | |
hw_its->evid_bits); | |
if ( ret ) | |
return ret; | |
@@ -1553,7 +1571,7 @@ void vgic_v3_its_free_domain(struct domain *d) | |
if ( list_head_is_null(&d->arch.vgic.vits_list) ) | |
return; | |
- list_for_each_entry_safe( pos, temp, &d->arch.vgic.vits_list, vits_list ) | |
+ list_for_each_entry_safe(pos, temp, &d->arch.vgic.vits_list, vits_list) | |
{ | |
list_del(&pos->vits_list); | |
xfree(pos); | |
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c | |
index 422b94f902..cd34be2ca6 100644 | |
--- a/xen/arch/arm/vgic-v3.c | |
+++ b/xen/arch/arm/vgic-v3.c | |
@@ -42,27 +42,27 @@ | |
 * We don't emulate a specific register scheme, so we implement the other | |
 * bits as RES0, as recommended by the spec (see 8.1.13 in ARM IHI 0069A). | |
*/ | |
-#define GICV3_GICD_PIDR2 0x30 | |
-#define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2 | |
+#define GICV3_GICD_PIDR2 0x30 | |
+#define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2 | |
/* | |
* GICD_CTLR default value: | |
* - No GICv2 compatibility => ARE = 1 | |
*/ | |
-#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS) | |
+#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS) | |
-static struct { | |
+static struct | |
+{ | |
bool enabled; | |
/* Distributor interface address */ | |
paddr_t dbase; | |
/* Re-distributor regions */ | |
unsigned int nr_rdist_regions; | |
const struct rdist_region *regions; | |
- unsigned int intid_bits; /* Number of interrupt ID bits */ | |
+ unsigned int intid_bits; /* Number of interrupt ID bits */ | |
} vgic_v3_hw; | |
-void vgic_v3_setup_hw(paddr_t dbase, | |
- unsigned int nr_rdist_regions, | |
+void vgic_v3_setup_hw(paddr_t dbase, unsigned int nr_rdist_regions, | |
const struct rdist_region *regions, | |
unsigned int intid_bits) | |
{ | |
@@ -161,12 +161,11 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank, | |
} | |
static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
- uint32_t gicr_reg, | |
- register_t *r) | |
+ uint32_t gicr_reg, register_t *r) | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
- switch ( gicr_reg ) | |
+ switch (gicr_reg) | |
{ | |
case VREG32(GICR_CTLR): | |
{ | |
@@ -174,7 +173,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
if ( !v->domain->arch.vgic.has_its ) | |
goto read_as_zero_32; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
*r = vreg_reg32_extract(!!(v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED), | |
@@ -184,7 +184,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
} | |
case VREG32(GICR_IIDR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GICV3_GICR_IIDR_VAL, info); | |
return 1; | |
@@ -192,7 +193,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint64_t typer, aff; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
aff = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 56 | | |
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 48 | | |
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 40 | | |
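
For reference, the affinity packing this hunk reformats: GICR_TYPER carries the vCPU's affinity in bits 63:32, one byte per level, mirroring the MPIDR fields. A sketch using the MPIDR level positions from the ARM ARM and a made-up vmpidr value:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Aff0..Aff2 sit at bits 7:0/15:8/23:16, Aff3 at bits 39:32. */
#define AFF_LEVEL(mpidr, l) (((mpidr) >> ((l) == 3 ? 32 : (l) * 8)) & 0xffULL)

int main(void)
{
    uint64_t vmpidr = (1ULL << 32) | 0x00020103; /* Aff3..Aff0 = 1,2,1,3 */
    uint64_t aff = AFF_LEVEL(vmpidr, 3) << 56 | AFF_LEVEL(vmpidr, 2) << 48 |
                   AFF_LEVEL(vmpidr, 1) << 40 | AFF_LEVEL(vmpidr, 0) << 32;

    printf("GICR_TYPER affinity bits = %#018" PRIx64 "\n", aff);
    return 0;
}
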
@@ -240,7 +242,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
case VREG64(GICR_PROPBASER): | |
if ( !v->domain->arch.vgic.has_its ) | |
goto read_as_zero_64; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
vgic_lock(v); | |
*r = vreg_reg64_extract(v->domain->arch.vgic.rdist_propbase, info); | |
@@ -253,11 +256,12 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
if ( !v->domain->arch.vgic.has_its ) | |
goto read_as_zero_64; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
*r = vreg_reg64_extract(v->arch.vgic.rdist_pendbase, info); | |
- *r &= ~GICR_PENDBASER_PTZ; /* WO, reads as 0 */ | |
+ *r &= ~GICR_PENDBASER_PTZ; /* WO, reads as 0 */ | |
spin_unlock_irqrestore(&v->arch.vgic.lock, flags); | |
return 1; | |
} | |
@@ -280,7 +284,8 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
goto read_reserved; | |
case VREG32(GICR_SYNCR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
    /* RO. When read it always returns the busy bit in bit[0]. */ | |
*r = vreg_reg32_extract(GICR_SYNCR_NOT_BUSY, info); | |
return 1; | |
@@ -305,35 +310,37 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, | |
case 0xFFD0 ... 0xFFE4: | |
/* Implementation defined identification registers */ | |
- goto read_impl_defined; | |
+ goto read_impl_defined; | |
case VREG32(GICR_PIDR2): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GICV3_GICR_PIDR2, info); | |
- return 1; | |
+ return 1; | |
case 0xFFEC ... 0xFFFC: | |
- /* Implementation defined identification registers */ | |
- goto read_impl_defined; | |
+ /* Implementation defined identification registers */ | |
+ goto read_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICR: unhandled read r%d offset %#08x\n", | |
- v, dabt.reg, gicr_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICR: unhandled read r%d offset %#08x\n", v, | |
+ dabt.reg, gicr_reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n", | |
- v, dabt.size, dabt.reg, gicr_reg); | |
+ printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n", v, | |
+ dabt.size, dabt.reg, gicr_reg); | |
return 0; | |
read_as_zero_64: | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
*r = 0; | |
return 1; | |
read_as_zero_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = 0; | |
return 1; | |
@@ -370,7 +377,7 @@ static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask, | |
/* We want to avoid outer shareable. */ | |
static uint64_t vgic_sanitise_shareability(uint64_t field) | |
{ | |
- switch ( field ) | |
+ switch (field) | |
{ | |
case GIC_BASER_OuterShareable: | |
return GIC_BASER_InnerShareable; | |
@@ -382,7 +389,7 @@ static uint64_t vgic_sanitise_shareability(uint64_t field) | |
/* Avoid any inner non-cacheable mapping. */ | |
static uint64_t vgic_sanitise_inner_cacheability(uint64_t field) | |
{ | |
- switch ( field ) | |
+ switch (field) | |
{ | |
case GIC_BASER_CACHE_nCnB: | |
case GIC_BASER_CACHE_nC: | |
@@ -395,7 +402,7 @@ static uint64_t vgic_sanitise_inner_cacheability(uint64_t field) | |
/* Non-cacheable or same-as-inner are OK. */ | |
static uint64_t vgic_sanitise_outer_cacheability(uint64_t field) | |
{ | |
- switch ( field ) | |
+ switch (field) | |
{ | |
case GIC_BASER_CACHE_SameAsInner: | |
case GIC_BASER_CACHE_nC: | |
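
All three sanitisers here plug into the same helper, which extracts a field, maps it through a policy function, and reinserts it. A standalone sketch of that pattern; the mask, shift and encoding values below are illustrative, not Xen's definitions.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define OUTER_SHAREABLE 2ULL
#define INNER_SHAREABLE 1ULL

static uint64_t sanitise_field(uint64_t reg, uint64_t field_mask,
                               int field_shift,
                               uint64_t (*sanitise_fn)(uint64_t))
{
    uint64_t field = (reg & field_mask) >> field_shift;

    field = sanitise_fn(field) << field_shift;
    return (reg & ~field_mask) | field;
}

/* Demote outer shareable to inner shareable, keep everything else. */
static uint64_t sanitise_shareability(uint64_t field)
{
    return field == OUTER_SHAREABLE ? INNER_SHAREABLE : field;
}

int main(void)
{
    uint64_t reg = OUTER_SHAREABLE << 10; /* shareability at bits 11:10 */

    reg = sanitise_field(reg, 3ULL << 10, 10, sanitise_shareability);
    printf("sanitised reg = %#" PRIx64 "\n", reg);
    return 0;
}
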
@@ -474,13 +481,12 @@ static void vgic_vcpu_enable_lpis(struct vcpu *v) | |
} | |
static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
- uint32_t gicr_reg, | |
- register_t r) | |
+ uint32_t gicr_reg, register_t r) | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
uint64_t reg; | |
- switch ( gicr_reg ) | |
+ switch (gicr_reg) | |
{ | |
case VREG32(GICR_CTLR): | |
{ | |
@@ -488,9 +494,10 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
if ( !v->domain->arch.vgic.has_its ) | |
goto write_ignore_32; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
- vgic_lock(v); /* protects rdists_enabled */ | |
+ vgic_lock(v); /* protects rdists_enabled */ | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
/* LPIs can only be enabled once, but never disabled again. */ | |
@@ -540,7 +547,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VREG64(GICR_PROPBASER): | |
if ( !v->domain->arch.vgic.has_its ) | |
goto write_ignore_64; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
vgic_lock(v); | |
@@ -566,7 +574,8 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
if ( !v->domain->arch.vgic.has_its ) | |
goto write_ignore_64; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
@@ -625,15 +634,15 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case 0xFFD0 ... 0xFFE4: | |
/* Implementation defined identification registers */ | |
- goto write_impl_defined; | |
+ goto write_impl_defined; | |
case VREG32(GICR_PIDR2): | |
/* RO */ | |
goto write_ignore_32; | |
case 0xFFEC ... 0xFFFC: | |
- /* Implementation defined identification registers */ | |
- goto write_impl_defined; | |
+ /* Implementation defined identification registers */ | |
+ goto write_impl_defined; | |
default: | |
printk(XENLOG_G_ERR "%pv: vGICR: unhandled write r%d offset %#08x\n", | |
@@ -641,17 +650,19 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n", | |
- v, dabt.size, dabt.reg, r, gicr_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICR: bad write width %d r%d=%" PRIregister | |
+ " offset %#08x\n", | |
+ v, dabt.size, dabt.reg, r, gicr_reg); | |
return 0; | |
write_ignore_64: | |
- if ( vgic_reg64_check_access(dabt) ) goto bad_width; | |
+ if ( vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
return 1; | |
write_ignore_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 1; | |
write_impl_defined: | |
@@ -661,8 +672,7 @@ write_impl_defined: | |
return 1; | |
write_reserved: | |
- printk(XENLOG_G_DEBUG | |
- "%pv: vGICR: WI on reserved register offset %#08x\n", | |
+ printk(XENLOG_G_DEBUG "%pv: vGICR: WI on reserved register offset %#08x\n", | |
v, gicr_reg); | |
return 1; | |
} | |
@@ -675,26 +685,31 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, | |
struct vgic_irq_rank *rank; | |
unsigned long flags; | |
- switch ( reg ) | |
+ switch (reg) | |
{ | |
case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN): | |
/* We do not implement security extensions for guests, read zero */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
goto read_as_zero; | |
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
*r = vreg_reg32_extract(rank->ienable, info); | |
vgic_unlock_rank(v, rank, flags); | |
return 1; | |
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
*r = vreg_reg32_extract(rank->ienable, info); | |
vgic_unlock_rank(v, rank, flags); | |
@@ -715,9 +730,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, | |
uint32_t ipriorityr; | |
uint8_t rank_index; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
rank_index = REG_RANK_INDEX(8, reg - GICD_IPRIORITYR, DABT_WORD); | |
vgic_lock_rank(v, rank, flags); | |
@@ -733,9 +750,11 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, | |
{ | |
uint32_t icfgr; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
icfgr = rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, DABT_WORD)]; | |
vgic_unlock_rank(v, rank, flags); | |
@@ -746,15 +765,14 @@ static int __vgic_v3_distr_common_mmio_read(const char *name, struct vcpu *v, | |
} | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled read r%d offset %#08x\n", | |
- v, name, dabt.reg, reg); | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled read r%d offset %#08x\n", v, | |
+ name, dabt.reg, reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR "%pv: %s: bad read width %d r%d offset %#08x\n", | |
- v, name, dabt.size, dabt.reg, reg); | |
+ printk(XENLOG_G_ERR "%pv: %s: bad read width %d r%d offset %#08x\n", v, | |
+ name, dabt.size, dabt.reg, reg); | |
return 0; | |
read_as_zero: | |
@@ -771,16 +789,18 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, | |
uint32_t tr; | |
unsigned long flags; | |
- switch ( reg ) | |
+ switch (reg) | |
{ | |
case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN): | |
/* We do not implement security extensions for guests, write ignore */ | |
goto write_ignore_32; | |
case VRANGE32(GICD_ISENABLER, GICD_ISENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, reg - GICD_ISENABLER, DABT_WORD); | |
- if ( rank == NULL ) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
tr = rank->ienable; | |
vreg_reg32_setbits(&rank->ienable, r, info); | |
@@ -789,9 +809,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, | |
return 1; | |
case VRANGE32(GICD_ICENABLER, GICD_ICENABLERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 1, reg - GICD_ICENABLER, DABT_WORD); | |
- if ( rank == NULL ) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
tr = rank->ienable; | |
vreg_reg32_clearbits(&rank->ienable, r, info); | |
@@ -800,29 +822,32 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, | |
return 1; | |
case VRANGE32(GICD_ISPENDR, GICD_ISPENDRN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled word write %#"PRIregister" to ISPENDR%d\n", | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister | |
+ " to ISPENDR%d\n", | |
v, name, r, reg - GICD_ISPENDR); | |
return 0; | |
case VRANGE32(GICD_ICPENDR, GICD_ICPENDRN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled word write %#"PRIregister" to ICPENDR%d\n", | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister | |
+ " to ICPENDR%d\n", | |
v, name, r, reg - GICD_ICPENDR); | |
return 0; | |
case VRANGE32(GICD_ISACTIVER, GICD_ISACTIVERN): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled word write %#"PRIregister" to ISACTIVER%d\n", | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister | |
+ " to ISACTIVER%d\n", | |
v, name, r, reg - GICD_ISACTIVER); | |
return 0; | |
case VRANGE32(GICD_ICACTIVER, GICD_ICACTIVERN): | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled word write %#"PRIregister" to ICACTIVER%d\n", | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled word write %#" PRIregister | |
+ " to ICACTIVER%d\n", | |
v, name, r, reg - GICD_ICACTIVER); | |
goto write_ignore_32; | |
@@ -830,9 +855,11 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, | |
{ | |
uint32_t *ipriorityr, priority; | |
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 8, reg - GICD_IPRIORITYR, DABT_WORD); | |
- if ( rank == NULL ) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
ipriorityr = &rank->ipriorityr[REG_RANK_INDEX(8, reg - GICD_IPRIORITYR, | |
DABT_WORD)]; | |
@@ -849,31 +876,34 @@ static int __vgic_v3_distr_common_mmio_write(const char *name, struct vcpu *v, | |
case VRANGE32(GICD_ICFGR + 4, GICD_ICFGRN): /* PPI + SPIs */ | |
/* ICFGR1 for PPI's, which is implementation defined | |
if ICFGR1 is programmable or not. We chose to program */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
rank = vgic_rank_offset(v, 2, reg - GICD_ICFGR, DABT_WORD); | |
- if ( rank == NULL ) goto write_ignore; | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
- vreg_reg32_update(&rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, | |
- DABT_WORD)], | |
- r, info); | |
+ vreg_reg32_update( | |
+ &rank->icfg[REG_RANK_INDEX(2, reg - GICD_ICFGR, DABT_WORD)], r, | |
+ info); | |
vgic_unlock_rank(v, rank, flags); | |
return 1; | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: unhandled write r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: %s: unhandled write r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, name, dabt.reg, r, reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR | |
- "%pv: %s: bad write width %d r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: %s: bad write width %d r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, name, dabt.size, dabt.reg, r, reg); | |
return 0; | |
write_ignore_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
write_ignore: | |
return 1; | |
} | |
@@ -883,7 +913,7 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
- switch ( gicr_reg ) | |
+ switch (gicr_reg) | |
{ | |
case VREG32(GICR_IGROUPR0): | |
case VREG32(GICR_ISENABLER0): | |
@@ -892,10 +922,10 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info, | |
case VREG32(GICR_ICACTIVER0): | |
case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7): | |
case VRANGE32(GICR_ICFGR0, GICR_ICFGR1): | |
- /* | |
- * Above registers offset are common with GICD. | |
- * So handle in common with GICD handling | |
- */ | |
+ /* | |
+     * The above register offsets are common with the GICD, | |
+     * so handle them via the common GICD handling. | |
+ */ | |
return __vgic_v3_distr_common_mmio_read("vGICR: SGI", v, info, | |
gicr_reg, r); | |
@@ -928,20 +958,23 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info, | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR "%pv: vGICR: SGI: bad read width %d r%d offset %#08x\n", | |
+ printk(XENLOG_G_ERR | |
+ "%pv: vGICR: SGI: bad read width %d r%d offset %#08x\n", | |
v, dabt.size, dabt.reg, gicr_reg); | |
return 0; | |
read_as_zero_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
read_as_zero: | |
*r = 0; | |
return 1; | |
read_impl_defined: | |
- printk(XENLOG_G_DEBUG | |
- "%pv: vGICR: SGI: RAZ on implementation defined register offset %#08x\n", | |
- v, gicr_reg); | |
+ printk( | |
+ XENLOG_G_DEBUG | |
+ "%pv: vGICR: SGI: RAZ on implementation defined register offset %#08x\n", | |
+ v, gicr_reg); | |
*r = 0; | |
return 1; | |
@@ -951,7 +984,6 @@ read_reserved: | |
v, gicr_reg); | |
*r = 0; | |
return 1; | |
- | |
} | |
static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, | |
@@ -959,7 +991,7 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
- switch ( gicr_reg ) | |
+ switch (gicr_reg) | |
{ | |
case VREG32(GICR_IGROUPR0): | |
case VREG32(GICR_ISENABLER0): | |
@@ -968,24 +1000,28 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VREG32(GICR_ICACTIVER0): | |
case VREG32(GICR_ICFGR1): | |
case VRANGE32(GICR_IPRIORITYR0, GICR_IPRIORITYR7): | |
- /* | |
- * Above registers offset are common with GICD. | |
- * So handle common with GICD handling | |
- */ | |
- return __vgic_v3_distr_common_mmio_write("vGICR: SGI", v, | |
- info, gicr_reg, r); | |
+ /* | |
+     * The above register offsets are common with the GICD, | |
+     * so handle them via the common GICD handling. | |
+ */ | |
+ return __vgic_v3_distr_common_mmio_write("vGICR: SGI", v, info, | |
+ gicr_reg, r); | |
case VREG32(GICR_ISPENDR0): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
printk(XENLOG_G_ERR | |
- "%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ISPENDR0\n", | |
+ "%pv: vGICR: SGI: unhandled word write %#" PRIregister | |
+ " to ISPENDR0\n", | |
v, r); | |
return 0; | |
case VREG32(GICR_ICPENDR0): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
printk(XENLOG_G_ERR | |
- "%pv: vGICR: SGI: unhandled word write %#"PRIregister" to ICPENDR0\n", | |
+ "%pv: vGICR: SGI: unhandled word write %#" PRIregister | |
+ " to ICPENDR0\n", | |
v, r); | |
return 0; | |
@@ -993,7 +1029,6 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, | |
/* We do not implement security extensions for guests, write ignore */ | |
goto write_ignore_32; | |
- | |
case VREG32(GICR_NSACR): | |
/* We do not implement security extensions for guests, write ignore */ | |
goto write_ignore_32; | |
@@ -1006,19 +1041,20 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info, | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICR: SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICR: SGI: bad write width %d r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, dabt.size, dabt.reg, r, gicr_reg); | |
return 0; | |
write_ignore_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 1; | |
} | |
static struct vcpu *get_vcpu_from_rdist(struct domain *d, | |
- const struct vgic_rdist_region *region, | |
- paddr_t gpa, uint32_t *offset) | |
+ const struct vgic_rdist_region *region, | |
+ paddr_t gpa, uint32_t *offset) | |
{ | |
struct vcpu *v; | |
unsigned int vcpu_id; | |
@@ -1048,12 +1084,12 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
if ( offset < SZ_64K ) | |
return __vgic_v3_rdistr_rd_mmio_read(v, info, offset, r); | |
- else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) | |
+ else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) | |
return vgic_v3_rdistr_sgi_mmio_read(v, info, (offset - SZ_64K), r); | |
else | |
printk(XENLOG_G_WARNING | |
- "%pv: vGICR: unknown gpa read address %"PRIpaddr"\n", | |
- v, info->gpa); | |
+ "%pv: vGICR: unknown gpa read address %" PRIpaddr "\n", | |
+ v, info->gpa); | |
return 0; | |
} | |
@@ -1072,11 +1108,11 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
if ( offset < SZ_64K ) | |
return __vgic_v3_rdistr_rd_mmio_write(v, info, offset, r); | |
- else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) | |
+ else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) ) | |
return vgic_v3_rdistr_sgi_mmio_write(v, info, (offset - SZ_64K), r); | |
else | |
printk(XENLOG_G_WARNING | |
- "%pv: vGICR: unknown gpa write address %"PRIpaddr"\n", | |
+ "%pv: vGICR: unknown gpa write address %" PRIpaddr "\n", | |
v, info->gpa); | |
return 0; | |
@@ -1092,10 +1128,11 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
perfc_incr(vgicd_reads); | |
- switch ( gicd_reg ) | |
+ switch (gicd_reg) | |
{ | |
case VREG32(GICD_CTLR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
vgic_lock(v); | |
*r = vreg_reg32_extract(v->domain->arch.vgic.ctlr, info); | |
vgic_unlock(v); | |
@@ -1114,7 +1151,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
unsigned int ncpus = min_t(unsigned int, v->domain->max_vcpus, 8); | |
uint32_t typer; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
/* No secure world support for guests. */ | |
typer = ((ncpus - 1) << GICD_TYPE_CPUS_SHIFT | | |
DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32)); | |
@@ -1122,7 +1160,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
if ( v->domain->arch.vgic.has_its ) | |
typer |= GICD_TYPE_LPIS; | |
- typer |= (v->domain->arch.vgic.intid_bits - 1) << GICD_TYPE_ID_BITS_SHIFT; | |
+ typer |= (v->domain->arch.vgic.intid_bits - 1) | |
+ << GICD_TYPE_ID_BITS_SHIFT; | |
*r = vreg_reg32_extract(typer, info); | |
@@ -1130,7 +1169,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
} | |
case VREG32(GICD_IIDR): | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GICV3_GICD_IIDR_VAL, info); | |
return 1; | |
@@ -1216,10 +1256,12 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint64_t irouter; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
- rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, | |
- DABT_DOUBLE_WORD); | |
- if ( rank == NULL ) goto read_as_zero; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
+ rank = | |
+ vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, DABT_DOUBLE_WORD); | |
+ if ( rank == NULL ) | |
+ goto read_as_zero; | |
vgic_lock_rank(v, rank, flags); | |
irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER); | |
vgic_unlock_rank(v, rank, flags); | |
@@ -1237,31 +1279,33 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info, | |
case VRANGE32(0xFFD0, 0xFFE4): | |
/* Implementation defined identification registers */ | |
- goto read_impl_defined; | |
+ goto read_impl_defined; | |
case VREG32(GICD_PIDR2): | |
/* GICv3 identification value */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = vreg_reg32_extract(GICV3_GICD_PIDR2, info); | |
return 1; | |
case VRANGE32(0xFFEC, 0xFFFC): | |
- /* Implementation defined identification registers */ | |
- goto read_impl_defined; | |
+ /* Implementation defined identification registers */ | |
+ goto read_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", | |
- v, dabt.reg, gicd_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n", v, | |
+ dabt.reg, gicd_reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", | |
- v, dabt.size, dabt.reg, gicd_reg); | |
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n", v, | |
+ dabt.size, dabt.reg, gicd_reg); | |
return 0; | |
read_as_zero_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
*r = 0; | |
return 1; | |
@@ -1294,13 +1338,14 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
perfc_incr(vgicd_writes); | |
- switch ( gicd_reg ) | |
+ switch (gicd_reg) | |
{ | |
case VREG32(GICD_CTLR): | |
{ | |
uint32_t ctlr = 0; | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
vgic_lock(v); | |
@@ -1376,8 +1421,8 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VRANGE32(GICD_ICFGR, GICD_ICFGRN): | |
/* Above registers are common with GICR and GICD | |
* Manage in common */ | |
- return __vgic_v3_distr_common_mmio_write("vGICD", v, info, | |
- gicd_reg, r); | |
+ return __vgic_v3_distr_common_mmio_write("vGICD", v, info, gicd_reg, | |
+ r); | |
case VRANGE32(GICD_NSACR, GICD_NSACRN): | |
/* We do not implement security extensions for guests, write ignore */ | |
@@ -1389,12 +1434,14 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VRANGE32(GICD_CPENDSGIR, GICD_CPENDSGIRN): | |
/* Replaced with GICR_ICPENDR0. So ignore write */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 0; | |
case VRANGE32(GICD_SPENDSGIR, GICD_SPENDSGIRN): | |
/* Replaced with GICR_ISPENDR0. So ignore write */ | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 0; | |
case VRANGE32(0x0F30, 0x60FC): | |
@@ -1404,10 +1451,12 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
{ | |
uint64_t irouter; | |
- if ( !vgic_reg64_check_access(dabt) ) goto bad_width; | |
- rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, | |
- DABT_DOUBLE_WORD); | |
- if ( rank == NULL ) goto write_ignore; | |
+ if ( !vgic_reg64_check_access(dabt) ) | |
+ goto bad_width; | |
+ rank = | |
+ vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, DABT_DOUBLE_WORD); | |
+ if ( rank == NULL ) | |
+ goto write_ignore; | |
vgic_lock_rank(v, rank, flags); | |
irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER); | |
vreg_reg64_update(&irouter, r, info); | |
@@ -1424,31 +1473,32 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info, | |
case VRANGE32(0xFFD0, 0xFFE4): | |
/* Implementation defined identification registers */ | |
- goto write_impl_defined; | |
+ goto write_impl_defined; | |
case VREG32(GICD_PIDR2): | |
/* RO -- write ignore */ | |
goto write_ignore_32; | |
case VRANGE32(0xFFEC, 0xFFFC): | |
- /* Implementation defined identification registers */ | |
- goto write_impl_defined; | |
+ /* Implementation defined identification registers */ | |
+ goto write_impl_defined; | |
default: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled write r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, dabt.reg, r, gicd_reg); | |
return 0; | |
} | |
bad_width: | |
- printk(XENLOG_G_ERR | |
- "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", | |
+ printk(XENLOG_G_ERR "%pv: vGICD: bad write width %d r%d=%" PRIregister | |
+ " offset %#08x\n", | |
v, dabt.size, dabt.reg, r, gicd_reg); | |
return 0; | |
write_ignore_32: | |
- if ( dabt.size != DABT_WORD ) goto bad_width; | |
+ if ( dabt.size != DABT_WORD ) | |
+ goto bad_width; | |
return 1; | |
write_ignore: | |
@@ -1461,8 +1511,7 @@ write_impl_defined: | |
return 1; | |
write_reserved: | |
- printk(XENLOG_G_DEBUG | |
- "%pv: vGICD: WI on reserved register offset %#08x\n", | |
+ printk(XENLOG_G_DEBUG "%pv: vGICD: WI on reserved register offset %#08x\n", | |
v, gicd_reg); | |
return 1; | |
} | |
@@ -1476,10 +1525,10 @@ static bool vgic_v3_to_sgi(struct vcpu *v, register_t sgir) | |
sgi_target_init(&target); | |
irqmode = (sgir >> ICH_SGI_IRQMODE_SHIFT) & ICH_SGI_IRQMODE_MASK; | |
- virq = (sgir >> ICH_SGI_IRQ_SHIFT ) & ICH_SGI_IRQ_MASK; | |
+ virq = (sgir >> ICH_SGI_IRQ_SHIFT) & ICH_SGI_IRQ_MASK; | |
/* Map GIC sgi value to enum value */ | |
- switch ( irqmode ) | |
+ switch (irqmode) | |
{ | |
case ICH_SGI_TARGET_LIST: | |
/* We assume that only AFF1 is used in ICC_SGI1R_EL1. */ | |
@@ -1515,14 +1564,14 @@ static bool vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) | |
{ | |
struct hsr_sysreg sysreg = hsr.sysreg; | |
- ASSERT (hsr.ec == HSR_EC_SYSREG); | |
+ ASSERT(hsr.ec == HSR_EC_SYSREG); | |
if ( sysreg.read ) | |
perfc_incr(vgic_sysreg_reads); | |
else | |
perfc_incr(vgic_sysreg_writes); | |
- switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) | |
+ switch (hsr.bits & HSR_SYSREG_REGS_MASK) | |
{ | |
case HSR_SYSREG_ICC_SGI1R_EL1: | |
return vreg_emulate_sysreg64(regs, hsr, vgic_v3_emulate_sgi1r); | |
@@ -1541,7 +1590,7 @@ static bool vgic_v3_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) | |
else | |
perfc_incr(vgic_cp64_writes); | |
- switch ( hsr.bits & HSR_CP64_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP64_REGS_MASK) | |
{ | |
case HSR_CPREG64(ICC_SGI1R): | |
return vreg_emulate_cp64(regs, hsr, vgic_v3_emulate_sgi1r); | |
@@ -1564,12 +1613,12 @@ static bool vgic_v3_emulate_reg(struct cpu_user_regs *regs, union hsr hsr) | |
} | |
static const struct mmio_handler_ops vgic_rdistr_mmio_handler = { | |
- .read = vgic_v3_rdistr_mmio_read, | |
+ .read = vgic_v3_rdistr_mmio_read, | |
.write = vgic_v3_rdistr_mmio_write, | |
}; | |
static const struct mmio_handler_ops vgic_distr_mmio_handler = { | |
- .read = vgic_v3_distr_mmio_read, | |
+ .read = vgic_v3_distr_mmio_read, | |
.write = vgic_v3_distr_mmio_write, | |
}; | |
@@ -1641,8 +1690,8 @@ static inline unsigned int vgic_v3_max_rdist_count(struct domain *d) | |
* However DomU get a constructed memory map, so we can go with | |
* the architected single redistributor region. | |
*/ | |
- return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions : | |
- GUEST_GICV3_RDIST_REGIONS; | |
+ return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions | |
+ : GUEST_GICV3_RDIST_REGIONS; | |
} | |
static int vgic_v3_domain_init(struct domain *d) | |
@@ -1707,7 +1756,8 @@ static int vgic_v3_domain_init(struct domain *d) | |
BUILD_BUG_ON(GUEST_GICV3_RDIST_REGIONS != 1); | |
/* The first redistributor should contain enough space for all CPUs */ | |
- BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GICV3_GICR_SIZE) < MAX_VIRT_CPUS); | |
+ BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GICV3_GICR_SIZE) < | |
+ MAX_VIRT_CPUS); | |
d->arch.vgic.rdist_regions[0].base = GUEST_GICV3_GICR0_BASE; | |
d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE; | |
d->arch.vgic.rdist_regions[0].first_cpu = 0; | |
@@ -1739,8 +1789,8 @@ static int vgic_v3_domain_init(struct domain *d) | |
{ | |
struct vgic_rdist_region *region = &d->arch.vgic.rdist_regions[i]; | |
- register_mmio_handler(d, &vgic_rdistr_mmio_handler, | |
- region->base, region->size, region); | |
+ register_mmio_handler(d, &vgic_rdistr_mmio_handler, region->base, | |
+ region->size, region); | |
} | |
d->arch.vgic.ctlr = VGICD_CTLR_DEFAULT; | |
@@ -1791,10 +1841,10 @@ static int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi) | |
} | |
static const struct vgic_ops v3_ops = { | |
- .vcpu_init = vgic_v3_vcpu_init, | |
+ .vcpu_init = vgic_v3_vcpu_init, | |
.domain_init = vgic_v3_domain_init, | |
.domain_free = vgic_v3_domain_free, | |
- .emulate_reg = vgic_v3_emulate_reg, | |
+ .emulate_reg = vgic_v3_emulate_reg, | |
.lpi_to_pending = vgic_v3_lpi_to_pending, | |
.lpi_get_priority = vgic_v3_lpi_get_priority, | |
}; | |
@@ -1803,8 +1853,7 @@ int vgic_v3_init(struct domain *d, int *mmio_count) | |
{ | |
if ( !vgic_v3_hw.enabled ) | |
{ | |
- printk(XENLOG_G_ERR | |
- "d%d: vGICv3 is not supported on this platform.\n", | |
+ printk(XENLOG_G_ERR "d%d: vGICv3 is not supported on this platform.\n", | |
d->domain_id); | |
return -ENODEV; | |
} | |
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c | |
index 82f524a35c..c286a886a8 100644 | |
--- a/xen/arch/arm/vgic.c | |
+++ b/xen/arch/arm/vgic.c | |
@@ -47,8 +47,7 @@ static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank) | |
* Returns rank corresponding to a GICD_<FOO><n> register for | |
* GICD_<FOO> with <b>-bits-per-interrupt. | |
*/ | |
-struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, | |
- int s) | |
+struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s) | |
{ | |
int rank = REG_RANK_NR(b, (n >> s)); | |
@@ -57,7 +56,7 @@ struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, | |
struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq) | |
{ | |
- int rank = irq/32; | |
+ int rank = irq / 32; | |
return vgic_get_rank(v, rank); | |
} | |
@@ -96,12 +95,12 @@ static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index, | |
int domain_vgic_register(struct domain *d, int *mmio_count) | |
{ | |
- switch ( d->arch.vgic.version ) | |
+ switch (d->arch.vgic.version) | |
{ | |
#ifdef CONFIG_GICV3 | |
case GIC_V3: | |
if ( vgic_v3_init(d, mmio_count) ) | |
- return -ENODEV; | |
+ return -ENODEV; | |
break; | |
#endif | |
case GIC_V2: | |
@@ -109,8 +108,8 @@ int domain_vgic_register(struct domain *d, int *mmio_count) | |
return -ENODEV; | |
break; | |
default: | |
- printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n", | |
- d->domain_id, d->arch.vgic.version); | |
+ printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n", d->domain_id, | |
+ d->arch.vgic.version); | |
return -ENODEV; | |
} | |
@@ -149,7 +148,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) | |
if ( d->arch.vgic.pending_irqs == NULL ) | |
return -ENOMEM; | |
- for (i=0; i<d->arch.vgic.nr_spis; i++) | |
+ for ( i = 0; i < d->arch.vgic.nr_spis; i++ ) | |
vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32); | |
/* SPIs are routed to VCPU0 by default */ | |
@@ -174,7 +173,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) | |
void register_vgic_ops(struct domain *d, const struct vgic_ops *ops) | |
{ | |
- d->arch.vgic.handler = ops; | |
+ d->arch.vgic.handler = ops; | |
} | |
void domain_vgic_free(struct domain *d) | |
@@ -190,7 +189,8 @@ void domain_vgic_free(struct domain *d) | |
{ | |
ret = release_guest_irq(d, p->irq); | |
if ( ret ) | |
- dprintk(XENLOG_G_WARNING, "d%u: Failed to release virq %u ret = %d\n", | |
+ dprintk(XENLOG_G_WARNING, | |
+ "d%u: Failed to release virq %u ret = %d\n", | |
d->domain_id, p->irq, ret); | |
} | |
} | |
@@ -208,7 +208,7 @@ int vcpu_vgic_init(struct vcpu *v) | |
v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank); | |
if ( v->arch.vgic.private_irqs == NULL ) | |
- return -ENOMEM; | |
+ return -ENOMEM; | |
/* SGIs/PPIs are always routed to this VCPU */ | |
vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id); | |
@@ -216,7 +216,7 @@ int vcpu_vgic_init(struct vcpu *v) | |
v->domain->arch.vgic.handler->vcpu_init(v); | |
memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs)); | |
- for (i = 0; i < 32; i++) | |
+ for ( i = 0; i < 32; i++ ) | |
vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i); | |
INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs); | |
@@ -273,7 +273,8 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq) | |
/* migration already in progress, no need to do anything */ | |
if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) ) | |
{ | |
- gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq); | |
+ gprintk(XENLOG_WARNING, | |
+ "irq %u migration failed: requested while in progress\n", irq); | |
spin_unlock_irqrestore(&old->arch.vgic.lock, flags); | |
return false; | |
} | |
@@ -346,7 +347,8 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n) | |
/* LPIs will never be disabled via this function. */ | |
ASSERT(!is_lpi(32 * n + 31)); | |
- while ( (i = find_next_bit(&mask, 32, i)) < 32 ) { | |
+ while ( (i = find_next_bit(&mask, 32, i)) < 32 ) | |
+ { | |
irq = i + (32 * n); | |
v_target = vgic_get_target_vcpu(v, irq); | |
@@ -396,13 +398,15 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n) | |
/* LPIs will never be enabled via this function. */ | |
ASSERT(!is_lpi(32 * n + 31)); | |
- while ( (i = find_next_bit(&mask, 32, i)) < 32 ) { | |
+ while ( (i = find_next_bit(&mask, 32, i)) < 32 ) | |
+ { | |
irq = i + (32 * n); | |
v_target = vgic_get_target_vcpu(v, irq); | |
spin_lock_irqsave(&v_target->arch.vgic.lock, flags); | |
p = irq_to_pending(v_target, irq); | |
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status); | |
- if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) ) | |
+ if ( !list_empty(&p->inflight) && | |
+ !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) ) | |
gic_raise_guest_irq(v_target, irq, p->priority); | |
spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags); | |
if ( p->desc != NULL ) | |
@@ -432,21 +436,21 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, | |
unsigned int base; | |
unsigned long int bitmap; | |
- ASSERT( virq < 16 ); | |
+ ASSERT(virq < 16); | |
- switch ( irqmode ) | |
+ switch (irqmode) | |
{ | |
case SGI_TARGET_LIST: | |
perfc_incr(vgic_sgi_list); | |
base = target->aff1 << 4; | |
bitmap = target->list; | |
- for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 ) | |
+ for_each_set_bit (i, &bitmap, sizeof(target->list) * 8) | |
{ | |
vcpuid = base + i; | |
if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL || | |
!is_vcpu_online(d->vcpu[vcpuid]) ) | |
{ | |
- gprintk(XENLOG_WARNING, "VGIC: write r=%"PRIregister" \ | |
+ gprintk(XENLOG_WARNING, "VGIC: write r=%" PRIregister " \ | |
target->list=%hx, wrong CPUTargetList \n", | |
sgir, target->list); | |
continue; | |
@@ -469,8 +473,9 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, | |
break; | |
default: | |
gprintk(XENLOG_WARNING, | |
- "vGICD:unhandled GICD_SGIR write %"PRIregister" \ | |
- with wrong mode\n", sgir); | |
+                "vGICD: unhandled GICD_SGIR write %" PRIregister " \ | |
+ with wrong mode\n", | |
+ sgir); | |
return false; | |
} | |
@@ -510,7 +515,7 @@ void vgic_clear_pending_irqs(struct vcpu *v) | |
unsigned long flags; | |
spin_lock_irqsave(&v->arch.vgic.lock, flags); | |
- list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight ) | |
+ list_for_each_entry_safe(p, t, &v->arch.vgic.inflight_irqs, inflight) | |
list_del_init(&p->inflight); | |
gic_clear_pending_irqs(v); | |
spin_unlock_irqrestore(&v->arch.vgic.lock, flags); | |
@@ -579,7 +584,7 @@ void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq, | |
if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) ) | |
gic_raise_guest_irq(v, virq, priority); | |
- list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight ) | |
+ list_for_each_entry (iter, &v->arch.vgic.inflight_irqs, inflight) | |
{ | |
if ( iter->priority > priority ) | |
{ | |
@@ -646,13 +651,11 @@ int vgic_allocate_virq(struct domain *d, bool spi) | |
* There is no spinlock to protect allocated_irqs, therefore | |
* test_and_set_bit may fail. If so retry it. | |
*/ | |
- do | |
- { | |
+ do { | |
virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first); | |
if ( virq >= end ) | |
return -1; | |
- } | |
- while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) ); | |
+ } while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) ); | |
return virq; | |
} | |
@@ -664,7 +667,7 @@ void vgic_free_virq(struct domain *d, unsigned int virq) | |
unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) | |
{ | |
- switch ( domctl_vgic_version ) | |
+ switch (domctl_vgic_version) | |
{ | |
case XEN_DOMCTL_CONFIG_GIC_V2: | |
return 8; | |
@@ -687,4 +690,3 @@ unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) | |
* indent-tabs-mode: nil | |
* End: | |
*/ | |
- | |
diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c | |
index 62ae553699..18bcdcb1fe 100644 | |
--- a/xen/arch/arm/vgic/vgic-init.c | |
+++ b/xen/arch/arm/vgic/vgic-init.c | |
@@ -103,7 +103,7 @@ static void vgic_vcpu_early_init(struct vcpu *vcpu) | |
*/ | |
int domain_vgic_register(struct domain *d, int *mmio_count) | |
{ | |
- switch ( d->arch.vgic.version ) | |
+ switch (d->arch.vgic.version) | |
{ | |
case GIC_V2: | |
*mmio_count = 1; | |
@@ -142,7 +142,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis) | |
dist->nr_spis = nr_spis; | |
dist->spis = xzalloc_array(struct vgic_irq, nr_spis); | |
if ( !dist->spis ) | |
- return -ENOMEM; | |
+ return -ENOMEM; | |
/* | |
* In the following code we do not take the irq struct lock since | |
@@ -215,7 +215,7 @@ int vcpu_vgic_init(struct vcpu *vcpu) | |
void domain_vgic_free(struct domain *d) | |
{ | |
struct vgic_dist *dist = &d->arch.vgic; | |
- int i, ret; | |
+ int i, ret; | |
for ( i = 0; i < dist->nr_spis; i++ ) | |
{ | |
@@ -227,8 +227,8 @@ void domain_vgic_free(struct domain *d) | |
ret = release_guest_irq(d, irq->hwintid); | |
if ( ret ) | |
dprintk(XENLOG_G_WARNING, | |
- "d%u: Failed to release virq %u ret = %d\n", | |
- d->domain_id, 32 + i, ret); | |
+ "d%u: Failed to release virq %u ret = %d\n", d->domain_id, | |
+ 32 + i, ret); | |
} | |
dist->ready = false; | |
diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c | |
index 2e507b10fe..27ab85c325 100644 | |
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c | |
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c | |
@@ -20,24 +20,23 @@ | |
#include "vgic.h" | |
#include "vgic-mmio.h" | |
-static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t value; | |
- switch ( addr & 0x0c ) /* filter for the 4 registers handled here */ | |
+ switch (addr & 0x0c) /* filter for the 4 registers handled here */ | |
{ | |
case GICD_CTLR: | |
value = vcpu->domain->arch.vgic.enabled ? GICD_CTL_ENABLE : 0; | |
break; | |
case GICD_TYPER: | |
value = vcpu->domain->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; | |
- value = (value >> 5) - 1; /* stored as multiples of 32 */ | |
+ value = (value >> 5) - 1; /* stored as multiples of 32 */ | |
value |= (vcpu->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT; | |
break; | |
case GICD_IIDR: | |
- value = (PRODUCT_ID_KVM << 24) | | |
- (VARIANT_ID_XEN << 16) | | |
+ value = (PRODUCT_ID_KVM << 24) | (VARIANT_ID_XEN << 16) | | |
(IMPLEMENTER_ARM << 0); | |
break; | |
default: | |
@@ -47,14 +46,13 @@ static unsigned long vgic_mmio_read_v2_misc(struct vcpu *vcpu, | |
return value; | |
} | |
-static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
struct vgic_dist *dist = &vcpu->domain->arch.vgic; | |
bool enabled; | |
- switch ( addr & 0x0c ) /* filter for the 4 registers handled here */ | |
+ switch (addr & 0x0c) /* filter for the 4 registers handled here */ | |
{ | |
case GICD_CTLR: | |
domain_lock(vcpu->domain); | |
@@ -81,34 +79,33 @@ static void vgic_mmio_write_v2_misc(struct vcpu *vcpu, | |
} | |
} | |
-static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
struct domain *d = source_vcpu->domain; | |
unsigned int nr_vcpus = d->max_vcpus; | |
unsigned int intid = val & GICD_SGI_INTID_MASK; | |
- unsigned long targets = (val & GICD_SGI_TARGET_MASK) >> | |
- GICD_SGI_TARGET_SHIFT; | |
+ unsigned long targets = | |
+ (val & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT; | |
unsigned int vcpu_id; | |
- switch ( val & GICD_SGI_TARGET_LIST_MASK ) | |
+ switch (val & GICD_SGI_TARGET_LIST_MASK) | |
{ | |
- case GICD_SGI_TARGET_LIST: /* as specified by targets */ | |
- targets &= GENMASK(nr_vcpus - 1, 0); /* limit to existing VCPUs */ | |
+ case GICD_SGI_TARGET_LIST: /* as specified by targets */ | |
+ targets &= GENMASK(nr_vcpus - 1, 0); /* limit to existing VCPUs */ | |
break; | |
case GICD_SGI_TARGET_OTHERS: | |
targets = GENMASK(nr_vcpus - 1, 0); /* all, ... */ | |
targets &= ~(1U << source_vcpu->vcpu_id); /* but self */ | |
break; | |
- case GICD_SGI_TARGET_SELF: /* this very vCPU only */ | |
+ case GICD_SGI_TARGET_SELF: /* this very vCPU only */ | |
targets = (1U << source_vcpu->vcpu_id); | |
break; | |
- case 0x3: /* reserved */ | |
+ case 0x3: /* reserved */ | |
return; | |
} | |
- for_each_set_bit( vcpu_id, &targets, 8 ) | |
+ for_each_set_bit (vcpu_id, &targets, 8) | |
{ | |
struct vcpu *vcpu = d->vcpu[vcpu_id]; | |
struct vgic_irq *irq = vgic_get_irq(d, vcpu, intid); | |
@@ -124,8 +121,8 @@ static void vgic_mmio_write_sgir(struct vcpu *source_vcpu, | |
} | |
} | |
-static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
uint32_t val = 0; | |
@@ -143,9 +140,8 @@ static unsigned long vgic_mmio_read_target(struct vcpu *vcpu, | |
return val; | |
} | |
-static void vgic_mmio_write_target(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+static void vgic_mmio_write_target(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
uint8_t cpu_mask = GENMASK(vcpu->domain->max_vcpus - 1, 0); | |
@@ -170,7 +166,8 @@ static void vgic_mmio_write_target(struct vcpu *vcpu, | |
{ | |
struct irq_desc *desc = irq_to_desc(irq->hwintid); | |
- irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor)); | |
+ irq_set_affinity(desc, | |
+ cpumask_of(irq->target_vcpu->processor)); | |
} | |
} | |
else | |
@@ -181,8 +178,8 @@ static void vgic_mmio_write_target(struct vcpu *vcpu, | |
} | |
} | |
-static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
uint32_t val = 0; | |
@@ -202,9 +199,8 @@ static unsigned long vgic_mmio_read_sgipend(struct vcpu *vcpu, | |
return val; | |
} | |
-static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
unsigned int i; | |
@@ -227,9 +223,8 @@ static void vgic_mmio_write_sgipendc(struct vcpu *vcpu, | |
} | |
} | |
-static void vgic_mmio_write_sgipends(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+static void vgic_mmio_write_sgipends(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
unsigned int i; | |
@@ -259,48 +254,45 @@ static void vgic_mmio_write_sgipends(struct vcpu *vcpu, | |
} | |
static const struct vgic_register_region vgic_v2_dist_registers[] = { | |
- REGISTER_DESC_WITH_LENGTH(GICD_CTLR, | |
- vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR, | |
- vgic_mmio_read_rao, vgic_mmio_write_wi, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, | |
- vgic_mmio_read_enable, vgic_mmio_write_senable, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, | |
- vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, | |
- vgic_mmio_read_pending, vgic_mmio_write_spending, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR, | |
- vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER, | |
- vgic_mmio_read_active, vgic_mmio_write_sactive, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER, | |
- vgic_mmio_read_active, vgic_mmio_write_cactive, 1, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR, | |
- vgic_mmio_read_priority, vgic_mmio_write_priority, 8, | |
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR, | |
- vgic_mmio_read_target, vgic_mmio_write_target, 8, | |
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
- REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR, | |
- vgic_mmio_read_config, vgic_mmio_write_config, 2, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_LENGTH(GICD_SGIR, | |
- vgic_mmio_read_raz, vgic_mmio_write_sgir, 4, | |
- VGIC_ACCESS_32bit), | |
- REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR, | |
- vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16, | |
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
- REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR, | |
- vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16, | |
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
+ REGISTER_DESC_WITH_LENGTH(GICD_CTLR, vgic_mmio_read_v2_misc, | |
+ vgic_mmio_write_v2_misc, 12, VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR, vgic_mmio_read_rao, | |
+ vgic_mmio_write_wi, 1, VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, vgic_mmio_read_enable, | |
+ vgic_mmio_write_senable, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, vgic_mmio_read_enable, | |
+ vgic_mmio_write_cenable, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, vgic_mmio_read_pending, | |
+ vgic_mmio_write_spending, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR, vgic_mmio_read_pending, | |
+ vgic_mmio_write_cpending, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER, vgic_mmio_read_active, | |
+ vgic_mmio_write_sactive, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER, vgic_mmio_read_active, | |
+ vgic_mmio_write_cactive, 1, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR, vgic_mmio_read_priority, | |
+ vgic_mmio_write_priority, 8, | |
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR, vgic_mmio_read_target, | |
+ vgic_mmio_write_target, 8, | |
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR, vgic_mmio_read_config, | |
+ vgic_mmio_write_config, 2, | |
+ VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_LENGTH(GICD_SGIR, vgic_mmio_read_raz, | |
+ vgic_mmio_write_sgir, 4, VGIC_ACCESS_32bit), | |
+ REGISTER_DESC_WITH_LENGTH(GICD_CPENDSGIR, vgic_mmio_read_sgipend, | |
+ vgic_mmio_write_sgipendc, 16, | |
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
+ REGISTER_DESC_WITH_LENGTH(GICD_SPENDSGIR, vgic_mmio_read_sgipend, | |
+ vgic_mmio_write_sgipends, 16, | |
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), | |
}; | |
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev) | |
diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c | |
index 5d935a7301..c6cd4dd5ad 100644 | |
--- a/xen/arch/arm/vgic/vgic-mmio.c | |
+++ b/xen/arch/arm/vgic/vgic-mmio.c | |
@@ -21,20 +21,20 @@ | |
#include "vgic.h" | |
#include "vgic-mmio.h" | |
-unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
return 0; | |
} | |
-unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
return -1UL; | |
} | |
-void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, | |
- unsigned int len, unsigned long val) | |
+void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
+ unsigned long val) | |
{ | |
/* Ignore */ | |
} | |
@@ -43,8 +43,8 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, | |
* Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value | |
* of the enabled bit, so there is only one function for both here. | |
*/ | |
-unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
uint32_t value = 0; | |
@@ -64,14 +64,13 @@ unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, | |
return value; | |
} | |
-void vgic_mmio_write_senable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_senable(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
unsigned long flags; | |
@@ -79,7 +78,7 @@ void vgic_mmio_write_senable(struct vcpu *vcpu, | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
- if ( irq->enabled ) /* skip already enabled IRQs */ | |
+ if ( irq->enabled ) /* skip already enabled IRQs */ | |
{ | |
spin_unlock_irqrestore(&irq->irq_lock, flags); | |
vgic_put_irq(vcpu->domain, irq); | |
@@ -109,14 +108,13 @@ void vgic_mmio_write_senable(struct vcpu *vcpu, | |
} | |
} | |
-void vgic_mmio_write_cenable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_cenable(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq; | |
unsigned long flags; | |
@@ -125,7 +123,7 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu, | |
irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
- if ( !irq->enabled ) /* skip already disabled IRQs */ | |
+ if ( !irq->enabled ) /* skip already disabled IRQs */ | |
{ | |
spin_unlock_irqrestore(&irq->irq_lock, flags); | |
vgic_put_irq(vcpu->domain, irq); | |
@@ -156,8 +154,8 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu, | |
} | |
} | |
-unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
uint32_t value = 0; | |
@@ -177,23 +175,23 @@ unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, | |
return value; | |
} | |
-void vgic_mmio_write_spending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+void vgic_mmio_write_spending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
unsigned long flags; | |
irq_desc_t *desc; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
irq->pending_latch = true; | |
- /* To observe the locking order, just take the irq_desc pointer here. */ | |
+        /* To observe the locking order, just take the irq_desc | |
+         * pointer here. */ | |
if ( irq->hw ) | |
desc = irq_to_desc(irq->hwintid); | |
else | |
@@ -225,23 +223,23 @@ void vgic_mmio_write_spending(struct vcpu *vcpu, | |
} | |
} | |
-void vgic_mmio_write_cpending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+void vgic_mmio_write_cpending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
unsigned long flags; | |
irq_desc_t *desc; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
irq->pending_latch = false; | |
- /* To observe the locking order, just take the irq_desc pointer here. */ | |
+        /* To observe the locking order, just take the irq_desc | |
+         * pointer here. */ | |
if ( irq->hw ) | |
desc = irq_to_desc(irq->hwintid); | |
else | |
@@ -280,7 +278,6 @@ void vgic_mmio_write_cpending(struct vcpu *vcpu, | |
spin_unlock_irqrestore(&desc->lock, flags); | |
} | |
- | |
vgic_put_irq(vcpu->domain, irq); | |
} | |
} | |
@@ -293,8 +290,8 @@ void vgic_mmio_write_cpending(struct vcpu *vcpu, | |
* VCPUs processing any affected vIRQs), so we use a simple implementation | |
* to get the best possible answer. | |
*/ | |
-unsigned long vgic_mmio_read_active(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_active(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
uint32_t value = 0; | |
@@ -321,14 +318,13 @@ unsigned long vgic_mmio_read_active(struct vcpu *vcpu, | |
* and only print our warning in this case. So clearing already non-active | |
* IRQs would not be moaned about in the logs. | |
*/ | |
-void vgic_mmio_write_cactive(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_cactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
@@ -351,14 +347,13 @@ void vgic_mmio_write_cactive(struct vcpu *vcpu, | |
* We check whether this MMIO access would actually affect any non-active IRQ, | |
* and only print our warning in this case. | |
*/ | |
-void vgic_mmio_write_sactive(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_sactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); | |
unsigned int i; | |
- for_each_set_bit( i, &val, len * 8 ) | |
+ for_each_set_bit (i, &val, len * 8) | |
{ | |
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); | |
@@ -376,8 +371,8 @@ void vgic_mmio_write_sactive(struct vcpu *vcpu, | |
} | |
} | |
-unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
unsigned int i; | |
@@ -402,9 +397,8 @@ unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, | |
* leading to this interrupt getting presented now to the guest (if it has | |
* been masked by the priority mask before). | |
*/ | |
-void vgic_mmio_write_priority(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val) | |
+void vgic_mmio_write_priority(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 8); | |
unsigned int i; | |
@@ -423,8 +417,8 @@ void vgic_mmio_write_priority(struct vcpu *vcpu, | |
} | |
} | |
-unsigned long vgic_mmio_read_config(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len) | |
+unsigned long vgic_mmio_read_config(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 2); | |
uint32_t value = 0; | |
@@ -443,8 +437,7 @@ unsigned long vgic_mmio_read_config(struct vcpu *vcpu, | |
return value; | |
} | |
-void vgic_mmio_write_config(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_config(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val) | |
{ | |
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 2); | |
@@ -505,7 +498,7 @@ static bool check_region(const struct domain *d, | |
{ | |
unsigned int flags, nr_irqs = d->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; | |
- switch ( len ) | |
+ switch (len) | |
{ | |
case sizeof(uint8_t): | |
flags = VGIC_ACCESS_8bit; | |
@@ -614,7 +607,7 @@ int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn, | |
struct vgic_io_device *io_device = &d->arch.vgic.dist_iodev; | |
unsigned int len; | |
- switch ( type ) | |
+ switch (type) | |
{ | |
case VGIC_V2: | |
len = vgic_v2_init_dist_iodev(io_device); | |
diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h | |
index 3566cf237c..b4f086bfb1 100644 | |
--- a/xen/arch/arm/vgic/vgic-mmio.h | |
+++ b/xen/arch/arm/vgic/vgic-mmio.h | |
@@ -16,29 +16,29 @@ | |
#ifndef __XEN_ARM_VGIC_VGIC_MMIO_H__ | |
#define __XEN_ARM_VGIC_VGIC_MMIO_H__ | |
-struct vgic_register_region { | |
+struct vgic_register_region | |
+{ | |
unsigned int reg_offset; | |
unsigned int len; | |
unsigned int bits_per_irq; | |
unsigned int access_flags; | |
- unsigned long (*read)(struct vcpu *vcpu, paddr_t addr, | |
- unsigned int len); | |
- void (*write)(struct vcpu *vcpu, paddr_t addr, | |
- unsigned int len, unsigned long val); | |
+ unsigned long (*read)(struct vcpu *vcpu, paddr_t addr, unsigned int len); | |
+ void (*write)(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
+ unsigned long val); | |
}; | |
extern struct mmio_handler_ops vgic_io_ops; | |
-#define VGIC_ACCESS_8bit 1 | |
-#define VGIC_ACCESS_32bit 2 | |
-#define VGIC_ACCESS_64bit 4 | |
+#define VGIC_ACCESS_8bit 1 | |
+#define VGIC_ACCESS_32bit 2 | |
+#define VGIC_ACCESS_64bit 4 | |
/* | |
* Generate a mask that covers the number of bytes required to address | |
* up to 1024 interrupts, each represented by <bits> bits. This assumes | |
* that <bits> is a power of two. | |
*/ | |
-#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) | |
+#define VGIC_ADDR_IRQ_MASK(bits) (((bits)*1024 / 8) - 1) | |
/* | |
* (addr & mask) gives us the _byte_ offset for the INT ID. | |
@@ -48,8 +48,8 @@ extern struct mmio_handler_ops vgic_io_ops; | |
* we shift by the binary logarithm of <bits>. | |
* This assumes that <bits> is a power of two. | |
*/ | |
-#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ | |
- 8 >> ilog2(bits)) | |
+#define VGIC_ADDR_TO_INTID(addr, bits) \ | |
+ (((addr)&VGIC_ADDR_IRQ_MASK(bits)) * 8 >> ilog2(bits)) | |
/* | |
* Some VGIC registers store per-IRQ information, with a different number | |
@@ -57,81 +57,65 @@ extern struct mmio_handler_ops vgic_io_ops; | |
* The _WITH_LENGTH version instantiates registers with a fixed length | |
* and is mutually exclusive with the _PER_IRQ version. | |
*/ | |
-#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \ | |
- { \ | |
- .reg_offset = off, \ | |
- .bits_per_irq = bpi, \ | |
- .len = bpi * 1024 / 8, \ | |
- .access_flags = acc, \ | |
- .read = rd, \ | |
- .write = wr, \ | |
+#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \ | |
+ { \ | |
+ .reg_offset = off, .bits_per_irq = bpi, .len = bpi * 1024 / 8, \ | |
+ .access_flags = acc, .read = rd, .write = wr, \ | |
} | |
-#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \ | |
- { \ | |
- .reg_offset = off, \ | |
- .bits_per_irq = 0, \ | |
- .len = length, \ | |
- .access_flags = acc, \ | |
- .read = rd, \ | |
- .write = wr, \ | |
+#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \ | |
+ { \ | |
+ .reg_offset = off, .bits_per_irq = 0, .len = length, \ | |
+ .access_flags = acc, .read = rd, .write = wr, \ | |
} | |
-unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_raz(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, | |
- unsigned int len, unsigned long val); | |
+void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
+ unsigned long val); | |
-unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_senable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_senable(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val); | |
-void vgic_mmio_write_cenable(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_cenable(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val); | |
-unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_pending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_spending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val); | |
+void vgic_mmio_write_spending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val); | |
-void vgic_mmio_write_cpending(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val); | |
+void vgic_mmio_write_cpending(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val); | |
-unsigned long vgic_mmio_read_active(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_active(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_cactive(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_cactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val); | |
-void vgic_mmio_write_sactive(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
+void vgic_mmio_write_sactive(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
unsigned long val); | |
-unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_priority(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_priority(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val); | |
+void vgic_mmio_write_priority(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len, unsigned long val); | |
-unsigned long vgic_mmio_read_config(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len); | |
+unsigned long vgic_mmio_read_config(struct vcpu *vcpu, paddr_t addr, | |
+ unsigned int len); | |
-void vgic_mmio_write_config(struct vcpu *vcpu, | |
- paddr_t addr, unsigned int len, | |
- unsigned long val); | |
+void vgic_mmio_write_config(struct vcpu *vcpu, paddr_t addr, unsigned int len, | |
+ unsigned long val); | |
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); | |
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c | |
index b5ba4ace87..5977ca735c 100644 | |
--- a/xen/arch/arm/vgic/vgic-v2.c | |
+++ b/xen/arch/arm/vgic/vgic-v2.c | |
@@ -23,12 +23,13 @@ | |
#include "vgic.h" | |
-static struct { | |
+static struct | |
+{ | |
bool enabled; | |
- paddr_t dbase; /* Distributor interface address */ | |
- paddr_t cbase; /* CPU interface address & size */ | |
+ paddr_t dbase; /* Distributor interface address */ | |
+ paddr_t cbase; /* CPU interface address & size */ | |
paddr_t csize; | |
- paddr_t vbase; /* Virtual CPU interface address */ | |
+ paddr_t vbase; /* Virtual CPU interface address */ | |
/* Offset to add to get an 8kB contiguous region if GIC is aliased */ | |
uint32_t aliased_offset; | |
@@ -61,7 +62,7 @@ void vgic_v2_fold_lr_state(struct vcpu *vcpu) | |
unsigned long flags; | |
unsigned int lr; | |
- if ( !used_lrs ) /* No LRs used, so nothing to sync back here. */ | |
+ if ( !used_lrs ) /* No LRs used, so nothing to sync back here. */ | |
return; | |
gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false); | |
@@ -80,11 +81,11 @@ void vgic_v2_fold_lr_state(struct vcpu *vcpu) | |
* Read the ELRSR to find out which of our LRs have been cleared | |
* by the guest. We just need to know the IRQ number for those, which | |
* we could save in an array when populating the LRs. | |
- * This trades one MMIO access (ELRSR) for possibly more than one (LRs), | |
- * but requires some more code to save the IRQ number and to handle | |
- * those finished IRQs according to the algorithm below. | |
- * We need some numbers to justify this: chances are that we don't | |
- * have many LRs in use most of the time, so we might not save much. | |
+ * This trades one MMIO access (ELRSR) for possibly more than one | |
+ * (LRs), but requires some more code to save the IRQ number and to | |
+ * handle those finished IRQs according to the algorithm below. We need | |
+ * some numbers to justify this: chances are that we don't have many | |
+ * LRs in use most of the time, so we might not save much. | |
*/ | |
gic_hw_ops->clear_lr(lr); | |
@@ -295,7 +296,6 @@ int vgic_v2_map_resources(struct domain *d) | |
vbase = gic_v2_hw_data.vbase + gic_v2_hw_data.aliased_offset; | |
} | |
- | |
ret = vgic_register_dist_iodev(d, gaddr_to_gfn(dist->vgic_dist_base), | |
VGIC_V2); | |
if ( ret ) | |
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c | |
index f0f2ea5021..1f4271bbe9 100644 | |
--- a/xen/arch/arm/vgic/vgic.c | |
+++ b/xen/arch/arm/vgic/vgic.c | |
@@ -64,7 +64,7 @@ static struct vgic_irq *vgic_get_lpi(struct domain *d, uint32_t intid) | |
spin_lock(&dist->lpi_list_lock); | |
- list_for_each_entry( irq, &dist->lpi_list_head, lpi_list ) | |
+ list_for_each_entry (irq, &dist->lpi_list_head, lpi_list) | |
{ | |
if ( irq->intid != intid ) | |
continue; | |
@@ -170,7 +170,7 @@ static struct vcpu *vgic_target_oracle(struct vgic_irq *irq) | |
/* If the interrupt is active, it must stay on the current vcpu */ | |
if ( irq->active ) | |
- return irq->vcpu ? : irq->target_vcpu; | |
+ return irq->vcpu ?: irq->target_vcpu; | |
/* | |
* If the IRQ is not active but enabled and pending, we should direct | |
@@ -429,7 +429,7 @@ static void vgic_prune_ap_list(struct vcpu *vcpu) | |
retry: | |
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); | |
- list_for_each_entry_safe( irq, tmp, &vgic_cpu->ap_list_head, ap_list ) | |
+ list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) | |
{ | |
struct vcpu *target_vcpu, *vcpuA, *vcpuB; | |
@@ -524,8 +524,7 @@ static void vgic_fold_lr_state(struct vcpu *vcpu) | |
} | |
/* Requires the irq_lock to be held. */ | |
-static void vgic_populate_lr(struct vcpu *vcpu, | |
- struct vgic_irq *irq, int lr) | |
+static void vgic_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr) | |
{ | |
ASSERT(spin_is_locked(&irq->irq_lock)); | |
@@ -548,7 +547,7 @@ static int compute_ap_list_depth(struct vcpu *vcpu) | |
ASSERT(spin_is_locked(&vgic_cpu->ap_list_lock)); | |
- list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) | |
+ list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) | |
count++; | |
return count; | |
@@ -566,7 +565,7 @@ static void vgic_flush_lr_state(struct vcpu *vcpu) | |
if ( compute_ap_list_depth(vcpu) > gic_get_nr_lrs() ) | |
vgic_sort_ap_list(vcpu); | |
- list_for_each_entry( irq, &vgic_cpu->ap_list_head, ap_list ) | |
+ list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) | |
{ | |
spin_lock(&irq->irq_lock); | |
@@ -662,7 +661,7 @@ int vgic_vcpu_pending_irq(struct vcpu *vcpu) | |
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); | |
- list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) | |
+ list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) | |
{ | |
spin_lock(&irq->irq_lock); | |
ret = irq_is_pending(irq) && irq->enabled; | |
@@ -685,7 +684,7 @@ void vgic_kick_vcpus(struct domain *d) | |
* We've injected an interrupt, time to find out who deserves | |
* a good kick... | |
*/ | |
- for_each_vcpu( d, vcpu ) | |
+ for_each_vcpu (d, vcpu) | |
{ | |
if ( vgic_vcpu_pending_irq(vcpu) ) | |
vcpu_kick(vcpu); | |
@@ -739,8 +738,7 @@ int vgic_allocate_virq(struct domain *d, bool spi) | |
* There is no spinlock to protect allocated_irqs, therefore | |
* test_and_set_bit may fail. If so retry it. | |
*/ | |
- do | |
- { | |
+ do { | |
virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first); | |
if ( virq >= end ) | |
return -1; | |
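The lock-free claim-with-retry pattern used here can be sketched in isolation as follows (a minimal illustration built on GCC atomics, standing in for Xen's find_next_zero_bit/test_and_set_bit):

    #include <limits.h>
    #include <stdbool.h>

    #define NBITS (sizeof(unsigned long) * CHAR_BIT)

    /* Atomically set bit nr, returning its previous value. */
    static bool tas_bit(unsigned int nr, unsigned long *map)
    {
        unsigned long mask = 1UL << (nr % NBITS);
        return __atomic_fetch_or(&map[nr / NBITS], mask,
                                 __ATOMIC_SEQ_CST) & mask;
    }

    /* Claim the first clear bit in [first, end); -1 if none is left.
     * A concurrent winner makes tas_bit() return true, so we rescan. */
    static int alloc_slot(unsigned long *map, unsigned int first,
                          unsigned int end)
    {
        unsigned int i;

        do {
            for ( i = first; i < end; i++ )
                if ( !(map[i / NBITS] & (1UL << (i % NBITS))) )
                    break;
            if ( i >= end )
                return -1;
        } while ( tas_bit(i, map) );

        return i;
    }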
@@ -765,14 +763,14 @@ void gic_dump_vgic_info(struct vcpu *v) | |
if ( !list_empty(&vgic_cpu->ap_list_head) ) | |
printk(" active or pending interrupts queued:\n"); | |
- list_for_each_entry ( irq, &vgic_cpu->ap_list_head, ap_list ) | |
+ list_for_each_entry (irq, &vgic_cpu->ap_list_head, ap_list) | |
{ | |
spin_lock(&irq->irq_lock); | |
printk(" %s %s irq %u: %spending, %sactive, %senabled\n", | |
irq->hw ? "hardware" : "virtual", | |
- irq->config == VGIC_CONFIG_LEVEL ? "level" : "edge", | |
- irq->intid, irq_is_pending(irq) ? "" : "not ", | |
- irq->active ? "" : "not ", irq->enabled ? "" : "not "); | |
+ irq->config == VGIC_CONFIG_LEVEL ? "level" : "edge", irq->intid, | |
+ irq_is_pending(irq) ? "" : "not ", irq->active ? "" : "not ", | |
+ irq->enabled ? "" : "not "); | |
spin_unlock(&irq->irq_lock); | |
} | |
@@ -814,7 +812,7 @@ void arch_move_irqs(struct vcpu *v) | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
/* Only hardware mapped vIRQs that are targeting this vCPU. */ | |
- if ( irq->hw && irq->target_vcpu == v) | |
+ if ( irq->hw && irq->target_vcpu == v ) | |
{ | |
irq_desc_t *desc = irq_to_desc(irq->hwintid); | |
@@ -858,8 +856,9 @@ bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr) | |
/* | |
* was: | |
- * int kvm_vgic_map_phys_irq(struct vcpu *vcpu, u32 virt_irq, u32 phys_irq) | |
- * int kvm_vgic_unmap_phys_irq(struct vcpu *vcpu, unsigned int virt_irq) | |
+ * int kvm_vgic_map_phys_irq(struct vcpu *vcpu, u32 virt_irq, u32 phys_irq) | |
+ * int kvm_vgic_unmap_phys_irq(struct vcpu *vcpu, unsigned int virt_irq) | |
*/ | |
int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu, | |
unsigned int virt_irq, struct irq_desc *desc, | |
@@ -874,7 +873,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu, | |
spin_lock_irqsave(&irq->irq_lock, flags); | |
- if ( connect ) /* assign a mapped IRQ */ | |
+ if ( connect ) /* assign a mapped IRQ */ | |
{ | |
/* The VIRQ should not be already enabled by the guest */ | |
if ( !irq->hw && !irq->enabled ) | |
@@ -885,7 +884,7 @@ int vgic_connect_hw_irq(struct domain *d, struct vcpu *vcpu, | |
else | |
ret = -EBUSY; | |
} | |
- else /* remove a mapped IRQ */ | |
+ else /* remove a mapped IRQ */ | |
{ | |
if ( desc && irq->hwintid != desc->irq ) | |
{ | |
@@ -909,8 +908,8 @@ static unsigned int translate_irq_type(bool is_level) | |
return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; | |
} | |
-void vgic_sync_hardware_irq(struct domain *d, | |
- irq_desc_t *desc, struct vgic_irq *irq) | |
+void vgic_sync_hardware_irq(struct domain *d, irq_desc_t *desc, | |
+ struct vgic_irq *irq) | |
{ | |
unsigned long flags; | |
@@ -946,7 +945,7 @@ void vgic_sync_hardware_irq(struct domain *d, | |
unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) | |
{ | |
- switch ( domctl_vgic_version ) | |
+ switch (domctl_vgic_version) | |
{ | |
case XEN_DOMCTL_CONFIG_GIC_V2: | |
return VGIC_V2_MAX_CPUS; | |
@@ -958,8 +957,7 @@ unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version) | |
#ifdef CONFIG_GICV3 | |
/* Dummy implementation to allow building without actual vGICv3 support. */ | |
-void vgic_v3_setup_hw(paddr_t dbase, | |
- unsigned int nr_rdist_regions, | |
+void vgic_v3_setup_hw(paddr_t dbase, unsigned int nr_rdist_regions, | |
const struct rdist_region *regions, | |
unsigned int intid_bits) | |
{ | |
diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h | |
index 534b24bcd3..6fc55bfbe8 100644 | |
--- a/xen/arch/arm/vgic/vgic.h | |
+++ b/xen/arch/arm/vgic/vgic.h | |
@@ -21,14 +21,14 @@ | |
* We piggy-back on the already used KVM product ID, but use a different | |
* variant (major revision) for Xen. | |
*/ | |
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */ | |
-#define VARIANT_ID_XEN 0x01 | |
-#define IMPLEMENTER_ARM 0x43b | |
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */ | |
+#define VARIANT_ID_XEN 0x01 | |
+#define IMPLEMENTER_ARM 0x43b | |
-#define VGIC_ADDR_UNDEF INVALID_PADDR | |
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | |
+#define VGIC_ADDR_UNDEF INVALID_PADDR | |
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | |
-#define VGIC_PRI_BITS 5 | |
+#define VGIC_PRI_BITS 5 | |
#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) | |
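These constants feed a GICD_IIDR-style ID register; assuming the GICv2 field layout (ProductID[31:24], Variant[19:16], Revision[15:12], Implementer[11:0]), a sketch of the composition:

    #include <stdint.h>

    #define PRODUCT_ID_KVM  0x4b
    #define VARIANT_ID_XEN  0x01
    #define IMPLEMENTER_ARM 0x43b

    /* Hypothetical helper: build an IIDR value for a given revision. */
    static inline uint32_t example_iidr(uint32_t revision)
    {
        return ((uint32_t)PRODUCT_ID_KVM << 24) |
               ((uint32_t)VARIANT_ID_XEN << 16) |
               (revision << 12) | IMPLEMENTER_ARM;
    }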
@@ -60,8 +60,8 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq) | |
atomic_inc(&irq->refcount); | |
} | |
-void vgic_sync_hardware_irq(struct domain *d, | |
- irq_desc_t *desc, struct vgic_irq *irq); | |
+void vgic_sync_hardware_irq(struct domain *d, irq_desc_t *desc, | |
+ struct vgic_irq *irq); | |
void vgic_v2_fold_lr_state(struct vcpu *vcpu); | |
void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr); | |
diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c | |
index 7bc5eeb207..243c3a160f 100644 | |
--- a/xen/arch/arm/vpl011.c | |
+++ b/xen/arch/arm/vpl011.c | |
@@ -111,8 +111,7 @@ static void vpl011_write_data_xen(struct domain *d, uint8_t data) | |
} | |
else | |
{ | |
- if ( intf->out_prod == SBSA_UART_OUT_BUF_SIZE - 2 || | |
- data == '\n' ) | |
+ if ( intf->out_prod == SBSA_UART_OUT_BUF_SIZE - 2 || data == '\n' ) | |
{ | |
if ( data != '\n' ) | |
intf->out[intf->out_prod++] = '\n'; | |
@@ -154,7 +153,8 @@ static uint8_t vpl011_read_data_xen(struct domain *d) | |
* It is expected that there will be data in the ring buffer when this | |
* function is called since the guest is expected to read the data register | |
* only if the TXFE flag is not set. | |
- * If the guest still does read when TXFE bit is set then 0 will be returned. | |
+ * If the guest still reads while the TXFE bit is set, 0 will be | |
+ * returned. | |
*/ | |
if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 ) | |
{ | |
@@ -213,7 +213,8 @@ static uint8_t vpl011_read_data(struct domain *d) | |
* It is expected that there will be data in the ring buffer when this | |
* function is called since the guest is expected to read the data register | |
* only if the TXFE flag is not set. | |
- * If the guest still does read when TXFE bit is set then 0 will be returned. | |
+ * If the guest still reads while the TXFE bit is set, 0 will be | |
+ * returned. | |
*/ | |
if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 ) | |
{ | |
@@ -299,7 +300,7 @@ static void vpl011_write_data(struct domain *d, uint8_t data) | |
* data will be silently dropped. | |
*/ | |
if ( xencons_queued(out_prod, out_cons, sizeof(intf->out)) != | |
- sizeof (intf->out) ) | |
+ sizeof(intf->out) ) | |
{ | |
unsigned int fifo_level; | |
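The fullness check relies on the xenconsole ring convention of free-running indices; a standalone sketch of that arithmetic (names simplified; the real xencons_queued()/xencons_mask() helpers live in the console ring headers):

    #include <stdint.h>

    typedef uint32_t RING_IDX;

    /* prod - cons is the number of queued bytes even across index
     * wrap-around, thanks to unsigned arithmetic. */
    static RING_IDX ring_queued(RING_IDX prod, RING_IDX cons)
    {
        return prod - cons;
    }

    /* Map a free-running index into a power-of-two sized buffer. */
    static uint32_t ring_offset(RING_IDX idx, uint32_t size)
    {
        return idx & (size - 1);
    }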
@@ -341,9 +342,7 @@ static void vpl011_write_data(struct domain *d, uint8_t data) | |
notify_via_xen_event_channel(d, vpl011->evtchn); | |
} | |
-static int vpl011_mmio_read(struct vcpu *v, | |
- mmio_info_t *info, | |
- register_t *r, | |
+static int vpl011_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, | |
void *priv) | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
@@ -352,10 +351,11 @@ static int vpl011_mmio_read(struct vcpu *v, | |
struct domain *d = v->domain; | |
unsigned long flags; | |
- switch ( vpl011_reg ) | |
+ switch (vpl011_reg) | |
{ | |
case DR: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
if ( vpl011->backend_in_domain ) | |
*r = vreg_reg32_extract(vpl011_read_data(d), info); | |
@@ -364,14 +364,16 @@ static int vpl011_mmio_read(struct vcpu *v, | |
return 1; | |
case RSR: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
/* It always returns 0 as there are no physical errors. */ | |
*r = 0; | |
return 1; | |
case FR: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
*r = vreg_reg32_extract(vpl011->uartfr, info); | |
@@ -379,7 +381,8 @@ static int vpl011_mmio_read(struct vcpu *v, | |
return 1; | |
case RIS: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
*r = vreg_reg32_extract(vpl011->uartris, info); | |
@@ -387,16 +390,17 @@ static int vpl011_mmio_read(struct vcpu *v, | |
return 1; | |
case MIS: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
- *r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc, | |
- info); | |
+ *r = vreg_reg32_extract(vpl011->uartris & vpl011->uartimsc, info); | |
VPL011_UNLOCK(d, flags); | |
return 1; | |
case IMSC: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
*r = vreg_reg32_extract(vpl011->uartimsc, info); | |
@@ -404,7 +408,8 @@ static int vpl011_mmio_read(struct vcpu *v, | |
return 1; | |
case ICR: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
/* Only write is valid. */ | |
return 0; | |
@@ -421,12 +426,9 @@ bad_width: | |
gprintk(XENLOG_ERR, "vpl011: bad read width %d r%d offset %#08x\n", | |
dabt.size, dabt.reg, vpl011_reg); | |
return 0; | |
- | |
} | |
-static int vpl011_mmio_write(struct vcpu *v, | |
- mmio_info_t *info, | |
- register_t r, | |
+static int vpl011_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, | |
void *priv) | |
{ | |
struct hsr_dabt dabt = info->dabt; | |
@@ -435,13 +437,14 @@ static int vpl011_mmio_write(struct vcpu *v, | |
struct domain *d = v->domain; | |
unsigned long flags; | |
- switch ( vpl011_reg ) | |
+ switch (vpl011_reg) | |
{ | |
case DR: | |
{ | |
uint32_t data = 0; | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
vreg_reg32_update(&data, r, info); | |
data &= 0xFF; | |
@@ -453,7 +456,8 @@ static int vpl011_mmio_write(struct vcpu *v, | |
} | |
case RSR: /* Nothing to clear. */ | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
return 1; | |
@@ -463,7 +467,8 @@ static int vpl011_mmio_write(struct vcpu *v, | |
goto write_ignore; | |
case IMSC: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
vreg_reg32_update(&vpl011->uartimsc, r, info); | |
@@ -472,7 +477,8 @@ static int vpl011_mmio_write(struct vcpu *v, | |
return 1; | |
case ICR: | |
- if ( !vpl011_reg32_check_access(dabt) ) goto bad_width; | |
+ if ( !vpl011_reg32_check_access(dabt) ) | |
+ goto bad_width; | |
VPL011_LOCK(d, flags); | |
vreg_reg32_clearbits(&vpl011->uartris, r, info); | |
@@ -493,7 +499,6 @@ bad_width: | |
gprintk(XENLOG_ERR, "vpl011: bad write width %d r%d offset %#08x\n", | |
dabt.size, dabt.reg, vpl011_reg); | |
return 0; | |
- | |
} | |
static const struct mmio_handler_ops vpl011_mmio_handler = { | |
@@ -501,8 +506,7 @@ static const struct mmio_handler_ops vpl011_mmio_handler = { | |
.write = vpl011_mmio_write, | |
}; | |
-static void vpl011_data_avail(struct domain *d, | |
- XENCONS_RING_IDX in_fifo_level, | |
+static void vpl011_data_avail(struct domain *d, XENCONS_RING_IDX in_fifo_level, | |
XENCONS_RING_IDX in_size, | |
XENCONS_RING_IDX out_fifo_level, | |
XENCONS_RING_IDX out_size) | |
@@ -524,9 +528,9 @@ static void vpl011_data_avail(struct domain *d, | |
vpl011->uartris |= RXI; | |
/* | |
- * If the input queue is not empty, we assert the receive timeout interrupt. | |
- * As we don't emulate any timing here, so we ignore the actual timeout | |
- * of 32 baud cycles. | |
+ * If the input queue is not empty, we assert the receive timeout | |
+ * interrupt. As we don't emulate any timing here, we ignore the | |
+ * actual timeout of 32 baud cycles. | |
*/ | |
if ( in_fifo_level > 0 ) | |
vpl011->uartris |= RTI; | |
@@ -569,7 +573,8 @@ void vpl011_rx_char_xen(struct domain *d, char c) | |
in_cons = intf->in_cons; | |
in_prod = intf->in_prod; | |
- if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) == sizeof(intf->in) ) | |
+ if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) == | |
+ sizeof(intf->in) ) | |
{ | |
VPL011_UNLOCK(d, flags); | |
return; | |
@@ -578,11 +583,10 @@ void vpl011_rx_char_xen(struct domain *d, char c) | |
intf->in[xencons_mask(in_prod, sizeof(intf->in))] = c; | |
intf->in_prod = ++in_prod; | |
- in_fifo_level = xencons_queued(in_prod, | |
- in_cons, | |
- sizeof(intf->in)); | |
+ in_fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in)); | |
- vpl011_data_avail(d, in_fifo_level, sizeof(intf->in), 0, SBSA_UART_FIFO_SIZE); | |
+ vpl011_data_avail(d, in_fifo_level, sizeof(intf->in), 0, | |
+ SBSA_UART_FIFO_SIZE); | |
VPL011_UNLOCK(d, flags); | |
} | |
@@ -604,13 +608,9 @@ static void vpl011_notification(struct vcpu *v, unsigned int port) | |
smp_rmb(); | |
- in_fifo_level = xencons_queued(in_prod, | |
- in_cons, | |
- sizeof(intf->in)); | |
+ in_fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in)); | |
- out_fifo_level = xencons_queued(out_prod, | |
- out_cons, | |
- sizeof(intf->out)); | |
+ out_fifo_level = xencons_queued(out_prod, out_cons, sizeof(intf->out)); | |
vpl011_data_avail(v->domain, in_fifo_level, sizeof(intf->in), | |
out_fifo_level, sizeof(intf->out)); | |
@@ -635,10 +635,9 @@ int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info) | |
vpl011->backend_in_domain = true; | |
/* Map the guest PFN to Xen address space. */ | |
- rc = prepare_ring_for_helper(d, | |
- gfn_x(info->gfn), | |
- &vpl011->backend.dom.ring_page, | |
- &vpl011->backend.dom.ring_buf); | |
+ rc = prepare_ring_for_helper(d, gfn_x(info->gfn), | |
+ &vpl011->backend.dom.ring_page, | |
+ &vpl011->backend.dom.ring_buf); | |
if ( rc < 0 ) | |
goto out; | |
@@ -670,8 +669,8 @@ int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info) | |
spin_lock_init(&vpl011->lock); | |
- register_mmio_handler(d, &vpl011_mmio_handler, | |
- GUEST_PL011_BASE, GUEST_PL011_SIZE, NULL); | |
+ register_mmio_handler(d, &vpl011_mmio_handler, GUEST_PL011_BASE, | |
+ GUEST_PL011_SIZE, NULL); | |
return 0; | |
diff --git a/xen/arch/arm/vpsci.c b/xen/arch/arm/vpsci.c | |
index c1e250be59..05f9b3f269 100644 | |
--- a/xen/arch/arm/vpsci.c | |
+++ b/xen/arch/arm/vpsci.c | |
@@ -49,7 +49,7 @@ static int do_common_cpu_on(register_t target_cpu, register_t entry_point, | |
vgic_clear_pending_irqs(v); | |
memset(ctxt, 0, sizeof(*ctxt)); | |
- ctxt->user_regs.pc64 = (u64) entry_point; | |
+ ctxt->user_regs.pc64 = (u64)entry_point; | |
ctxt->sctlr = SCTLR_GUEST_INIT; | |
ctxt->ttbr0 = 0; | |
ctxt->ttbr1 = 0; | |
@@ -155,11 +155,11 @@ static int32_t do_psci_0_2_cpu_on(register_t target_cpu, | |
} | |
static const unsigned long target_affinity_mask[] = { | |
- ( MPIDR_HWID_MASK & AFFINITY_MASK( 0 ) ), | |
- ( MPIDR_HWID_MASK & AFFINITY_MASK( 1 ) ), | |
- ( MPIDR_HWID_MASK & AFFINITY_MASK( 2 ) ) | |
+ (MPIDR_HWID_MASK & AFFINITY_MASK(0)), (MPIDR_HWID_MASK & AFFINITY_MASK(1)), | |
+ (MPIDR_HWID_MASK & AFFINITY_MASK(2)) | |
#ifdef CONFIG_ARM_64 | |
- ,( MPIDR_HWID_MASK & AFFINITY_MASK( 3 ) ) | |
+ , | |
+ (MPIDR_HWID_MASK & AFFINITY_MASK(3)) | |
#endif | |
}; | |
@@ -183,8 +183,8 @@ static int32_t do_psci_0_2_affinity_info(register_t target_affinity, | |
{ | |
v = d->vcpu[vcpuid]; | |
- if ( ( ( v->arch.vmpidr & tmask ) == target_affinity ) | |
- && ( !test_bit(_VPF_down, &v->pause_flags) ) ) | |
+ if ( ((v->arch.vmpidr & tmask) == target_affinity) && | |
+ (!test_bit(_VPF_down, &v->pause_flags)) ) | |
return PSCI_0_2_AFFINITY_LEVEL_ON; | |
} | |
@@ -196,22 +196,22 @@ static int32_t do_psci_0_2_migrate_info_type(void) | |
return PSCI_0_2_TOS_MP_OR_NOT_PRESENT; | |
} | |
-static void do_psci_0_2_system_off( void ) | |
+static void do_psci_0_2_system_off(void) | |
{ | |
struct domain *d = current->domain; | |
- domain_shutdown(d,SHUTDOWN_poweroff); | |
+ domain_shutdown(d, SHUTDOWN_poweroff); | |
} | |
static void do_psci_0_2_system_reset(void) | |
{ | |
struct domain *d = current->domain; | |
- domain_shutdown(d,SHUTDOWN_reboot); | |
+ domain_shutdown(d, SHUTDOWN_reboot); | |
} | |
static int32_t do_psci_1_0_features(uint32_t psci_func_id) | |
{ | |
/* /!\ Ordered by function ID and not name */ | |
- switch ( psci_func_id ) | |
+ switch (psci_func_id) | |
{ | |
case PSCI_0_2_FN32_PSCI_VERSION: | |
case PSCI_0_2_FN32_CPU_SUSPEND: | |
@@ -247,7 +247,7 @@ static int32_t do_psci_1_0_features(uint32_t psci_func_id) | |
*/ | |
bool do_vpsci_0_1_call(struct cpu_user_regs *regs, uint32_t fid) | |
{ | |
- switch ( (uint32_t)get_user_reg(regs, 0) ) | |
+ switch ((uint32_t)get_user_reg(regs, 0)) | |
{ | |
case PSCI_cpu_off: | |
{ | |
@@ -282,7 +282,7 @@ bool do_vpsci_0_2_call(struct cpu_user_regs *regs, uint32_t fid) | |
* adding/removing a function. SCCC_SMCCC_*_REVISION should be | |
* updated once per release. | |
*/ | |
- switch ( fid ) | |
+ switch (fid) | |
{ | |
case PSCI_0_2_FN32_PSCI_VERSION: | |
perfc_incr(vpsci_version); | |
diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c | |
index f8e350311d..924754a6b5 100644 | |
--- a/xen/arch/arm/vsmc.c | |
+++ b/xen/arch/arm/vsmc.c | |
@@ -14,7 +14,6 @@ | |
* GNU General Public License for more details. | |
*/ | |
- | |
#include <xen/lib.h> | |
#include <xen/types.h> | |
#include <public/arch-arm/smccc.h> | |
@@ -31,7 +30,8 @@ | |
/* Number of functions currently supported by Hypervisor Service. */ | |
#define XEN_SMCCC_FUNCTION_COUNT 3 | |
-/* Number of functions currently supported by Standard Service Service Calls. */ | |
+/* Number of functions currently supported by Standard Service Calls. */ | |
#define SSSC_SMCCC_FUNCTION_COUNT (3 + VPSCI_NR_FUNCS) | |
static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid) | |
@@ -43,7 +43,7 @@ static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid) | |
* first byte is stored in low-order bits of a register. | |
* (ARM DEN 0028B page 14) | |
*/ | |
- for (n = 0; n < 4; n++) | |
+ for ( n = 0; n < 4; n++ ) | |
{ | |
const uint8_t *bytes = uuid.a + n * 4; | |
uint32_t r; | |
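The byte-order rule cited above (first byte stored in the low-order bits of each register) amounts to little-endian packing; a standalone sketch:

    #include <stdint.h>

    /* Pack four UUID bytes into one 32-bit register value, first byte
     * lowest, per the SMCCC UID convention quoted in the comment. */
    static uint32_t pack_uuid_word(const uint8_t bytes[4])
    {
        return (uint32_t)bytes[0] | ((uint32_t)bytes[1] << 8) |
               ((uint32_t)bytes[2] << 16) | ((uint32_t)bytes[3] << 24);
    }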
@@ -60,7 +60,7 @@ static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid) | |
} | |
static bool fill_revision(struct cpu_user_regs *regs, uint32_t major, | |
- uint32_t minor) | |
+ uint32_t minor) | |
{ | |
/* | |
* Revision is returned in registers r0 and r1. | |
@@ -90,7 +90,7 @@ static bool handle_arch(struct cpu_user_regs *regs) | |
{ | |
uint32_t fid = (uint32_t)get_user_reg(regs, 0); | |
- switch ( fid ) | |
+ switch (fid) | |
{ | |
case ARM_SMCCC_VERSION_FID: | |
set_user_reg(regs, 0, ARM_SMCCC_VERSION_1_1); | |
@@ -101,14 +101,14 @@ static bool handle_arch(struct cpu_user_regs *regs) | |
uint32_t arch_func_id = get_user_reg(regs, 1); | |
int ret = ARM_SMCCC_NOT_SUPPORTED; | |
- switch ( arch_func_id ) | |
+ switch (arch_func_id) | |
{ | |
case ARM_SMCCC_ARCH_WORKAROUND_1_FID: | |
if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) ) | |
ret = 0; | |
break; | |
case ARM_SMCCC_ARCH_WORKAROUND_2_FID: | |
- switch ( get_ssbd_state() ) | |
+ switch (get_ssbd_state()) | |
{ | |
case ARM_SSBD_UNKNOWN: | |
case ARM_SSBD_FORCE_DISABLE: | |
@@ -163,7 +163,7 @@ static bool handle_hypervisor(struct cpu_user_regs *regs) | |
{ | |
uint32_t fid = (uint32_t)get_user_reg(regs, 0); | |
- switch ( fid ) | |
+ switch (fid) | |
{ | |
case ARM_SMCCC_CALL_COUNT_FID(HYPERVISOR): | |
return fill_function_call_count(regs, XEN_SMCCC_FUNCTION_COUNT); | |
@@ -194,7 +194,7 @@ static bool handle_sssc(struct cpu_user_regs *regs) | |
if ( do_vpsci_0_2_call(regs, fid) ) | |
return true; | |
- switch ( fid ) | |
+ switch (fid) | |
{ | |
case ARM_SMCCC_CALL_COUNT_FID(STANDARD): | |
return fill_function_call_count(regs, SSSC_SMCCC_FUNCTION_COUNT); | |
@@ -219,7 +219,7 @@ static bool handle_sssc(struct cpu_user_regs *regs) | |
static bool vsmccc_handle_call(struct cpu_user_regs *regs) | |
{ | |
bool handled = false; | |
- const union hsr hsr = { .bits = regs->hsr }; | |
+ const union hsr hsr = {.bits = regs->hsr}; | |
register_t funcid = get_user_reg(regs, 0); | |
/* | |
@@ -229,14 +229,14 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) | |
* value we need to disassemble instruction at current pc, which | |
* is expensive. So we will assume that it is 0x0. | |
*/ | |
- switch ( hsr.ec ) | |
+ switch (hsr.ec) | |
{ | |
case HSR_EC_HVC32: | |
#ifdef CONFIG_ARM_64 | |
case HSR_EC_HVC64: | |
case HSR_EC_SMC64: | |
#endif | |
- if ( (hsr.iss & HSR_XXC_IMM_MASK) != 0) | |
+ if ( (hsr.iss & HSR_XXC_IMM_MASK) != 0 ) | |
return false; | |
break; | |
case HSR_EC_SMC32: | |
@@ -263,7 +263,7 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) | |
handled = handle_existing_apis(regs); | |
else | |
{ | |
- switch ( smccc_get_owner(funcid) ) | |
+ switch (smccc_get_owner(funcid)) | |
{ | |
case ARM_SMCCC_OWNER_ARCH: | |
handled = handle_arch(regs); | |
@@ -286,7 +286,8 @@ static bool vsmccc_handle_call(struct cpu_user_regs *regs) | |
if ( !handled ) | |
{ | |
- gprintk(XENLOG_INFO, "Unhandled SMC/HVC: %08"PRIregister"\n", funcid); | |
+ gprintk(XENLOG_INFO, "Unhandled SMC/HVC: %08" PRIregister "\n", | |
+ funcid); | |
/* Inform caller that function is not supported. */ | |
set_user_reg(regs, 0, ARM_SMCCC_ERR_UNKNOWN_FUNCTION); | |
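The owner-based dispatch above follows the SMCCC function-ID layout (ARM DEN 0028: bit 31 fast-call, bit 30 64-bit convention, bits 29:24 owning entity, bits 15:0 function number); a sketch of the decode with a hypothetical helper name:

    #include <stdint.h>

    #define EXAMPLE_SMCCC_OWNER_MASK  0x3f
    #define EXAMPLE_SMCCC_OWNER_SHIFT 24

    static inline unsigned int example_get_owner(uint32_t funcid)
    {
        return (funcid >> EXAMPLE_SMCCC_OWNER_SHIFT) &
               EXAMPLE_SMCCC_OWNER_MASK;
    }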
@@ -327,7 +328,7 @@ void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr) | |
void do_trap_hvc_smccc(struct cpu_user_regs *regs) | |
{ | |
- const union hsr hsr = { .bits = regs->hsr }; | |
+ const union hsr hsr = {.bits = regs->hsr}; | |
/* | |
* vsmccc_handle_call() will return false if this call is not | |
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c | |
index e6aebdac9e..08dc14106d 100644 | |
--- a/xen/arch/arm/vtimer.c | |
+++ b/xen/arch/arm/vtimer.c | |
@@ -36,8 +36,8 @@ | |
* CNTKCTL_EL1_ bit name which gates user access | |
*/ | |
#define ACCESS_ALLOWED(regs, user_gate) \ | |
- ( !psr_mode_is_user(regs) || \ | |
- (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate) ) | |
+ (!psr_mode_is_user(regs) || \ | |
+ (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate)) | |
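A sketch of how this gate is applied (the handler shape is assumed; EL0PCTEN is the architectural CNTKCTL_EL1 bit gating EL0 access to the physical counter):

    /* Hypothetical guard before emulating an EL0 counter read: permitted
     * from kernel mode, or from user mode when CNTKCTL_EL1.EL0PCTEN is
     * set. */
    static bool cntpct_access_ok(struct cpu_user_regs *regs)
    {
        return ACCESS_ALLOWED(regs, EL0PCTEN);
    }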
static void phys_timer_expired(void *data) | |
{ | |
@@ -64,7 +64,8 @@ int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config) | |
{ | |
d->arch.phys_timer_base.offset = NOW(); | |
d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0); | |
- d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count); | |
+ d->time_offset_seconds = | |
+ ticks_to_ns(d->arch.virt_timer_base.offset - boot_count); | |
do_div(d->time_offset_seconds, 1000000000); | |
config->clock_frequency = timer_dt_clock_frequency; | |
@@ -109,17 +110,14 @@ int vcpu_vtimer_init(struct vcpu *v) | |
init_timer(&t->timer, phys_timer_expired, t, v->processor); | |
t->ctl = 0; | |
t->cval = NOW(); | |
- t->irq = d0 | |
- ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI) | |
- : GUEST_TIMER_PHYS_NS_PPI; | |
+ t->irq = | |
+ d0 ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI) : GUEST_TIMER_PHYS_NS_PPI; | |
t->v = v; | |
t = &v->arch.virt_timer; | |
init_timer(&t->timer, virt_timer_expired, t, v->processor); | |
t->ctl = 0; | |
- t->irq = d0 | |
- ? timer_get_irq(TIMER_VIRT_PPI) | |
- : GUEST_TIMER_VIRT_PPI; | |
+ t->irq = d0 ? timer_get_irq(TIMER_VIRT_PPI) : GUEST_TIMER_VIRT_PPI; | |
t->v = v; | |
v->arch.vtimer_initialized = 1; | |
@@ -144,10 +142,12 @@ void virt_timer_save(struct vcpu *v) | |
WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0); | |
v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0); | |
if ( (v->arch.virt_timer.ctl & CNTx_CTL_ENABLE) && | |
- !(v->arch.virt_timer.ctl & CNTx_CTL_MASK)) | |
+ !(v->arch.virt_timer.ctl & CNTx_CTL_MASK) ) | |
{ | |
- set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval + | |
- v->domain->arch.virt_timer_base.offset - boot_count)); | |
+ set_timer(&v->arch.virt_timer.timer, | |
+ ticks_to_ns(v->arch.virt_timer.cval + | |
+ v->domain->arch.virt_timer_base.offset - | |
+ boot_count)); | |
} | |
} | |
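The tick/nanosecond conversions used throughout assume the arch timer's fixed counter frequency; a standalone sketch at an assumed 62.5 MHz rate (exactly 16 ns per tick; Xen derives the real rate from CNTFRQ_EL0 at boot):

    #include <stdint.h>

    #define CNT_FREQ_HZ 62500000ULL /* assumed for illustration */

    static uint64_t example_ticks_to_ns(uint64_t ticks)
    {
        return ticks * 1000000000ULL / CNT_FREQ_HZ;
    }

    static uint64_t example_ns_to_ticks(uint64_t ns)
    {
        return ns * CNT_FREQ_HZ / 1000000000ULL;
    }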
@@ -185,7 +185,8 @@ static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read) | |
if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) | |
{ | |
set_timer(&v->arch.phys_timer.timer, | |
- v->arch.phys_timer.cval + v->domain->arch.phys_timer_base.offset); | |
+ v->arch.phys_timer.cval + | |
+ v->domain->arch.phys_timer_base.offset); | |
} | |
else | |
stop_timer(&v->arch.phys_timer.timer); | |
@@ -206,7 +207,8 @@ static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, | |
if ( read ) | |
{ | |
- *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); | |
+ *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & | |
+ 0xffffffffull); | |
} | |
else | |
{ | |
@@ -216,7 +218,7 @@ static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, | |
v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; | |
set_timer(&v->arch.phys_timer.timer, | |
v->arch.phys_timer.cval + | |
- v->domain->arch.phys_timer_base.offset); | |
+ v->domain->arch.phys_timer_base.offset); | |
} | |
} | |
return true; | |
@@ -242,7 +244,7 @@ static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r, | |
v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; | |
set_timer(&v->arch.phys_timer.timer, | |
v->arch.phys_timer.cval + | |
- v->domain->arch.phys_timer_base.offset); | |
+ v->domain->arch.phys_timer_base.offset); | |
} | |
} | |
return true; | |
@@ -257,7 +259,7 @@ static bool vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr) | |
else | |
perfc_incr(vtimer_cp32_writes); | |
- switch ( hsr.bits & HSR_CP32_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP32_REGS_MASK) | |
{ | |
case HSR_CPREG32(CNTP_CTL): | |
return vreg_emulate_cp32(regs, hsr, vtimer_cntp_ctl); | |
@@ -279,7 +281,7 @@ static bool vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) | |
else | |
perfc_incr(vtimer_cp64_writes); | |
- switch ( hsr.bits & HSR_CP64_REGS_MASK ) | |
+ switch (hsr.bits & HSR_CP64_REGS_MASK) | |
{ | |
case HSR_CPREG64(CNTP_CVAL): | |
return vreg_emulate_cp64(regs, hsr, vtimer_cntp_cval); | |
@@ -299,7 +301,7 @@ static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) | |
else | |
perfc_incr(vtimer_sysreg_writes); | |
- switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) | |
+ switch (hsr.bits & HSR_SYSREG_REGS_MASK) | |
{ | |
case HSR_SYSREG_CNTP_CTL_EL0: | |
return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_ctl); | |
@@ -311,14 +313,13 @@ static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) | |
default: | |
return false; | |
} | |
- | |
} | |
#endif | |
bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr) | |
{ | |
- | |
- switch (hsr.ec) { | |
+ switch (hsr.ec) | |
+ { | |
case HSR_EC_CP15_32: | |
return vtimer_emulate_cp32(regs, hsr); | |
case HSR_EC_CP15_64: | |
@@ -340,20 +341,23 @@ static void vtimer_update_irq(struct vcpu *v, struct vtimer *vtimer, | |
/* Filter for the three bits that determine the status of the timer */ | |
vtimer_ctl &= (CNTx_CTL_ENABLE | CNTx_CTL_PENDING | CNTx_CTL_MASK); | |
- /* The level is high if the timer is pending and enabled, but not masked. */ | |
+ /* | |
+ * The level is high if the timer is pending and enabled, but not | |
+ * masked. | |
+ */ | |
level = (vtimer_ctl == (CNTx_CTL_ENABLE | CNTx_CTL_PENDING)); | |
/* | |
* This is mostly here to *lower* the virtual interrupt line if the timer | |
* is no longer pending. | |
- * We would have injected an IRQ already via SOFTIRQ when the timer expired. | |
- * Doing it here again is basically a NOP if the line was already high. | |
+ * We would have injected an IRQ already via SOFTIRQ when the timer | |
+ * expired. Doing it here again is basically a NOP if the line was already | |
+ * high. | |
*/ | |
vgic_inject_irq(v->domain, v, vtimer->irq, level); | |
} | |
/** | |
- * vtimer_update_irqs() - update the virtual timers' IRQ lines after a guest run | |
+ * vtimer_update_irqs() - update the virtual timers' IRQ lines after a guest | |
+ * run | |
* @vcpu: The VCPU to sync the timer state | |
* | |
* After returning from a guest, update the state of the timers' virtual | |
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c | |
index 80d4755d43..dd06c28858 100644 | |
--- a/xen/arch/arm/vuart.c | |
+++ b/xen/arch/arm/vuart.c | |
@@ -42,19 +42,19 @@ | |
#define domain_has_vuart(d) ((d)->arch.vuart.info != NULL) | |
-static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, | |
- register_t *r, void *priv); | |
-static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, | |
- register_t r, void *priv); | |
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, | |
+ void *priv); | |
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, | |
+ void *priv); | |
static const struct mmio_handler_ops vuart_mmio_handler = { | |
- .read = vuart_mmio_read, | |
+ .read = vuart_mmio_read, | |
.write = vuart_mmio_write, | |
}; | |
int domain_vuart_init(struct domain *d) | |
{ | |
- ASSERT( is_hardware_domain(d) ); | |
+ ASSERT(is_hardware_domain(d)); | |
d->arch.vuart.info = serial_vuart_info(SERHND_DTUART); | |
if ( !d->arch.vuart.info ) | |
@@ -69,8 +69,7 @@ int domain_vuart_init(struct domain *d) | |
register_mmio_handler(d, &vuart_mmio_handler, | |
d->arch.vuart.info->base_addr, | |
- d->arch.vuart.info->size, | |
- NULL); | |
+ d->arch.vuart.info->size, NULL); | |
return 0; | |
} | |
@@ -90,7 +89,7 @@ static void vuart_print_char(struct vcpu *v, char c) | |
/* Accept only printable characters, newline, and horizontal tab. */ | |
if ( !isprint(c) && (c != '\n') && (c != '\t') ) | |
- return ; | |
+ return; | |
spin_lock(&uart->lock); | |
uart->buf[uart->idx++] = c; | |
@@ -105,8 +104,8 @@ static void vuart_print_char(struct vcpu *v, char c) | |
spin_unlock(&uart->lock); | |
} | |
-static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, | |
- register_t *r, void *priv) | |
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, register_t *r, | |
+ void *priv) | |
{ | |
struct domain *d = v->domain; | |
paddr_t offset = info->gpa - d->arch.vuart.info->base_addr; | |
@@ -123,8 +122,8 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, | |
return 1; | |
} | |
-static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, | |
- register_t r, void *priv) | |
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, register_t r, | |
+ void *priv) | |
{ | |
struct domain *d = v->domain; | |
paddr_t offset = info->gpa - d->arch.vuart.info->base_addr; | |
@@ -146,4 +145,3 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, | |
* indent-tabs-mode: nil | |
* End: | |
*/ | |
- | |
diff --git a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c | |
index 1382b4dcd0..3688d94e37 100644 | |
--- a/xen/arch/x86/acpi/boot.c | |
+++ b/xen/arch/x86/acpi/boot.c | |
@@ -42,7 +42,7 @@ | |
#include <mach_apic.h> | |
#include <mach_mpparse.h> | |
-#define PREFIX "ACPI: " | |
+#define PREFIX "ACPI: " | |
bool __initdata acpi_noirq; /* skip ACPI IRQ initialization */ | |
bool __initdata acpi_ht = true; /* enable HT */ | |
@@ -58,454 +58,452 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | |
/* -------------------------------------------------------------------------- | |
Boot-time Configuration | |
- -------------------------------------------------------------------------- */ | |
+ -------------------------------------------------------------------------- | |
+ */ | |
static int __init acpi_parse_madt(struct acpi_table_header *table) | |
{ | |
- struct acpi_table_madt *madt; | |
+ struct acpi_table_madt *madt; | |
- madt = (struct acpi_table_madt *)table; | |
+ madt = (struct acpi_table_madt *)table; | |
- if (madt->address) { | |
- acpi_lapic_addr = (u64) madt->address; | |
+ if ( madt->address ) | |
+ { | |
+ acpi_lapic_addr = (u64)madt->address; | |
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", | |
- madt->address); | |
- } | |
+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", madt->address); | |
+ } | |
- acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); | |
+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) | |
+static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_local_x2apic *processor = | |
- container_of(header, struct acpi_madt_local_x2apic, header); | |
- bool enabled = false, log = false; | |
- | |
- if (BAD_MADT_ENTRY(processor, end)) | |
- return -EINVAL; | |
- | |
- if ((processor->lapic_flags & ACPI_MADT_ENABLED) || | |
- processor->local_apic_id != 0xffffffff || opt_cpu_info) { | |
- acpi_table_print_madt_entry(header); | |
- log = true; | |
- } | |
- | |
- /* Record local apic id only when enabled and fitting. */ | |
- if (processor->local_apic_id >= MAX_APICS || | |
- processor->uid >= MAX_MADT_ENTRIES) { | |
- if (log) | |
- printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit" | |
- " - processor ignored\n", | |
- processor->lapic_flags & ACPI_MADT_ENABLED | |
- ? KERN_WARNING "WARNING: " : KERN_INFO, | |
- processor->local_apic_id, processor->uid); | |
- /* | |
- * Must not return an error here, to prevent | |
- * acpi_table_parse_entries() from terminating early. | |
- */ | |
- return 0 /* -ENOSPC */; | |
- } | |
- if (processor->lapic_flags & ACPI_MADT_ENABLED) { | |
- x86_acpiid_to_apicid[processor->uid] = | |
- processor->local_apic_id; | |
- enabled = true; | |
- } | |
- | |
- /* | |
- * We need to register disabled CPU as well to permit | |
- * counting disabled CPUs. This allows us to size | |
- * cpus_possible_map more accurately, to permit | |
- * to not preallocating memory for all NR_CPUS | |
- * when we use CPU hotplug. | |
- */ | |
- mp_register_lapic(processor->local_apic_id, enabled, 0); | |
- | |
- return 0; | |
+ struct acpi_madt_local_x2apic *processor = | |
+ container_of(header, struct acpi_madt_local_x2apic, header); | |
+ bool enabled = false, log = false; | |
+ | |
+ if ( BAD_MADT_ENTRY(processor, end) ) | |
+ return -EINVAL; | |
+ | |
+ if ( (processor->lapic_flags & ACPI_MADT_ENABLED) || | |
+ processor->local_apic_id != 0xffffffff || opt_cpu_info ) | |
+ { | |
+ acpi_table_print_madt_entry(header); | |
+ log = true; | |
+ } | |
+ | |
+ /* Record local apic id only when enabled and fitting. */ | |
+ if ( processor->local_apic_id >= MAX_APICS || | |
+ processor->uid >= MAX_MADT_ENTRIES ) | |
+ { | |
+ if ( log ) | |
+ printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit" | |
+ " - processor ignored\n", | |
+ processor->lapic_flags & ACPI_MADT_ENABLED ? KERN_WARNING | |
+ "WARNING: " | |
+ : KERN_INFO, | |
+ processor->local_apic_id, processor->uid); | |
+ /* | |
+ * Must not return an error here, to prevent | |
+ * acpi_table_parse_entries() from terminating early. | |
+ */ | |
+ return 0 /* -ENOSPC */; | |
+ } | |
+ if ( processor->lapic_flags & ACPI_MADT_ENABLED ) | |
+ { | |
+ x86_acpiid_to_apicid[processor->uid] = processor->local_apic_id; | |
+ enabled = true; | |
+ } | |
+ | |
+ /* | |
+ * We need to register disabled CPUs as well, to permit counting | |
+ * them. This allows us to size cpus_possible_map more accurately, | |
+ * so that we need not preallocate memory for all NR_CPUS when we | |
+ * use CPU hotplug. | |
+ */ | |
+ mp_register_lapic(processor->local_apic_id, enabled, 0); | |
+ | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) | |
+static int __init acpi_parse_lapic(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_local_apic *processor = | |
- container_of(header, struct acpi_madt_local_apic, header); | |
- bool enabled = false; | |
- | |
- if (BAD_MADT_ENTRY(processor, end)) | |
- return -EINVAL; | |
- | |
- if ((processor->lapic_flags & ACPI_MADT_ENABLED) || | |
- processor->id != 0xff || opt_cpu_info) | |
- acpi_table_print_madt_entry(header); | |
- | |
- /* Record local apic id only when enabled */ | |
- if (processor->lapic_flags & ACPI_MADT_ENABLED) { | |
- x86_acpiid_to_apicid[processor->processor_id] = processor->id; | |
- enabled = true; | |
- } | |
- | |
- /* | |
- * We need to register disabled CPU as well to permit | |
- * counting disabled CPUs. This allows us to size | |
- * cpus_possible_map more accurately, to permit | |
- * to not preallocating memory for all NR_CPUS | |
- * when we use CPU hotplug. | |
- */ | |
- mp_register_lapic(processor->id, enabled, 0); | |
- | |
- return 0; | |
+ struct acpi_madt_local_apic *processor = | |
+ container_of(header, struct acpi_madt_local_apic, header); | |
+ bool enabled = false; | |
+ | |
+ if ( BAD_MADT_ENTRY(processor, end) ) | |
+ return -EINVAL; | |
+ | |
+ if ( (processor->lapic_flags & ACPI_MADT_ENABLED) || | |
+ processor->id != 0xff || opt_cpu_info ) | |
+ acpi_table_print_madt_entry(header); | |
+ | |
+ /* Record local apic id only when enabled */ | |
+ if ( processor->lapic_flags & ACPI_MADT_ENABLED ) | |
+ { | |
+ x86_acpiid_to_apicid[processor->processor_id] = processor->id; | |
+ enabled = true; | |
+ } | |
+ | |
+ /* | |
+ * We need to register disabled CPUs as well, to permit counting | |
+ * them. This allows us to size cpus_possible_map more accurately, | |
+ * so that we need not preallocate memory for all NR_CPUS when we | |
+ * use CPU hotplug. | |
+ */ | |
+ mp_register_lapic(processor->id, enabled, 0); | |
+ | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, | |
- const unsigned long end) | |
+static int __init acpi_parse_lapic_addr_ovr( | |
+ struct acpi_subtable_header *header, const unsigned long end) | |
{ | |
- struct acpi_madt_local_apic_override *lapic_addr_ovr = | |
- container_of(header, struct acpi_madt_local_apic_override, | |
- header); | |
+ struct acpi_madt_local_apic_override *lapic_addr_ovr = | |
+ container_of(header, struct acpi_madt_local_apic_override, header); | |
- if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(lapic_addr_ovr, end) ) | |
+ return -EINVAL; | |
- acpi_lapic_addr = lapic_addr_ovr->address; | |
+ acpi_lapic_addr = lapic_addr_ovr->address; | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, | |
- const unsigned long end) | |
+static int __init acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_local_x2apic_nmi *x2apic_nmi = | |
- container_of(header, struct acpi_madt_local_x2apic_nmi, | |
- header); | |
+ struct acpi_madt_local_x2apic_nmi *x2apic_nmi = | |
+ container_of(header, struct acpi_madt_local_x2apic_nmi, header); | |
- if (BAD_MADT_ENTRY(x2apic_nmi, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(x2apic_nmi, end) ) | |
+ return -EINVAL; | |
- acpi_table_print_madt_entry(header); | |
+ acpi_table_print_madt_entry(header); | |
- if (x2apic_nmi->lint != 1) | |
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); | |
+ if ( x2apic_nmi->lint != 1 ) | |
+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) | |
+static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_local_apic_nmi *lapic_nmi = | |
- container_of(header, struct acpi_madt_local_apic_nmi, header); | |
+ struct acpi_madt_local_apic_nmi *lapic_nmi = | |
+ container_of(header, struct acpi_madt_local_apic_nmi, header); | |
- if (BAD_MADT_ENTRY(lapic_nmi, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(lapic_nmi, end) ) | |
+ return -EINVAL; | |
- acpi_table_print_madt_entry(header); | |
+ acpi_table_print_madt_entry(header); | |
- if (lapic_nmi->lint != 1) | |
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); | |
+ if ( lapic_nmi->lint != 1 ) | |
+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) | |
+static int __init acpi_parse_ioapic(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_io_apic *ioapic = | |
- container_of(header, struct acpi_madt_io_apic, header); | |
+ struct acpi_madt_io_apic *ioapic = | |
+ container_of(header, struct acpi_madt_io_apic, header); | |
- if (BAD_MADT_ENTRY(ioapic, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(ioapic, end) ) | |
+ return -EINVAL; | |
- acpi_table_print_madt_entry(header); | |
+ acpi_table_print_madt_entry(header); | |
- mp_register_ioapic(ioapic->id, | |
- ioapic->address, ioapic->global_irq_base); | |
+ mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base); | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header, | |
- const unsigned long end) | |
+static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_interrupt_override *intsrc = | |
- container_of(header, struct acpi_madt_interrupt_override, | |
- header); | |
+ struct acpi_madt_interrupt_override *intsrc = | |
+ container_of(header, struct acpi_madt_interrupt_override, header); | |
- if (BAD_MADT_ENTRY(intsrc, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(intsrc, end) ) | |
+ return -EINVAL; | |
- acpi_table_print_madt_entry(header); | |
+ acpi_table_print_madt_entry(header); | |
- if (acpi_skip_timer_override && | |
- intsrc->source_irq == 0 && intsrc->global_irq == 2) { | |
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); | |
- return 0; | |
- } | |
+ if ( acpi_skip_timer_override && intsrc->source_irq == 0 && | |
+ intsrc->global_irq == 2 ) | |
+ { | |
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); | |
+ return 0; | |
+ } | |
- mp_override_legacy_irq(intsrc->source_irq, | |
- ACPI_MADT_GET_POLARITY(intsrc->inti_flags), | |
- ACPI_MADT_GET_TRIGGER(intsrc->inti_flags), | |
- intsrc->global_irq); | |
+ mp_override_legacy_irq( | |
+ intsrc->source_irq, ACPI_MADT_GET_POLARITY(intsrc->inti_flags), | |
+ ACPI_MADT_GET_TRIGGER(intsrc->inti_flags), intsrc->global_irq); | |
- return 0; | |
+ return 0; | |
} | |
-static int __init | |
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) | |
+static int __init acpi_parse_nmi_src(struct acpi_subtable_header *header, | |
+ const unsigned long end) | |
{ | |
- struct acpi_madt_nmi_source *nmi_src = | |
- container_of(header, struct acpi_madt_nmi_source, header); | |
+ struct acpi_madt_nmi_source *nmi_src = | |
+ container_of(header, struct acpi_madt_nmi_source, header); | |
- if (BAD_MADT_ENTRY(nmi_src, end)) | |
- return -EINVAL; | |
+ if ( BAD_MADT_ENTRY(nmi_src, end) ) | |
+ return -EINVAL; | |
- acpi_table_print_madt_entry(header); | |
+ acpi_table_print_madt_entry(header); | |
- /* TBD: Support nimsrc entries? */ | |
+ /* TBD: Support NMI source entries? */ | |
- return 0; | |
+ return 0; | |
} | |
#ifdef CONFIG_HPET_TIMER | |
static int __init acpi_parse_hpet(struct acpi_table_header *table) | |
{ | |
- struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; | |
- | |
- if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { | |
- printk(KERN_WARNING PREFIX "HPET timers must be located in " | |
- "memory.\n"); | |
- return -1; | |
- } | |
- | |
- /* | |
- * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS | |
- * bug; the intended way of supporting more than 1 HPET is to use AML | |
- * entries. | |
- * | |
- * If someone finds a real system with two genuine HPET tables, perhaps | |
- * they will be kind and implement support. Until then however, warn | |
- * that we will ignore subsequent tables. | |
- */ | |
- if (hpet_address) | |
- { | |
- printk(KERN_WARNING PREFIX | |
- "Found multiple HPET tables. Only using first\n"); | |
- return -1; | |
- } | |
- | |
- hpet_address = hpet_tbl->address.address; | |
- hpet_blockid = hpet_tbl->sequence; | |
- hpet_flags = hpet_tbl->flags; | |
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", | |
- hpet_tbl->id, hpet_address); | |
- | |
- return 0; | |
+ struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; | |
+ | |
+ if ( hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY ) | |
+ { | |
+ printk(KERN_WARNING PREFIX "HPET timers must be located in " | |
+ "memory.\n"); | |
+ return -1; | |
+ } | |
+ | |
+ /* | |
+ * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS | |
+ * bug; the intended way of supporting more than 1 HPET is to use AML | |
+ * entries. | |
+ * | |
+ * If someone finds a real system with two genuine HPET tables, perhaps | |
+ * they will be kind and implement support. Until then however, warn | |
+ * that we will ignore subsequent tables. | |
+ */ | |
+ if ( hpet_address ) | |
+ { | |
+ printk(KERN_WARNING PREFIX | |
+ "Found multiple HPET tables. Only using first\n"); | |
+ return -1; | |
+ } | |
+ | |
+ hpet_address = hpet_tbl->address.address; | |
+ hpet_blockid = hpet_tbl->sequence; | |
+ hpet_flags = hpet_tbl->flags; | |
+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", hpet_tbl->id, | |
+ hpet_address); | |
+ | |
+ return 0; | |
} | |
#else | |
-#define acpi_parse_hpet NULL | |
+#define acpi_parse_hpet NULL | |
#endif | |
static int __init acpi_invalidate_bgrt(struct acpi_table_header *table) | |
{ | |
- struct acpi_table_bgrt *bgrt_tbl = | |
- container_of(table, struct acpi_table_bgrt, header); | |
+ struct acpi_table_bgrt *bgrt_tbl = | |
+ container_of(table, struct acpi_table_bgrt, header); | |
- if (table->length < sizeof(*bgrt_tbl)) | |
- return -1; | |
+ if ( table->length < sizeof(*bgrt_tbl) ) | |
+ return -1; | |
- if (bgrt_tbl->version == 1 && bgrt_tbl->image_address | |
- && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address), | |
- RAM_TYPE_CONVENTIONAL)) | |
- return 0; | |
+ if ( bgrt_tbl->version == 1 && bgrt_tbl->image_address && | |
+ !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address), | |
+ RAM_TYPE_CONVENTIONAL) ) | |
+ return 0; | |
- printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n", | |
- bgrt_tbl->version, bgrt_tbl->image_address); | |
- bgrt_tbl->image_address = 0; | |
- bgrt_tbl->status &= ~1; | |
+ printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#" PRIx64 "\n", | |
+ bgrt_tbl->version, bgrt_tbl->image_address); | |
+ bgrt_tbl->image_address = 0; | |
+ bgrt_tbl->status &= ~1; | |
- return 0; | |
+ return 0; | |
} | |
#ifdef CONFIG_ACPI_SLEEP | |
-#define acpi_fadt_copy_address(dst, src, len) do { \ | |
- if (fadt->header.revision >= FADT2_REVISION_ID && \ | |
- fadt->header.length >= ACPI_FADT_V2_SIZE) \ | |
- acpi_sinfo.dst##_blk = fadt->x##src##_block; \ | |
- if (!acpi_sinfo.dst##_blk.address) { \ | |
- acpi_sinfo.dst##_blk.address = fadt->src##_block; \ | |
- acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \ | |
- acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \ | |
- acpi_sinfo.dst##_blk.bit_offset = 0; \ | |
- acpi_sinfo.dst##_blk.access_width = fadt->len##_length; \ | |
- } \ | |
-} while (0) | |
+#define acpi_fadt_copy_address(dst, src, len) \ | |
+ do { \ | |
+ if ( fadt->header.revision >= FADT2_REVISION_ID && \ | |
+ fadt->header.length >= ACPI_FADT_V2_SIZE ) \ | |
+ acpi_sinfo.dst##_blk = fadt->x##src##_block; \ | |
+ if ( !acpi_sinfo.dst##_blk.address ) \ | |
+ { \ | |
+ acpi_sinfo.dst##_blk.address = fadt->src##_block; \ | |
+ acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \ | |
+ acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \ | |
+ acpi_sinfo.dst##_blk.bit_offset = 0; \ | |
+ acpi_sinfo.dst##_blk.access_width = fadt->len##_length; \ | |
+ } \ | |
+ } while ( 0 ) | |
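The do { ... } while ( 0 ) wrapper retained here is the standard idiom for making a multi-statement macro behave as a single statement; a minimal illustration of why it matters:

    #include <stdio.h>

    /* Without the wrapper, the two statements would break an unbraced
     * if/else; with it, the macro expands to one statement that the
     * caller's trailing semicolon completes. */
    #define REPORT_TWICE(msg) \
        do {                  \
            puts(msg);        \
            puts(msg);        \
        } while ( 0 )

    int main(void)
    {
        if ( 1 )
            REPORT_TWICE("hello");
        else
            puts("unreachable");
        return 0;
    }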
/* Get pm1x_cnt and pm1x_evt information for ACPI sleep */ | |
-static void __init | |
-acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt) | |
+static void __init acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt) | |
{ | |
- struct acpi_table_facs *facs = NULL; | |
- uint64_t facs_pa; | |
- | |
- if (fadt->header.revision >= 5 && | |
- fadt->header.length >= ACPI_FADT_V5_SIZE) { | |
- acpi_sinfo.sleep_control = fadt->sleep_control; | |
- acpi_sinfo.sleep_status = fadt->sleep_status; | |
- | |
- printk(KERN_INFO PREFIX | |
- "v5 SLEEP INFO: control[%d:%"PRIx64"]," | |
- " status[%d:%"PRIx64"]\n", | |
- acpi_sinfo.sleep_control.space_id, | |
- acpi_sinfo.sleep_control.address, | |
- acpi_sinfo.sleep_status.space_id, | |
- acpi_sinfo.sleep_status.address); | |
- | |
- if ((fadt->sleep_control.address && | |
- (fadt->sleep_control.bit_offset || | |
- fadt->sleep_control.bit_width != | |
- fadt->sleep_control.access_width * 8)) || | |
- (fadt->sleep_status.address && | |
- (fadt->sleep_status.bit_offset || | |
- fadt->sleep_status.bit_width != | |
- fadt->sleep_status.access_width * 8))) { | |
- printk(KERN_WARNING PREFIX | |
- "Invalid sleep control/status register data:" | |
- " %#x:%#x:%#x %#x:%#x:%#x\n", | |
- fadt->sleep_control.bit_offset, | |
- fadt->sleep_control.bit_width, | |
- fadt->sleep_control.access_width, | |
- fadt->sleep_status.bit_offset, | |
- fadt->sleep_status.bit_width, | |
- fadt->sleep_status.access_width); | |
- fadt->sleep_control.address = 0; | |
- fadt->sleep_status.address = 0; | |
- } | |
- } | |
- | |
- if (fadt->flags & ACPI_FADT_HW_REDUCED) | |
- goto bad; | |
- | |
- acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control); | |
- acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control); | |
- acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event); | |
- acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event); | |
- | |
- printk(KERN_INFO PREFIX | |
- "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], " | |
- "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n", | |
- acpi_sinfo.pm1a_cnt_blk.space_id, | |
- acpi_sinfo.pm1a_cnt_blk.address, | |
- acpi_sinfo.pm1b_cnt_blk.space_id, | |
- acpi_sinfo.pm1b_cnt_blk.address, | |
- acpi_sinfo.pm1a_evt_blk.space_id, | |
- acpi_sinfo.pm1a_evt_blk.address, | |
- acpi_sinfo.pm1b_evt_blk.space_id, | |
- acpi_sinfo.pm1b_evt_blk.address); | |
- | |
- /* Now FACS... */ | |
- facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID) | |
- ? fadt->Xfacs : (uint64_t)fadt->facs); | |
- if (fadt->facs && ((uint64_t)fadt->facs != facs_pa)) { | |
- printk(KERN_WARNING PREFIX | |
- "32/64X FACS address mismatch in FADT - " | |
- "%08x/%016"PRIx64", using 32\n", | |
- fadt->facs, facs_pa); | |
- facs_pa = (uint64_t)fadt->facs; | |
- } | |
- if (!facs_pa) | |
- goto bad; | |
- | |
- facs = (struct acpi_table_facs *) | |
- __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs)); | |
- if (!facs) | |
- goto bad; | |
- | |
- if (strncmp(facs->signature, "FACS", 4)) { | |
- printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n", | |
- facs->signature); | |
- goto bad; | |
- } | |
- | |
- if (facs->length < 24) { | |
- printk(KERN_ERR PREFIX "Invalid FACS table length: %#x", | |
- facs->length); | |
- goto bad; | |
- } | |
- | |
- if (facs->length < 64) | |
- printk(KERN_WARNING PREFIX | |
- "FACS is shorter than ACPI spec allow: %#x", | |
- facs->length); | |
- | |
- acpi_sinfo.wakeup_vector = facs_pa + | |
- offsetof(struct acpi_table_facs, firmware_waking_vector); | |
- acpi_sinfo.vector_width = 32; | |
- | |
- printk(KERN_INFO PREFIX | |
- " wakeup_vec[%"PRIx64"], vec_size[%x]\n", | |
- acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width); | |
- return; | |
+ struct acpi_table_facs *facs = NULL; | |
+ uint64_t facs_pa; | |
+ | |
+ if ( fadt->header.revision >= 5 && | |
+ fadt->header.length >= ACPI_FADT_V5_SIZE ) | |
+ { | |
+ acpi_sinfo.sleep_control = fadt->sleep_control; | |
+ acpi_sinfo.sleep_status = fadt->sleep_status; | |
+ | |
+ printk(KERN_INFO PREFIX "v5 SLEEP INFO: control[%d:%" PRIx64 "]," | |
+ " status[%d:%" PRIx64 "]\n", | |
+ acpi_sinfo.sleep_control.space_id, | |
+ acpi_sinfo.sleep_control.address, | |
+ acpi_sinfo.sleep_status.space_id, | |
+ acpi_sinfo.sleep_status.address); | |
+ | |
+ if ( (fadt->sleep_control.address && | |
+ (fadt->sleep_control.bit_offset || | |
+ fadt->sleep_control.bit_width != | |
+ fadt->sleep_control.access_width * 8)) || | |
+ (fadt->sleep_status.address && | |
+ (fadt->sleep_status.bit_offset || | |
+ fadt->sleep_status.bit_width != | |
+ fadt->sleep_status.access_width * 8)) ) | |
+ { | |
+ printk(KERN_WARNING PREFIX | |
+ "Invalid sleep control/status register data:" | |
+ " %#x:%#x:%#x %#x:%#x:%#x\n", | |
+ fadt->sleep_control.bit_offset, | |
+ fadt->sleep_control.bit_width, | |
+ fadt->sleep_control.access_width, | |
+ fadt->sleep_status.bit_offset, fadt->sleep_status.bit_width, | |
+ fadt->sleep_status.access_width); | |
+ fadt->sleep_control.address = 0; | |
+ fadt->sleep_status.address = 0; | |
+ } | |
+ } | |
+ | |
+ if ( fadt->flags & ACPI_FADT_HW_REDUCED ) | |
+ goto bad; | |
+ | |
+ acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control); | |
+ acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control); | |
+ acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event); | |
+ acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event); | |
+ | |
+ printk(KERN_INFO PREFIX "SLEEP INFO: pm1x_cnt[%d:%" PRIx64 ",%d:%" PRIx64 | |
+ "], " | |
+ "pm1x_evt[%d:%" PRIx64 ",%d:%" PRIx64 "]\n", | |
+ acpi_sinfo.pm1a_cnt_blk.space_id, acpi_sinfo.pm1a_cnt_blk.address, | |
+ acpi_sinfo.pm1b_cnt_blk.space_id, acpi_sinfo.pm1b_cnt_blk.address, | |
+ acpi_sinfo.pm1a_evt_blk.space_id, acpi_sinfo.pm1a_evt_blk.address, | |
+ acpi_sinfo.pm1b_evt_blk.space_id, acpi_sinfo.pm1b_evt_blk.address); | |
+ | |
+ /* Now FACS... */ | |
+ facs_pa = | |
+ ((fadt->header.revision >= FADT2_REVISION_ID) ? fadt->Xfacs | |
+ : (uint64_t)fadt->facs); | |
+ if ( fadt->facs && ((uint64_t)fadt->facs != facs_pa) ) | |
+ { | |
+ printk(KERN_WARNING PREFIX "32/64X FACS address mismatch in FADT - " | |
+ "%08x/%016" PRIx64 ", using 32\n", | |
+ fadt->facs, facs_pa); | |
+ facs_pa = (uint64_t)fadt->facs; | |
+ } | |
+ if ( !facs_pa ) | |
+ goto bad; | |
+ | |
+ facs = (struct acpi_table_facs *)__acpi_map_table( | |
+ facs_pa, sizeof(struct acpi_table_facs)); | |
+ if ( !facs ) | |
+ goto bad; | |
+ | |
+ if ( strncmp(facs->signature, "FACS", 4) ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n", | |
+ facs->signature); | |
+ goto bad; | |
+ } | |
+ | |
+ if ( facs->length < 24 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Invalid FACS table length: %#x", facs->length); | |
+ goto bad; | |
+ } | |
+ | |
+ if ( facs->length < 64 ) | |
+        printk(KERN_WARNING PREFIX "FACS is shorter than ACPI spec allows: %#x", | |
+ facs->length); | |
+ | |
+ acpi_sinfo.wakeup_vector = | |
+ facs_pa + offsetof(struct acpi_table_facs, firmware_waking_vector); | |
+ acpi_sinfo.vector_width = 32; | |
+ | |
+ printk(KERN_INFO PREFIX " wakeup_vec[%" PRIx64 | |
+ "], vec_size[%x]\n", | |
+ acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width); | |
+ return; | |
bad: | |
- memset(&acpi_sinfo, 0, | |
- offsetof(struct acpi_sleep_info, sleep_control)); | |
- memset(&acpi_sinfo.sleep_status + 1, 0, | |
- (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1)); | |
+ memset(&acpi_sinfo, 0, offsetof(struct acpi_sleep_info, sleep_control)); | |
+ memset(&acpi_sinfo.sleep_status + 1, 0, | |
+ (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1)); | |
} | |
#endif | |
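For reference, the sleep-info path above boils down to: map the FACS, verify its signature and minimum length (24 bytes as a hard floor, 64 bytes per spec), and record the physical address of the 32-bit firmware waking vector. A runnable sketch over an in-memory table, using a simplified stand-in for `struct acpi_table_facs`:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in: only the fields the checks above look at. Per ACPI,
 * the FACS starts with signature, length, hardware signature, then the
 * 32-bit firmware waking vector. */
struct facs {
    char signature[4];
    uint32_t length;
    uint32_t hardware_signature;
    uint32_t firmware_waking_vector;
};

static int parse_facs(const struct facs *facs, uint64_t facs_pa,
                      uint64_t *wakeup_vector)
{
    if (strncmp(facs->signature, "FACS", 4))
        return -1;                     /* bad signature */
    if (facs->length < 24)
        return -1;                     /* shorter than any legal FACS */
    if (facs->length < 64)
        printf("warning: FACS shorter than the spec allows: %#x\n",
               facs->length);

    /* What gets saved is the *physical* address of the vector field. */
    *wakeup_vector = facs_pa + offsetof(struct facs, firmware_waking_vector);
    return 0;
}

int main(void)
{
    struct facs f = { {'F','A','C','S'}, 64, 0, 0 };
    uint64_t vec;

    if (!parse_facs(&f, 0xDFFF0000, &vec))
        printf("wakeup vector at %#llx\n", (unsigned long long)vec);
    return 0;
}
```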
static int __init acpi_parse_fadt(struct acpi_table_header *table) | |
{ | |
- struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table; | |
+ struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table; | |
-#ifdef CONFIG_ACPI_INTERPRETER | |
- /* initialize sci_int early for INT_SRC_OVR MADT parsing */ | |
- acpi_fadt.sci_int = fadt->sci_int; | |
+#ifdef CONFIG_ACPI_INTERPRETER | |
+ /* initialize sci_int early for INT_SRC_OVR MADT parsing */ | |
+ acpi_fadt.sci_int = fadt->sci_int; | |
- /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ | |
- acpi_fadt.revision = fadt->revision; | |
- acpi_fadt.force_apic_physical_destination_mode = | |
- fadt->force_apic_physical_destination_mode; | |
+ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ | |
+ acpi_fadt.revision = fadt->revision; | |
+ acpi_fadt.force_apic_physical_destination_mode = | |
+ fadt->force_apic_physical_destination_mode; | |
#endif | |
#ifdef CONFIG_X86_PM_TIMER | |
- /* detect the location of the ACPI PM Timer */ | |
- if (fadt->header.revision >= FADT2_REVISION_ID) { | |
- /* FADT rev. 2 */ | |
- if (fadt->xpm_timer_block.space_id == | |
- ACPI_ADR_SPACE_SYSTEM_IO) { | |
- pmtmr_ioport = fadt->xpm_timer_block.address; | |
- pmtmr_width = fadt->xpm_timer_block.bit_width; | |
- } | |
- } | |
- /* | |
- * "X" fields are optional extensions to the original V1.0 | |
- * fields, so we must selectively expand V1.0 fields if the | |
- * corresponding X field is zero. | |
- */ | |
- if (!pmtmr_ioport) { | |
- pmtmr_ioport = fadt->pm_timer_block; | |
- pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0; | |
- } | |
- if (pmtmr_ioport) | |
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n", | |
- pmtmr_ioport, pmtmr_width); | |
+ /* detect the location of the ACPI PM Timer */ | |
+ if ( fadt->header.revision >= FADT2_REVISION_ID ) | |
+ { | |
+ /* FADT rev. 2 */ | |
+ if ( fadt->xpm_timer_block.space_id == ACPI_ADR_SPACE_SYSTEM_IO ) | |
+ { | |
+ pmtmr_ioport = fadt->xpm_timer_block.address; | |
+ pmtmr_width = fadt->xpm_timer_block.bit_width; | |
+ } | |
+ } | |
+ /* | |
+ * "X" fields are optional extensions to the original V1.0 | |
+ * fields, so we must selectively expand V1.0 fields if the | |
+ * corresponding X field is zero. | |
+ */ | |
+ if ( !pmtmr_ioport ) | |
+ { | |
+ pmtmr_ioport = fadt->pm_timer_block; | |
+ pmtmr_width = fadt->pm_timer_length == 4 ? 24 : 0; | |
+ } | |
+ if ( pmtmr_ioport ) | |
+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n", | |
+ pmtmr_ioport, pmtmr_width); | |
#endif | |
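The PM-timer hunk above keeps the V1.0 fallback (a 4-byte `pm_timer_block` implies a 24-bit counter). Whatever its width, the counter ticks at 3.579545 MHz; a small sketch of converting a tick delta to nanoseconds, with the port read stubbed out since real code would use `inl()`:

```c
#include <stdint.h>
#include <stdio.h>

#define PMTMR_FREQ_HZ 3579545u   /* ACPI PM timer frequency, 3.579545 MHz */

/* Stand-in for inl(pmtmr_ioport); a real reader would do port I/O. */
static uint32_t read_pmtmr_stub(void) { return 0x123456; }

static uint64_t pm_ticks_to_ns(uint64_t ticks)
{
    /* ns = ticks * 1e9 / 3579545, done in 64-bit arithmetic so any
     * 32-bit tick delta cannot overflow. */
    return ticks * 1000000000ull / PMTMR_FREQ_HZ;
}

int main(void)
{
    uint32_t t = read_pmtmr_stub();
    printf("%u ticks = %llu ns\n", t,
           (unsigned long long)pm_ticks_to_ns(t));
    return 0;
}
```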
- acpi_smi_cmd = fadt->smi_command; | |
- acpi_enable_value = fadt->acpi_enable; | |
- acpi_disable_value = fadt->acpi_disable; | |
+ acpi_smi_cmd = fadt->smi_command; | |
+ acpi_enable_value = fadt->acpi_enable; | |
+ acpi_disable_value = fadt->acpi_disable; | |
#ifdef CONFIG_ACPI_SLEEP | |
- acpi_fadt_parse_sleep_info(fadt); | |
+ acpi_fadt_parse_sleep_info(fadt); | |
#endif | |
- return 0; | |
+ return 0; | |
} | |
/* | |
@@ -514,54 +512,55 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table) | |
*/ | |
static int __init acpi_parse_madt_lapic_entries(void) | |
{ | |
- int count, x2count; | |
- | |
- if (!cpu_has_apic) | |
- return -ENODEV; | |
- | |
- /* | |
- * Note that the LAPIC address is obtained from the MADT (32-bit value) | |
- * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). | |
- */ | |
- | |
- count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, | |
- acpi_parse_lapic_addr_ovr, 0); | |
- if (count < 0) { | |
- printk(KERN_ERR PREFIX | |
- "Error parsing LAPIC address override entry\n"); | |
- return count; | |
- } | |
- | |
- mp_register_lapic_address(acpi_lapic_addr); | |
- | |
- BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC); | |
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, | |
- acpi_parse_lapic, MAX_APICS); | |
- x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, | |
- acpi_parse_x2apic, MAX_APICS); | |
- if (!count && !x2count) { | |
- printk(KERN_ERR PREFIX "No LAPIC entries present\n"); | |
- /* TBD: Cleanup to allow fallback to MPS */ | |
- return -ENODEV; | |
- } else if (count < 0 || x2count < 0) { | |
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); | |
- /* TBD: Cleanup to allow fallback to MPS */ | |
- return count < 0 ? count : x2count; | |
- } | |
- | |
- count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, | |
+ int count, x2count; | |
+ | |
+ if ( !cpu_has_apic ) | |
+ return -ENODEV; | |
+ | |
+ /* | |
+ * Note that the LAPIC address is obtained from the MADT (32-bit value) | |
+     * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value). | |
+ */ | |
+ | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, | |
+ acpi_parse_lapic_addr_ovr, 0); | |
+ if ( count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); | |
+ return count; | |
+ } | |
+ | |
+ mp_register_lapic_address(acpi_lapic_addr); | |
+ | |
+ BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC); | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, | |
+ MAX_APICS); | |
+ x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, | |
+ acpi_parse_x2apic, MAX_APICS); | |
+ if ( !count && !x2count ) | |
+ { | |
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n"); | |
+ /* TBD: Cleanup to allow fallback to MPS */ | |
+ return -ENODEV; | |
+ } | |
+ else if ( count < 0 || x2count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); | |
+ /* TBD: Cleanup to allow fallback to MPS */ | |
+ return count < 0 ? count : x2count; | |
+ } | |
+ | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, | |
acpi_parse_lapic_nmi, 0); | |
- x2count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, | |
- acpi_parse_x2apic_nmi, 0); | |
- if (count < 0 || x2count < 0) { | |
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); | |
- /* TBD: Cleanup to allow fallback to MPS */ | |
- return count < 0 ? count : x2count; | |
- } | |
- return 0; | |
+ x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, | |
+ acpi_parse_x2apic_nmi, 0); | |
+ if ( count < 0 || x2count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); | |
+ /* TBD: Cleanup to allow fallback to MPS */ | |
+ return count < 0 ? count : x2count; | |
+ } | |
+ return 0; | |
} | |
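`acpi_table_parse_madt()` (not shown in this diff) walks the MADT's variable-length subtables and returns the number of entries handled, or a negative error -- which is why the code above can tell "no LAPIC entries present" apart from a parse failure. A self-contained sketch of that style of walker over a fake buffer; the two-byte `{type, length}` header matches ACPI's subtable layout, everything else is illustrative:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Every MADT subtable starts with { type, length }. */
struct subtable_header { uint8_t type, length; };

typedef int (*madt_handler)(const struct subtable_header *h);

/* Walk [buf, buf+size) and call handler() on entries of 'want' type.
 * Returns how many matched, or -1 on a malformed entry -- mirroring
 * the count < 0 / count == 0 handling in the callers above. */
static int parse_madt_entries(const uint8_t *buf, size_t size,
                              uint8_t want, madt_handler handler)
{
    size_t off = 0;
    int count = 0;

    while (off + sizeof(struct subtable_header) <= size) {
        const struct subtable_header *h =
            (const struct subtable_header *)(buf + off);

        if (h->length < sizeof(*h) || off + h->length > size)
            return -1;                  /* truncated/bogus entry */
        if (h->type == want) {
            if (handler(h) < 0)
                return -1;
            count++;
        }
        off += h->length;               /* entries are variable-length */
    }
    return count;
}

static int on_lapic(const struct subtable_header *h)
{
    printf("LAPIC entry, %u bytes\n", (unsigned)h->length);
    return 0;
}

int main(void)
{
    /* type 0 = LAPIC (8 bytes), type 1 = IOAPIC (12 bytes), illustrative */
    uint8_t madt[] = { 0, 8, 0,0,0,0,0,0,  1, 12, 0,0,0,0,0,0,0,0,0,0 };

    printf("matched %d\n",
           parse_madt_entries(madt, sizeof(madt), 0, on_lapic));
    return 0;
}
```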
/* | |
@@ -570,100 +569,106 @@ static int __init acpi_parse_madt_lapic_entries(void) | |
*/ | |
static int __init acpi_parse_madt_ioapic_entries(void) | |
{ | |
- int count; | |
- | |
- /* | |
- * ACPI interpreter is required to complete interrupt setup, | |
- * so if it is off, don't enumerate the io-apics with ACPI. | |
- * If MPS is present, it will handle them, | |
- * otherwise the system will stay in PIC mode | |
- */ | |
- if (acpi_disabled || acpi_noirq) { | |
- return -ENODEV; | |
- } | |
- | |
- if (!cpu_has_apic) | |
- return -ENODEV; | |
- | |
- /* | |
- * if "noapic" boot option, don't look for IO-APICs | |
- */ | |
- if (skip_ioapic_setup) { | |
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe " | |
- "due to 'noapic' option.\n"); | |
- return -ENODEV; | |
- } | |
- | |
- count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, | |
- MAX_IO_APICS); | |
- if (!count) { | |
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); | |
- return -ENODEV; | |
- } else if (count < 0) { | |
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); | |
- return count; | |
- } | |
- | |
- count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, | |
+ int count; | |
+ | |
+ /* | |
+ * ACPI interpreter is required to complete interrupt setup, | |
+ * so if it is off, don't enumerate the io-apics with ACPI. | |
+ * If MPS is present, it will handle them, | |
+ * otherwise the system will stay in PIC mode | |
+ */ | |
+ if ( acpi_disabled || acpi_noirq ) | |
+ { | |
+ return -ENODEV; | |
+ } | |
+ | |
+ if ( !cpu_has_apic ) | |
+ return -ENODEV; | |
+ | |
+ /* | |
+ * if "noapic" boot option, don't look for IO-APICs | |
+ */ | |
+ if ( skip_ioapic_setup ) | |
+ { | |
+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe " | |
+ "due to 'noapic' option.\n"); | |
+ return -ENODEV; | |
+ } | |
+ | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, | |
+ MAX_IO_APICS); | |
+ if ( !count ) | |
+ { | |
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); | |
+ return -ENODEV; | |
+ } | |
+ else if ( count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); | |
+ return count; | |
+ } | |
+ | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, | |
acpi_parse_int_src_ovr, MAX_IRQ_SOURCES); | |
- if (count < 0) { | |
- printk(KERN_ERR PREFIX | |
- "Error parsing interrupt source overrides entry\n"); | |
- /* TBD: Cleanup to allow fallback to MPS */ | |
- return count; | |
- } | |
- | |
- /* Fill in identity legacy mapings where no override */ | |
- mp_config_acpi_legacy_irqs(); | |
- | |
- count = | |
- acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, | |
+ if ( count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX | |
+ "Error parsing interrupt source overrides entry\n"); | |
+ /* TBD: Cleanup to allow fallback to MPS */ | |
+ return count; | |
+ } | |
+ | |
+    /* Fill in identity legacy mappings where no override */ | |
+ mp_config_acpi_legacy_irqs(); | |
+ | |
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, | |
acpi_parse_nmi_src, MAX_IRQ_SOURCES); | |
- if (count < 0) { | |
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); | |
- /* TBD: Cleanup to allow fallback to MPS */ | |
- return count; | |
- } | |
- | |
- return 0; | |
+ if ( count < 0 ) | |
+ { | |
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); | |
+ /* TBD: Cleanup to allow fallback to MPS */ | |
+ return count; | |
+ } | |
+ | |
+ return 0; | |
} | |
static void __init acpi_process_madt(void) | |
{ | |
- int error; | |
- | |
- if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { | |
- | |
- /* | |
- * Parse MADT LAPIC entries | |
- */ | |
- error = acpi_parse_madt_lapic_entries(); | |
- if (!error) { | |
- acpi_lapic = true; | |
- generic_bigsmp_probe(); | |
- | |
- /* | |
- * Parse MADT IO-APIC entries | |
- */ | |
- error = acpi_parse_madt_ioapic_entries(); | |
- if (!error) { | |
- acpi_ioapic = true; | |
- | |
- smp_found_config = true; | |
- clustered_apic_check(); | |
- } | |
- } | |
- if (error == -EINVAL) { | |
- /* | |
- * Dell Precision Workstation 410, 610 come here. | |
- */ | |
- printk(KERN_ERR PREFIX | |
- "Invalid BIOS MADT, disabling ACPI\n"); | |
- disable_acpi(); | |
- } | |
- } | |
+ int error; | |
+ | |
+ if ( !acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) ) | |
+ { | |
+ /* | |
+ * Parse MADT LAPIC entries | |
+ */ | |
+ error = acpi_parse_madt_lapic_entries(); | |
+ if ( !error ) | |
+ { | |
+ acpi_lapic = true; | |
+ generic_bigsmp_probe(); | |
+ | |
+ /* | |
+ * Parse MADT IO-APIC entries | |
+ */ | |
+ error = acpi_parse_madt_ioapic_entries(); | |
+ if ( !error ) | |
+ { | |
+ acpi_ioapic = true; | |
+ | |
+ smp_found_config = true; | |
+ clustered_apic_check(); | |
+ } | |
+ } | |
+ if ( error == -EINVAL ) | |
+ { | |
+ /* | |
+ * Dell Precision Workstation 410, 610 come here. | |
+ */ | |
+ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); | |
+ disable_acpi(); | |
+ } | |
+ } | |
} | |
/* | |
@@ -689,57 +694,58 @@ static void __init acpi_process_madt(void) | |
int __init acpi_boot_table_init(void) | |
{ | |
- int error; | |
- | |
- /* | |
- * If acpi_disabled, bail out | |
- * One exception: acpi=ht continues far enough to enumerate LAPICs | |
- */ | |
- if (acpi_disabled && !acpi_ht) | |
- return 1; | |
- | |
- /* | |
- * Initialize the ACPI boot-time table parser. | |
- */ | |
- error = acpi_table_init(); | |
- if (error) { | |
- disable_acpi(); | |
- return error; | |
- } | |
- | |
- return 0; | |
+ int error; | |
+ | |
+ /* | |
+ * If acpi_disabled, bail out | |
+ * One exception: acpi=ht continues far enough to enumerate LAPICs | |
+ */ | |
+ if ( acpi_disabled && !acpi_ht ) | |
+ return 1; | |
+ | |
+ /* | |
+ * Initialize the ACPI boot-time table parser. | |
+ */ | |
+ error = acpi_table_init(); | |
+ if ( error ) | |
+ { | |
+ disable_acpi(); | |
+ return error; | |
+ } | |
+ | |
+ return 0; | |
} | |
int __init acpi_boot_init(void) | |
{ | |
- /* | |
- * If acpi_disabled, bail out | |
- * One exception: acpi=ht continues far enough to enumerate LAPICs | |
- */ | |
- if (acpi_disabled && !acpi_ht) | |
- return 1; | |
+ /* | |
+ * If acpi_disabled, bail out | |
+ * One exception: acpi=ht continues far enough to enumerate LAPICs | |
+ */ | |
+ if ( acpi_disabled && !acpi_ht ) | |
+ return 1; | |
- /* | |
- * set sci_int and PM timer address | |
- */ | |
- acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); | |
+ /* | |
+ * set sci_int and PM timer address | |
+ */ | |
+ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); | |
- /* | |
- * Process the Multiple APIC Description Table (MADT), if present | |
- */ | |
- acpi_process_madt(); | |
+ /* | |
+ * Process the Multiple APIC Description Table (MADT), if present | |
+ */ | |
+ acpi_process_madt(); | |
- acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); | |
+ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); | |
- acpi_mmcfg_init(); | |
+ acpi_mmcfg_init(); | |
- acpi_iommu_init(); | |
+ acpi_iommu_init(); | |
- erst_init(); | |
+ erst_init(); | |
- acpi_hest_init(); | |
+ acpi_hest_init(); | |
- acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt); | |
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt); | |
- return 0; | |
+ return 0; | |
} | |
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c | |
index 8f7b6e9b8c..b454c71836 100644 | |
--- a/xen/arch/x86/acpi/cpu_idle.c | |
+++ b/xen/arch/x86/acpi/cpu_idle.c | |
@@ -1,6 +1,6 @@ | |
/* | |
- * cpu_idle - xen idle state module derived from Linux | |
- * drivers/acpi/processor_idle.c & | |
+ * cpu_idle - xen idle state module derived from Linux | |
+ * drivers/acpi/processor_idle.c & | |
* arch/x86/kernel/acpi/cstate.c | |
* | |
* Copyright (C) 2001, 2002 Andy Grover <[email protected]> | |
@@ -60,22 +60,30 @@ | |
/*#define DEBUG_PM_CX*/ | |
#define GET_HW_RES_IN_NS(msr, val) \ | |
- do { rdmsrl(msr, val); val = tsc_ticks2ns(val); } while( 0 ) | |
-#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) | |
-#define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */ | |
-#define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val) | |
-#define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val) | |
-#define GET_PC7_RES(val) GET_HW_RES_IN_NS(0x3FA, val) | |
-#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val) /* some Haswells only */ | |
-#define GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val) /* some Haswells only */ | |
-#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val) /* some Haswells only */ | |
-#define GET_CC1_RES(val) GET_HW_RES_IN_NS(0x660, val) /* Silvermont only */ | |
-#define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val) | |
-#define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val) | |
-#define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */ | |
-#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */ | |
- | |
-static void lapic_timer_nop(void) { } | |
+ do { \ | |
+ rdmsrl(msr, val); \ | |
+ val = tsc_ticks2ns(val); \ | |
+ } while ( 0 ) | |
+#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) | |
+#define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */ | |
+#define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val) | |
+#define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val) | |
+#define GET_PC7_RES(val) GET_HW_RES_IN_NS(0x3FA, val) | |
+#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val) /* some Haswells only \ | |
+ */ | |
+#define GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val) /* some Haswells only \ | |
+ */ | |
+#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val) /* some Haswells only \ | |
+ */ | |
+#define GET_CC1_RES(val) GET_HW_RES_IN_NS(0x660, val) /* Silvermont only */ | |
+#define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val) | |
+#define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val) | |
+#define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */ | |
+#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */ | |
+ | |
+static void lapic_timer_nop(void) | |
+{ | |
+} | |
void (*__read_mostly lapic_timer_off)(void); | |
void (*__read_mostly lapic_timer_on)(void); | |
@@ -153,7 +161,7 @@ static void do_get_hw_residencies(void *arg) | |
if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 ) | |
return; | |
- switch ( c->x86_model ) | |
+ switch (c->x86_model) | |
{ | |
/* 4th generation Intel Core (Haswell) */ | |
case 0x45: | |
@@ -267,33 +275,38 @@ static void print_hw_residencies(uint32_t cpu) | |
get_hw_residencies(cpu, &hw_res); | |
if ( hw_res.mc0 | hw_res.mc6 ) | |
- printk("MC0[%"PRIu64"] MC6[%"PRIu64"]\n", | |
- hw_res.mc0, hw_res.mc6); | |
- printk("PC2[%"PRIu64"] PC%d[%"PRIu64"] PC6[%"PRIu64"] PC7[%"PRIu64"]\n", | |
- hw_res.pc2, | |
- hw_res.pc4 ? 4 : 3, hw_res.pc4 ?: hw_res.pc3, | |
+ printk("MC0[%" PRIu64 "] MC6[%" PRIu64 "]\n", hw_res.mc0, hw_res.mc6); | |
+ printk("PC2[%" PRIu64 "] PC%d[%" PRIu64 "] PC6[%" PRIu64 "] PC7[%" PRIu64 | |
+ "]\n", | |
+ hw_res.pc2, hw_res.pc4 ? 4 : 3, hw_res.pc4 ?: hw_res.pc3, | |
hw_res.pc6, hw_res.pc7); | |
if ( hw_res.pc8 | hw_res.pc9 | hw_res.pc10 ) | |
- printk("PC8[%"PRIu64"] PC9[%"PRIu64"] PC10[%"PRIu64"]\n", | |
+ printk("PC8[%" PRIu64 "] PC9[%" PRIu64 "] PC10[%" PRIu64 "]\n", | |
hw_res.pc8, hw_res.pc9, hw_res.pc10); | |
- printk("CC%d[%"PRIu64"] CC6[%"PRIu64"] CC7[%"PRIu64"]\n", | |
- hw_res.cc1 ? 1 : 3, hw_res.cc1 ?: hw_res.cc3, | |
- hw_res.cc6, hw_res.cc7); | |
+ printk("CC%d[%" PRIu64 "] CC6[%" PRIu64 "] CC7[%" PRIu64 "]\n", | |
+ hw_res.cc1 ? 1 : 3, hw_res.cc1 ?: hw_res.cc3, hw_res.cc6, | |
+ hw_res.cc7); | |
} | |
-static char* acpi_cstate_method_name[] = | |
-{ | |
- "NONE", | |
- "SYSIO", | |
- "FFH", | |
- "HALT" | |
-}; | |
+static char *acpi_cstate_method_name[] = {"NONE", "SYSIO", "FFH", "HALT"}; | |
-static uint64_t get_stime_tick(void) { return (uint64_t)NOW(); } | |
-static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) { return t2 - t1; } | |
-static uint64_t stime_tick_to_ns(uint64_t ticks) { return ticks; } | |
+static uint64_t get_stime_tick(void) | |
+{ | |
+ return (uint64_t)NOW(); | |
+} | |
+static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) | |
+{ | |
+ return t2 - t1; | |
+} | |
+static uint64_t stime_tick_to_ns(uint64_t ticks) | |
+{ | |
+ return ticks; | |
+} | |
-static uint64_t get_acpi_pm_tick(void) { return (uint64_t)inl(pmtmr_ioport); } | |
+static uint64_t get_acpi_pm_tick(void) | |
+{ | |
+ return (uint64_t)inl(pmtmr_ioport); | |
+} | |
static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2) | |
{ | |
if ( t2 >= t1 ) | |
@@ -301,7 +314,7 @@ static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2) | |
else if ( !(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) ) | |
return (((0x00FFFFFF - t1) + t2 + 1) & 0x00FFFFFF); | |
else | |
- return ((0xFFFFFFFF - t1) + t2 +1); | |
+ return ((0xFFFFFFFF - t1) + t2 + 1); | |
} | |
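`acpi_pm_ticks_elapsed()` above compensates for the counter wrapping at 24 or 32 bits depending on the FADT's `ACPI_FADT_32BIT_TIMER` flag. The arithmetic, extracted into a runnable check:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t pm_ticks_elapsed(uint64_t t1, uint64_t t2, int timer_32bit)
{
    if (t2 >= t1)
        return t2 - t1;
    if (!timer_32bit)                       /* 24-bit counter wrapped */
        return ((0x00FFFFFF - t1) + t2 + 1) & 0x00FFFFFF;
    return (0xFFFFFFFFull - t1) + t2 + 1;   /* 32-bit counter wrapped */
}

int main(void)
{
    /* 24-bit: 3 ticks before the wrap to 5 ticks after = 8 ticks. */
    printf("%llu\n", (unsigned long long)
           pm_ticks_elapsed(0x00FFFFFD, 0x5, 0));   /* -> 8 */

    /* 32-bit wrap behaves the same way at the wider boundary. */
    printf("%llu\n", (unsigned long long)
           pm_ticks_elapsed(0xFFFFFFFE, 0x1, 1));   /* -> 3 */
    return 0;
}
```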
uint64_t (*__read_mostly cpuidle_get_tick)(void); | |
@@ -312,8 +325,8 @@ static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) | |
{ | |
uint64_t idle_res = 0, idle_usage = 0; | |
uint64_t last_state_update_tick, current_tick, current_stime; | |
- uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 }; | |
- uint64_t res_tick[ACPI_PROCESSOR_MAX_POWER] = { 0 }; | |
+ uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = {0}; | |
+ uint64_t res_tick[ACPI_PROCESSOR_MAX_POWER] = {0}; | |
unsigned int i; | |
signed int last_state_idx; | |
@@ -333,8 +346,8 @@ static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) | |
if ( last_state_idx >= 0 ) | |
{ | |
- res_tick[last_state_idx] += ticks_elapsed(last_state_update_tick, | |
- current_tick); | |
+ res_tick[last_state_idx] += | |
+ ticks_elapsed(last_state_update_tick, current_tick); | |
usage[last_state_idx]++; | |
} | |
@@ -343,15 +356,16 @@ static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power) | |
idle_usage += usage[i]; | |
idle_res += tick_to_ns(res_tick[i]); | |
- printk(" %cC%u:\ttype[C%d] latency[%3u] usage[%8"PRIu64"] method[%5s] duration[%"PRIu64"]\n", | |
- (last_state_idx == i) ? '*' : ' ', i, | |
- power->states[i].type, power->states[i].latency, usage[i], | |
+ printk(" %cC%u:\ttype[C%d] latency[%3u] usage[%8" PRIu64 | |
+ "] method[%5s] duration[%" PRIu64 "]\n", | |
+ (last_state_idx == i) ? '*' : ' ', i, power->states[i].type, | |
+ power->states[i].latency, usage[i], | |
acpi_cstate_method_name[power->states[i].entry_method], | |
tick_to_ns(res_tick[i])); | |
} | |
- printk(" %cC0:\tusage[%8"PRIu64"] duration[%"PRIu64"]\n", | |
- (last_state_idx == 0) ? '*' : ' ', | |
- usage[0] + idle_usage, current_stime - idle_res); | |
+ printk(" %cC0:\tusage[%8" PRIu64 "] duration[%" PRIu64 "]\n", | |
+ (last_state_idx == 0) ? '*' : ' ', usage[0] + idle_usage, | |
+ current_stime - idle_res); | |
print_hw_residencies(cpu); | |
} | |
@@ -371,7 +385,7 @@ static void dump_cx(unsigned char key) | |
} | |
else | |
printk("max state: unlimited\n"); | |
- for_each_present_cpu ( cpu ) | |
+ for_each_present_cpu (cpu) | |
{ | |
struct acpi_processor_power *power = processor_powers[cpu]; | |
@@ -411,7 +425,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask) | |
cpumask_and(&target, mask, &cpuidle_mwait_flags); | |
/* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */ | |
- for_each_cpu(cpu, &target) | |
+ for_each_cpu (cpu, &target) | |
mwait_wakeup(cpu) = 0; | |
cpumask_andnot(mask, mask, &target); | |
@@ -473,7 +487,7 @@ static void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |
{ | |
struct cpu_info *info = get_cpu_info(); | |
- switch ( cx->entry_method ) | |
+ switch (cx->entry_method) | |
{ | |
case ACPI_CSTATE_EM_FFH: | |
/* Call into architectural FFH based C-state */ | |
@@ -513,17 +527,18 @@ static int acpi_idle_bm_check(void) | |
return bm_status; | |
} | |
-static struct { | |
+static struct | |
+{ | |
spinlock_t lock; | |
unsigned int count; | |
-} c3_cpu_status = { .lock = SPIN_LOCK_UNLOCKED }; | |
+} c3_cpu_status = {.lock = SPIN_LOCK_UNLOCKED}; | |
void trace_exit_reason(u32 *irq_traced) | |
{ | |
if ( unlikely(tb_init_done) ) | |
{ | |
int i, curbit; | |
- u32 irr_status[8] = { 0 }; | |
+ u32 irr_status[8] = {0}; | |
/* Get local apic IRR register */ | |
for ( i = 0; i < 8; i++ ) | |
@@ -533,17 +548,18 @@ void trace_exit_reason(u32 *irq_traced) | |
while ( i < 4 && curbit < 256 ) | |
{ | |
irq_traced[i++] = curbit; | |
- curbit = find_next_bit((const unsigned long *)irr_status, 256, curbit + 1); | |
+ curbit = find_next_bit((const unsigned long *)irr_status, 256, | |
+ curbit + 1); | |
} | |
} | |
} | |
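`trace_exit_reason()` snapshots the local APIC IRR (eight 32-bit words, 256 bits) and records up to four pending vectors by scanning set bits. The same scan in portable form, with the APIC read replaced by a canned snapshot and a hand-rolled `find_next_bit` equivalent:

```c
#include <stdint.h>
#include <stdio.h>

/* Return the next set bit at index >= from in a 256-bit map stored as
 * eight 32-bit words, or 256 if none -- a tiny find_next_bit. */
static unsigned next_set_bit(const uint32_t irr[8], unsigned from)
{
    for (unsigned b = from; b < 256; b++)
        if (irr[b / 32] & (1u << (b % 32)))
            return b;
    return 256;
}

int main(void)
{
    uint32_t irr[8] = {0};
    unsigned traced[4] = {0}, i = 0, bit;

    irr[1] |= 1u << 3;   /* pretend vector 35 is pending */
    irr[7] |= 1u << 31;  /* ... and vector 255 */

    /* Record at most four pending vectors, lowest first. */
    for (bit = next_set_bit(irr, 0); i < 4 && bit < 256;
         bit = next_set_bit(irr, bit + 1))
        traced[i++] = bit;

    for (unsigned j = 0; j < i; j++)
        printf("pending vector %u\n", traced[j]);
    return 0;
}
```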
/* | |
- * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During | |
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During | |
* an Interrupt Service Routine" | |
- * | |
- * There was an errata with some Core i7 processors that an EOI transaction | |
- * may not be sent if software enters core C6 during an interrupt service | |
+ * | |
+ * There is an erratum on some Core i7 processors whereby an EOI transaction | |
+ * may not be sent if software enters core C6 during an interrupt service | |
* routine. So we don't enter deep Cx state if there is an EOI pending. | |
*/ | |
static bool errata_c6_eoi_workaround(void) | |
@@ -575,8 +591,8 @@ void update_last_cx_stat(struct acpi_processor_power *power, | |
} | |
void update_idle_stats(struct acpi_processor_power *power, | |
- struct acpi_processor_cx *cx, | |
- uint64_t before, uint64_t after) | |
+ struct acpi_processor_cx *cx, uint64_t before, | |
+ uint64_t after) | |
{ | |
int64_t sleep_ticks = alternative_call(ticks_elapsed, before, after); | |
/* Interrupts are disabled */ | |
@@ -586,8 +602,8 @@ void update_idle_stats(struct acpi_processor_power *power, | |
cx->usage++; | |
if ( sleep_ticks > 0 ) | |
{ | |
- power->last_residency = alternative_call(tick_to_ns, sleep_ticks) / | |
- 1000UL; | |
+ power->last_residency = | |
+ alternative_call(tick_to_ns, sleep_ticks) / 1000UL; | |
cx->time += sleep_ticks; | |
} | |
power->last_state = &power->states[0]; | |
@@ -603,13 +619,13 @@ static void acpi_processor_idle(void) | |
int next_state; | |
uint64_t t1, t2 = 0; | |
u32 exp = 0, pred = 0; | |
- u32 irq_traced[4] = { 0 }; | |
+ u32 irq_traced[4] = {0}; | |
if ( max_cstate > 0 && power && | |
(next_state = cpuidle_current_governor->select(power)) > 0 ) | |
{ | |
- unsigned int max_state = sched_has_urgent_vcpu() ? ACPI_STATE_C1 | |
- : max_cstate; | |
+ unsigned int max_state = | |
+ sched_has_urgent_vcpu() ? ACPI_STATE_C1 : max_cstate; | |
do { | |
cx = &power->states[next_state]; | |
@@ -668,13 +684,12 @@ static void acpi_processor_idle(void) | |
if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() ) | |
cx = power->safe_state; | |
- | |
/* | |
* Sleep: | |
* ------ | |
* Invoke the current Cx state to put the processor to sleep. | |
*/ | |
- switch ( cx->type ) | |
+ switch (cx->type) | |
{ | |
case ACPI_STATE_C1: | |
case ACPI_STATE_C2: | |
@@ -693,8 +708,8 @@ static void acpi_processor_idle(void) | |
t2 = alternative_call(cpuidle_get_tick); | |
trace_exit_reason(irq_traced); | |
/* Trace cpu idle exit */ | |
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, | |
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); | |
+ TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, irq_traced[0], | |
+ irq_traced[1], irq_traced[2], irq_traced[3]); | |
/* Update statistics */ | |
update_idle_stats(power, cx, t1, t2); | |
/* Re-enable interrupts */ | |
@@ -704,7 +719,7 @@ static void acpi_processor_idle(void) | |
case ACPI_STATE_C3: | |
/* | |
- * Before invoking C3, be aware that TSC/APIC timer may be | |
+ * Before invoking C3, be aware that TSC/APIC timer may be | |
 * stopped by H/W. Without careful handling of TSC/APIC stop issues, | |
* deep C state can't work correctly. | |
*/ | |
@@ -752,8 +767,8 @@ static void acpi_processor_idle(void) | |
/* Invoke C3 */ | |
acpi_idle_do_entry(cx); | |
- if ( (cx->type == ACPI_STATE_C3) && | |
- power->flags.bm_check && power->flags.bm_control ) | |
+ if ( (cx->type == ACPI_STATE_C3) && power->flags.bm_check && | |
+ power->flags.bm_control ) | |
{ | |
/* Enable bus master arbitration */ | |
spin_lock(&c3_cpu_status.lock); | |
@@ -769,8 +784,8 @@ static void acpi_processor_idle(void) | |
cstate_restore_tsc(); | |
trace_exit_reason(irq_traced); | |
/* Trace cpu idle exit */ | |
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, | |
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); | |
+ TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, irq_traced[0], irq_traced[1], | |
+ irq_traced[2], irq_traced[3]); | |
/* Update statistics */ | |
update_idle_stats(power, cx, t1, t2); | |
@@ -828,11 +843,11 @@ void acpi_dead_idle(void) | |
{ | |
/* | |
* 1. The CLFLUSH is a workaround for erratum AAI65 for | |
- * the Xeon 7400 series. | |
+ * the Xeon 7400 series. | |
* 2. The WBINVD is insufficient due to the spurious-wakeup | |
* case where we return around the loop. | |
- * 3. Unlike wbinvd, clflush is a light weight but not serializing | |
- * instruction, hence memory fence is necessary to make sure all | |
+ * 3. Unlike wbinvd, clflush is a light weight but not serializing | |
+ * instruction, hence memory fence is necessary to make sure all | |
* load/store visible before flush cache line. | |
*/ | |
mb(); | |
@@ -931,8 +946,8 @@ static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx) | |
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | |
if ( opt_cpu_info ) | |
- printk(XENLOG_DEBUG "cpuid.MWAIT[eax=%x ebx=%x ecx=%x edx=%x]\n", | |
- eax, ebx, ecx, edx); | |
+ printk(XENLOG_DEBUG "cpuid.MWAIT[eax=%x ebx=%x ecx=%x edx=%x]\n", eax, | |
+ ebx, ecx, edx); | |
/* Check whether this particular cx_type (in CST) is supported or not */ | |
cstate_type = (cx->reg.address >> MWAIT_SUBSTATE_SIZE) + 1; | |
@@ -962,7 +977,8 @@ static int acpi_processor_ffh_cstate_probe(xen_processor_cx_t *cx) | |
* | |
* This routine is called only after all the CPUs are online | |
*/ | |
-static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags) | |
+static void | |
+acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags) | |
{ | |
struct cpuinfo_x86 *c = ¤t_cpu_data; | |
@@ -987,19 +1003,19 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag | |
* P4, Core and beyond CPUs | |
*/ | |
if ( c->x86_vendor == X86_VENDOR_INTEL && | |
- (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) ) | |
- flags->bm_control = 0; | |
+ (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) ) | |
+ flags->bm_control = 0; | |
} | |
-#define VENDOR_INTEL (1) | |
-#define NATIVE_CSTATE_BEYOND_HALT (2) | |
+#define VENDOR_INTEL (1) | |
+#define NATIVE_CSTATE_BEYOND_HALT (2) | |
static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
{ | |
static int bm_check_flag = -1; | |
static int bm_control_flag = -1; | |
- switch ( cx->reg.space_id ) | |
+ switch (cx->reg.space_id) | |
{ | |
case ACPI_ADR_SPACE_SYSTEM_IO: | |
if ( cx->reg.address == 0 ) | |
@@ -1007,7 +1023,7 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
break; | |
case ACPI_ADR_SPACE_FIXED_HARDWARE: | |
- if ( cx->reg.bit_width != VENDOR_INTEL || | |
+ if ( cx->reg.bit_width != VENDOR_INTEL || | |
cx->reg.bit_offset != NATIVE_CSTATE_BEYOND_HALT ) | |
return -EINVAL; | |
@@ -1020,7 +1036,7 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
return -ENODEV; | |
} | |
- switch ( cx->type ) | |
+ switch (cx->type) | |
{ | |
case ACPI_STATE_C2: | |
if ( local_apic_timer_c2_ok ) | |
@@ -1048,15 +1064,15 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
if ( power->flags.has_cst != 1 ) | |
{ | |
/* bus mastering control is necessary */ | |
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
- "C3 support requires BM control\n")); | |
+ ACPI_DEBUG_PRINT( | |
+ (ACPI_DB_INFO, "C3 support requires BM control\n")); | |
return -EINVAL; | |
} | |
else | |
{ | |
/* Here we enter C3 without bus mastering */ | |
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
- "C3 support without BM control\n")); | |
+ ACPI_DEBUG_PRINT( | |
+ (ACPI_DB_INFO, "C3 support without BM control\n")); | |
} | |
} | |
/* | |
@@ -1077,8 +1093,8 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
if ( !(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD) ) | |
{ | |
ACPI_DEBUG_PRINT((ACPI_DB_INFO, | |
- "Cache invalidation should work properly" | |
- " for C3 to be enabled on SMP systems\n")); | |
+ "Cache invalidation should work properly" | |
+ " for C3 to be enabled on SMP systems\n")); | |
return -EINVAL; | |
} | |
} | |
@@ -1099,16 +1115,15 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx) | |
static unsigned int latency_factor = 2; | |
integer_param("idle_latency_factor", latency_factor); | |
-static void set_cx( | |
- struct acpi_processor_power *acpi_power, | |
- xen_processor_cx_t *xen_cx) | |
+static void set_cx(struct acpi_processor_power *acpi_power, | |
+ xen_processor_cx_t *xen_cx) | |
{ | |
struct acpi_processor_cx *cx; | |
if ( check_cx(acpi_power, xen_cx) != 0 ) | |
return; | |
- switch ( xen_cx->type ) | |
+ switch (xen_cx->type) | |
{ | |
case ACPI_STATE_C1: | |
cx = &acpi_power->states[1]; | |
@@ -1116,9 +1131,9 @@ static void set_cx( | |
default: | |
if ( acpi_power->count >= ACPI_PROCESSOR_MAX_POWER ) | |
{ | |
- case ACPI_STATE_C0: | |
- printk(XENLOG_WARNING "CPU%u: C%d data ignored\n", | |
- acpi_power->cpu, xen_cx->type); | |
+ case ACPI_STATE_C0: | |
+ printk(XENLOG_WARNING "CPU%u: C%d data ignored\n", acpi_power->cpu, | |
+ xen_cx->type); | |
return; | |
} | |
cx = &acpi_power->states[acpi_power->count]; | |
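The hunk above preserves one of the oddest constructs in this file: the `case ACPI_STATE_C0:` label lives *inside* the `if` body under `default:`, so a C0 entry jumps straight to the warning path, while any other type reaches it only when the state table is full. This is legal C (case labels may sit anywhere inside the switch body); a minimal demonstration:

```c
#include <stdio.h>

static void classify(int type, int full)
{
    switch (type)
    {
    case 1:
        printf("C1 slot\n");
        break;
    default:
        if (full)
        {
    case 0: /* jumping here bypasses the if() condition entirely */
            printf("ignored (type %d)\n", type);
            return;
        }
        printf("stored (type %d)\n", type);
        break;
    }
}

int main(void)
{
    classify(1, 0); /* C1 slot */
    classify(2, 0); /* stored */
    classify(2, 1); /* ignored: table full */
    classify(0, 0); /* ignored: case 0 lands inside the if body */
    return 0;
}
```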
@@ -1128,7 +1143,7 @@ static void set_cx( | |
cx->address = xen_cx->reg.address; | |
- switch ( xen_cx->reg.space_id ) | |
+ switch (xen_cx->reg.space_id) | |
{ | |
case ACPI_ADR_SPACE_FIXED_HARDWARE: | |
if ( xen_cx->reg.bit_width == VENDOR_INTEL && | |
@@ -1183,7 +1198,7 @@ int get_cpu_id(u32 acpi_id) | |
static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power) | |
{ | |
XEN_GUEST_HANDLE(xen_processor_cx_t) states; | |
- xen_processor_cx_t state; | |
+ xen_processor_cx_t state; | |
XEN_GUEST_HANDLE(xen_processor_csd_t) csd; | |
xen_processor_csd_t dp; | |
uint32_t i; | |
@@ -1192,29 +1207,30 @@ static void print_cx_pminfo(uint32_t cpu, struct xen_processor_power *power) | |
printk("\tcount = %d\n", power->count); | |
printk("\tflags: bm_cntl[%d], bm_chk[%d], has_cst[%d],\n" | |
"\t pwr_setup_done[%d], bm_rld_set[%d]\n", | |
- power->flags.bm_control, power->flags.bm_check, power->flags.has_cst, | |
- power->flags.power_setup_done, power->flags.bm_rld_set); | |
- | |
+ power->flags.bm_control, power->flags.bm_check, | |
+ power->flags.has_cst, power->flags.power_setup_done, | |
+ power->flags.bm_rld_set); | |
+ | |
states = power->states; | |
- | |
+ | |
for ( i = 0; i < power->count; i++ ) | |
{ | |
if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) ) | |
return; | |
- | |
+ | |
printk("\tstates[%d]:\n", i); | |
printk("\t\treg.space_id = %#x\n", state.reg.space_id); | |
printk("\t\treg.bit_width = %#x\n", state.reg.bit_width); | |
printk("\t\treg.bit_offset = %#x\n", state.reg.bit_offset); | |
printk("\t\treg.access_size = %#x\n", state.reg.access_size); | |
- printk("\t\treg.address = %#"PRIx64"\n", state.reg.address); | |
+ printk("\t\treg.address = %#" PRIx64 "\n", state.reg.address); | |
printk("\t\ttype = %d\n", state.type); | |
printk("\t\tlatency = %d\n", state.latency); | |
printk("\t\tpower = %d\n", state.power); | |
csd = state.dp; | |
printk("\t\tdp(@0x%p)\n", csd.p); | |
- | |
+ | |
if ( csd.p != NULL ) | |
{ | |
if ( unlikely(copy_from_guest(&dp, csd, 1)) ) | |
@@ -1313,7 +1329,7 @@ long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power) | |
dead_idle = acpi_dead_idle; | |
} | |
- | |
+ | |
return 0; | |
} | |
@@ -1321,8 +1337,8 @@ static void amd_cpuidle_init(struct acpi_processor_power *power) | |
{ | |
unsigned int i, nr = 0; | |
const struct cpuinfo_x86 *c = ¤t_cpu_data; | |
- const unsigned int ecx_req = CPUID5_ECX_EXTENSIONS_SUPPORTED | | |
- CPUID5_ECX_INTERRUPT_BREAK; | |
+ const unsigned int ecx_req = | |
+ CPUID5_ECX_EXTENSIONS_SUPPORTED | CPUID5_ECX_INTERRUPT_BREAK; | |
const struct acpi_processor_cx *cx = NULL; | |
static const struct acpi_processor_cx fam17[] = { | |
{ | |
@@ -1343,12 +1359,12 @@ static void amd_cpuidle_init(struct acpi_processor_power *power) | |
if ( vendor_override < 0 ) | |
return; | |
- switch ( c->x86 ) | |
+ switch (c->x86) | |
{ | |
case 0x18: | |
if ( boot_cpu_data.x86_vendor != X86_VENDOR_HYGON ) | |
{ | |
- default: | |
+ default: | |
vendor_override = -1; | |
return; | |
} | |
@@ -1423,8 +1439,8 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat) | |
struct acpi_processor_power *power = processor_powers[cpuid]; | |
uint64_t idle_usage = 0, idle_res = 0; | |
uint64_t last_state_update_tick, current_stime, current_tick; | |
- uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 }; | |
- uint64_t res[ACPI_PROCESSOR_MAX_POWER] = { 0 }; | |
+ uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = {0}; | |
+ uint64_t res[ACPI_PROCESSOR_MAX_POWER] = {0}; | |
unsigned int i, nr, nr_pc = 0, nr_cc = 0; | |
if ( power == NULL ) | |
@@ -1473,8 +1489,8 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat) | |
if ( last_state_idx >= 0 ) | |
{ | |
usage[last_state_idx]++; | |
- res[last_state_idx] += ticks_elapsed(last_state_update_tick, | |
- current_tick); | |
+ res[last_state_idx] += | |
+ ticks_elapsed(last_state_update_tick, current_tick); | |
stat->last = last_state_idx; | |
} | |
else | |
@@ -1489,12 +1505,13 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat) | |
get_hw_residencies(cpuid, &hw_res); | |
-#define PUT_xC(what, n) do { \ | |
- if ( stat->nr_##what >= n && \ | |
+#define PUT_xC(what, n) \ | |
+ do { \ | |
+ if ( stat->nr_##what >= n && \ | |
copy_to_guest_offset(stat->what, n - 1, &hw_res.what##n, 1) ) \ | |
- return -EFAULT; \ | |
- if ( hw_res.what##n ) \ | |
- nr_##what = n; \ | |
+ return -EFAULT; \ | |
+ if ( hw_res.what##n ) \ | |
+ nr_##what = n; \ | |
} while ( 0 ) | |
#define PUT_PC(n) PUT_xC(pc, n) | |
PUT_PC(2); | |
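`PUT_xC()` is the classic `do { ... } while ( 0 )` multi-statement macro: the wrapper turns the expansion into a single statement, so it nests safely under `if`/`else` and still demands a trailing semicolon. A tiny demonstration:

```c
#include <stdio.h>

/* Without the do/while wrapper, the second statement would escape the
 * if() below and run unconditionally. */
#define REPORT_AND_COUNT(msg, counter) \
    do {                               \
        printf("%s\n", (msg));         \
        (counter)++;                   \
    } while (0)

int main(void)
{
    int errors = 0;

    if (1)
        REPORT_AND_COUNT("first", errors);   /* expands to one statement */
    else
        printf("never\n");                   /* if/else still pairs up */

    printf("errors = %d\n", errors);         /* -> 1 */
    return 0;
}
```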
@@ -1548,12 +1565,13 @@ void cpuidle_disable_deep_cstate(void) | |
bool cpuidle_using_deep_cstate(void) | |
{ | |
- return xen_cpuidle && max_cstate > (local_apic_timer_c2_ok ? ACPI_STATE_C2 | |
- : ACPI_STATE_C1); | |
+ return xen_cpuidle && | |
+ max_cstate > | |
+ (local_apic_timer_c2_ok ? ACPI_STATE_C2 : ACPI_STATE_C1); | |
} | |
-static int cpu_callback( | |
- struct notifier_block *nfb, unsigned long action, void *hcpu) | |
+static int cpu_callback(struct notifier_block *nfb, unsigned long action, | |
+ void *hcpu) | |
{ | |
unsigned int cpu = (unsigned long)hcpu; | |
int rc = 0; | |
@@ -1562,7 +1580,7 @@ static int cpu_callback( | |
* Only hook on CPU_UP_PREPARE / CPU_ONLINE because a dead cpu may utilize | |
* the info to enter deep C-state. | |
*/ | |
- switch ( action ) | |
+ switch (action) | |
{ | |
case CPU_UP_PREPARE: | |
rc = cpuidle_init_cpu(cpu); | |
@@ -1581,9 +1599,7 @@ static int cpu_callback( | |
return !rc ? NOTIFY_DONE : notifier_from_errno(rc); | |
} | |
-static struct notifier_block cpu_nfb = { | |
- .notifier_call = cpu_callback | |
-}; | |
+static struct notifier_block cpu_nfb = {.notifier_call = cpu_callback}; | |
static int __init cpuidle_presmp_init(void) | |
{ | |
@@ -1599,4 +1615,3 @@ static int __init cpuidle_presmp_init(void) | |
return 0; | |
} | |
presmp_initcall(cpuidle_presmp_init); | |
- | |
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c | |
index 7086d1aa15..0ca034979a 100644 | |
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c | |
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c | |
@@ -43,13 +43,14 @@ | |
#include <acpi/acpi.h> | |
#include <acpi/cpufreq/cpufreq.h> | |
-enum { | |
+enum | |
+{ | |
UNDEFINED_CAPABLE = 0, | |
SYSTEM_INTEL_MSR_CAPABLE, | |
SYSTEM_IO_CAPABLE, | |
}; | |
-#define INTEL_MSR_RANGE (0xffffull) | |
+#define INTEL_MSR_RANGE (0xffffull) | |
struct acpi_cpufreq_data *cpufreq_drv_data[NR_CPUS]; | |
@@ -60,8 +61,8 @@ static int check_est_cpu(unsigned int cpuid) | |
{ | |
struct cpuinfo_x86 *cpu = &cpu_data[cpuid]; | |
- if (cpu->x86_vendor != X86_VENDOR_INTEL || | |
- !cpu_has(cpu, X86_FEATURE_EIST)) | |
+ if ( cpu->x86_vendor != X86_VENDOR_INTEL || | |
+ !cpu_has(cpu, X86_FEATURE_EIST) ) | |
return 0; | |
return 1; | |
@@ -74,8 +75,9 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) | |
perf = data->acpi_data; | |
- for (i=0; i<perf->state_count; i++) { | |
- if (value == perf->states[i].status) | |
+ for ( i = 0; i < perf->state_count; i++ ) | |
+ { | |
+ if ( value == perf->states[i].status ) | |
return data->freq_table[i].frequency; | |
} | |
return 0; | |
@@ -89,8 +91,9 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) | |
msr &= INTEL_MSR_RANGE; | |
perf = data->acpi_data; | |
- for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { | |
- if (msr == perf->states[data->freq_table[i].index].status) | |
+ for ( i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++ ) | |
+ { | |
+ if ( msr == perf->states[data->freq_table[i].index].status ) | |
return data->freq_table[i].frequency; | |
} | |
return data->freq_table[0].frequency; | |
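`extract_msr()` masks `MSR_IA32_PERF_STATUS` down to `INTEL_MSR_RANGE` (the low 16 bits) and linearly searches the driver's frequency table for a P-state whose status word matches, defaulting to the table's first (highest) entry. A standalone sketch with made-up status values:

```c
#include <stdint.h>
#include <stdio.h>

#define MSR_RANGE 0xffffu
#define TABLE_END 0xffffffffu

struct freq_entry { unsigned index, frequency; };  /* frequency in kHz */
struct pstate { uint32_t status; };

static unsigned msr_to_freq(uint32_t msr, const struct freq_entry *tbl,
                            const struct pstate *states)
{
    msr &= MSR_RANGE;   /* only the low 16 bits encode the P-state */
    for (unsigned i = 0; tbl[i].frequency != TABLE_END; i++)
        if (msr == states[tbl[i].index].status)
            return tbl[i].frequency;
    return tbl[0].frequency;    /* unknown value: report highest state */
}

int main(void)
{
    /* Illustrative status words and a descending frequency table. */
    const struct pstate st[] = { {0x0A1F}, {0x081C}, {0x0613} };
    const struct freq_entry tbl[] =
        { {0, 2600000}, {1, 2100000}, {2, 1600000}, {0, TABLE_END} };

    printf("%u kHz\n", msr_to_freq(0xDEAD081C, tbl, st)); /* -> 2100000 */
    return 0;
}
```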
@@ -98,7 +101,8 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) | |
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) | |
{ | |
- switch (data->arch_cpu_flags) { | |
+ switch (data->arch_cpu_flags) | |
+ { | |
case SYSTEM_INTEL_MSR_CAPABLE: | |
return extract_msr(val, data); | |
case SYSTEM_IO_CAPABLE: | |
@@ -108,11 +112,13 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) | |
} | |
} | |
-struct msr_addr { | |
+struct msr_addr | |
+{ | |
u32 reg; | |
}; | |
-struct io_addr { | |
+struct io_addr | |
+{ | |
u16 port; | |
u8 bit_width; | |
}; | |
@@ -122,7 +128,8 @@ typedef union { | |
struct io_addr io; | |
} drv_addr_union; | |
-struct drv_cmd { | |
+struct drv_cmd | |
+{ | |
unsigned int type; | |
const cpumask_t *mask; | |
drv_addr_union addr; | |
@@ -135,13 +142,14 @@ static void do_drv_read(void *drvcmd) | |
cmd = (struct drv_cmd *)drvcmd; | |
- switch (cmd->type) { | |
+ switch (cmd->type) | |
+ { | |
case SYSTEM_INTEL_MSR_CAPABLE: | |
rdmsrl(cmd->addr.msr.reg, cmd->val); | |
break; | |
case SYSTEM_IO_CAPABLE: | |
- acpi_os_read_port((acpi_io_address)cmd->addr.io.port, | |
- &cmd->val, (u32)cmd->addr.io.bit_width); | |
+ acpi_os_read_port((acpi_io_address)cmd->addr.io.port, &cmd->val, | |
+ (u32)cmd->addr.io.bit_width); | |
break; | |
default: | |
break; | |
@@ -155,16 +163,17 @@ static void do_drv_write(void *drvcmd) | |
cmd = (struct drv_cmd *)drvcmd; | |
- switch (cmd->type) { | |
+ switch (cmd->type) | |
+ { | |
case SYSTEM_INTEL_MSR_CAPABLE: | |
rdmsrl(cmd->addr.msr.reg, msr_content); | |
- msr_content = (msr_content & ~INTEL_MSR_RANGE) | |
- | (cmd->val & INTEL_MSR_RANGE); | |
+ msr_content = | |
+ (msr_content & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); | |
wrmsrl(cmd->addr.msr.reg, msr_content); | |
break; | |
case SYSTEM_IO_CAPABLE: | |
- acpi_os_write_port((acpi_io_address)cmd->addr.io.port, | |
- cmd->val, (u32)cmd->addr.io.bit_width); | |
+ acpi_os_write_port((acpi_io_address)cmd->addr.io.port, cmd->val, | |
+ (u32)cmd->addr.io.bit_width); | |
break; | |
default: | |
break; | |
@@ -178,7 +187,7 @@ static void drv_read(struct drv_cmd *cmd) | |
ASSERT(cpumask_weight(cmd->mask) == 1); | |
/* to reduce IPI for the sake of performance */ | |
- if (likely(cpumask_test_cpu(smp_processor_id(), cmd->mask))) | |
+ if ( likely(cpumask_test_cpu(smp_processor_id(), cmd->mask)) ) | |
do_drv_read((void *)cmd); | |
else | |
on_selected_cpus(cmd->mask, do_drv_read, cmd, 1); | |
@@ -186,7 +195,7 @@ static void drv_read(struct drv_cmd *cmd) | |
static void drv_write(struct drv_cmd *cmd) | |
{ | |
- if (cpumask_equal(cmd->mask, cpumask_of(smp_processor_id()))) | |
+ if ( cpumask_equal(cmd->mask, cpumask_of(smp_processor_id())) ) | |
do_drv_write((void *)cmd); | |
else | |
on_selected_cpus(cmd->mask, do_drv_write, cmd, 1); | |
@@ -199,19 +208,20 @@ static u32 get_cur_val(const cpumask_t *mask) | |
struct drv_cmd cmd; | |
unsigned int cpu = smp_processor_id(); | |
- if (unlikely(cpumask_empty(mask))) | |
+ if ( unlikely(cpumask_empty(mask)) ) | |
return 0; | |
- if (!cpumask_test_cpu(cpu, mask)) | |
+ if ( !cpumask_test_cpu(cpu, mask) ) | |
cpu = cpumask_first(mask); | |
- if (cpu >= nr_cpu_ids || !cpu_online(cpu)) | |
+ if ( cpu >= nr_cpu_ids || !cpu_online(cpu) ) | |
return 0; | |
policy = per_cpu(cpufreq_cpu_policy, cpu); | |
- if (!policy || !cpufreq_drv_data[policy->cpu]) | |
- return 0; | |
+ if ( !policy || !cpufreq_drv_data[policy->cpu] ) | |
+ return 0; | |
- switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) { | |
+ switch (cpufreq_drv_data[policy->cpu]->arch_cpu_flags) | |
+ { | |
case SYSTEM_INTEL_MSR_CAPABLE: | |
cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | |
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; | |
@@ -232,9 +242,11 @@ static u32 get_cur_val(const cpumask_t *mask) | |
return cmd.val; | |
} | |
-struct perf_pair { | |
+struct perf_pair | |
+{ | |
union { | |
- struct { | |
+ struct | |
+ { | |
uint32_t lo; | |
uint32_t hi; | |
} split; | |
@@ -267,16 +279,16 @@ static void read_measured_perf_ctrs(void *_readin) | |
*/ | |
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) | |
{ | |
- struct cpufreq_policy *policy; | |
+ struct cpufreq_policy *policy; | |
struct perf_pair readin, cur, *saved; | |
unsigned int perf_percent; | |
unsigned int retval; | |
- if (!cpu_online(cpu)) | |
+ if ( !cpu_online(cpu) ) | |
return 0; | |
policy = per_cpu(cpufreq_cpu_policy, cpu); | |
- if (!policy || !policy->aperf_mperf) | |
+ if ( !policy || !policy->aperf_mperf ) | |
return 0; | |
switch (flag) | |
@@ -295,11 +307,13 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) | |
return 0; | |
} | |
- if (cpu == smp_processor_id()) { | |
+ if ( cpu == smp_processor_id() ) | |
+ { | |
read_measured_perf_ctrs((void *)&readin); | |
- } else { | |
- on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs, | |
- &readin, 1); | |
+ } | |
+ else | |
+ { | |
+ on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs, &readin, 1); | |
} | |
cur.aperf.whole = readin.aperf.whole - saved->aperf.whole; | |
@@ -307,13 +321,14 @@ unsigned int get_measured_perf(unsigned int cpu, unsigned int flag) | |
saved->aperf.whole = readin.aperf.whole; | |
saved->mperf.whole = readin.mperf.whole; | |
- if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) { | |
+ if ( unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole) ) | |
+ { | |
int shift_count = 7; | |
cur.aperf.whole >>= shift_count; | |
cur.mperf.whole >>= shift_count; | |
} | |
- if (cur.aperf.whole && cur.mperf.whole) | |
+ if ( cur.aperf.whole && cur.mperf.whole ) | |
perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole; | |
else | |
perf_percent = 0; | |
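`get_measured_perf()` derives delivered performance as `aperf_delta * 100 / mperf_delta`, pre-shifting both deltas by 7 when the multiply could overflow; the shift cancels out of the ratio. Extracted into a runnable form:

```c
#include <stdint.h>
#include <stdio.h>

static unsigned perf_percent(uint64_t aperf_delta, uint64_t mperf_delta)
{
    /* Guard: if aperf * 100 would overflow 64 bits, scale both deltas
     * down; the ratio is unchanged. */
    if ((UINT64_MAX / 100) < aperf_delta) {
        aperf_delta >>= 7;
        mperf_delta >>= 7;
    }
    return (aperf_delta && mperf_delta)
           ? (unsigned)(aperf_delta * 100 / mperf_delta) : 0;
}

int main(void)
{
    /* CPU ran at ~80% of its reference clock over the sample window. */
    printf("%u%%\n", perf_percent(800000, 1000000));               /* 80 */

    /* Huge deltas still work thanks to the pre-shift. */
    printf("%u%%\n", perf_percent(UINT64_MAX / 2, UINT64_MAX / 2)); /* 100 */
    return 0;
}
```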
@@ -329,16 +344,16 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |
struct acpi_cpufreq_data *data; | |
unsigned int freq; | |
- if (!cpu_online(cpu)) | |
+ if ( !cpu_online(cpu) ) | |
return 0; | |
policy = per_cpu(cpufreq_cpu_policy, cpu); | |
- if (!policy) | |
+ if ( !policy ) | |
return 0; | |
data = cpufreq_drv_data[policy->cpu]; | |
- if (unlikely(data == NULL || | |
- data->acpi_data == NULL || data->freq_table == NULL)) | |
+ if ( unlikely(data == NULL || data->acpi_data == NULL || | |
+ data->freq_table == NULL) ) | |
return 0; | |
freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); | |
@@ -357,9 +372,10 @@ static void feature_detect(void *info) | |
} | |
eax = cpuid_eax(6); | |
- if (eax & 0x2) { | |
+ if ( eax & 0x2 ) | |
+ { | |
policy->turbo = CPUFREQ_TURBO_ENABLED; | |
- if (cpufreq_verbose) | |
+ if ( cpufreq_verbose ) | |
printk(XENLOG_INFO "CPU%u: Turbo Mode detected and enabled\n", | |
smp_processor_id()); | |
} | |
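`feature_detect()` in the hunk above treats bit 1 of EAX from CPUID leaf 6 (Intel's thermal/power leaf) as "Turbo Boost available". A sketch using the GCC/Clang `__get_cpuid()` helper, x86 only:

```c
#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang compiler helper, not a libc header */

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    /* Leaf 6 = thermal and power management; EAX bit 1 is the
     * "Intel Turbo Boost available" flag the code above checks. */
    if (__get_cpuid(6, &eax, &ebx, &ecx, &edx) && (eax & 0x2))
        printf("Turbo Mode detected\n");
    else
        printf("no turbo (or leaf 6 not supported)\n");
    return 0;
}
```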
@@ -371,9 +387,10 @@ static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, | |
unsigned int cur_freq; | |
unsigned int i; | |
- for (i=0; i<100; i++) { | |
+ for ( i = 0; i < 100; i++ ) | |
+ { | |
cur_freq = extract_freq(get_cur_val(mask), data); | |
- if (cur_freq == freq) | |
+ if ( cur_freq == freq ) | |
return 1; | |
udelay(10); | |
} | |
@@ -388,55 +405,56 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |
struct cpufreq_freqs freqs; | |
cpumask_t online_policy_cpus; | |
struct drv_cmd cmd; | |
- unsigned int next_state = 0; /* Index into freq_table */ | |
+ unsigned int next_state = 0; /* Index into freq_table */ | |
unsigned int next_perf_state = 0; /* Index into perf table */ | |
unsigned int j; | |
int result = 0; | |
- if (unlikely(data == NULL || | |
- data->acpi_data == NULL || data->freq_table == NULL)) { | |
+ if ( unlikely(data == NULL || data->acpi_data == NULL || | |
+ data->freq_table == NULL) ) | |
+ { | |
return -ENODEV; | |
} | |
- if (policy->turbo == CPUFREQ_TURBO_DISABLED) | |
- if (target_freq > policy->cpuinfo.second_max_freq) | |
+ if ( policy->turbo == CPUFREQ_TURBO_DISABLED ) | |
+ if ( target_freq > policy->cpuinfo.second_max_freq ) | |
target_freq = policy->cpuinfo.second_max_freq; | |
perf = data->acpi_data; | |
- result = cpufreq_frequency_table_target(policy, | |
- data->freq_table, | |
- target_freq, | |
- relation, &next_state); | |
- if (unlikely(result)) | |
+ result = cpufreq_frequency_table_target( | |
+ policy, data->freq_table, target_freq, relation, &next_state); | |
+ if ( unlikely(result) ) | |
return -ENODEV; | |
cpumask_and(&online_policy_cpus, &cpu_online_map, policy->cpus); | |
next_perf_state = data->freq_table[next_state].index; | |
- if (perf->state == next_perf_state) { | |
- if (unlikely(policy->resume)) | |
+ if ( perf->state == next_perf_state ) | |
+ { | |
+ if ( unlikely(policy->resume) ) | |
policy->resume = 0; | |
else | |
return 0; | |
} | |
- switch (data->arch_cpu_flags) { | |
+ switch (data->arch_cpu_flags) | |
+ { | |
case SYSTEM_INTEL_MSR_CAPABLE: | |
cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | |
cmd.addr.msr.reg = MSR_IA32_PERF_CTL; | |
- cmd.val = (u32) perf->states[next_perf_state].control; | |
+ cmd.val = (u32)perf->states[next_perf_state].control; | |
break; | |
case SYSTEM_IO_CAPABLE: | |
cmd.type = SYSTEM_IO_CAPABLE; | |
cmd.addr.io.port = perf->control_register.address; | |
cmd.addr.io.bit_width = perf->control_register.bit_width; | |
- cmd.val = (u32) perf->states[next_perf_state].control; | |
+ cmd.val = (u32)perf->states[next_perf_state].control; | |
break; | |
default: | |
return -ENODEV; | |
} | |
- if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) | |
+ if ( policy->shared_type != CPUFREQ_SHARED_TYPE_ANY ) | |
cmd.mask = &online_policy_cpus; | |
else | |
cmd.mask = cpumask_of(policy->cpu); | |
@@ -446,12 +464,13 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |
drv_write(&cmd); | |
- if (acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data)) { | |
+ if ( acpi_pstate_strict && !check_freqs(cmd.mask, freqs.new, data) ) | |
+ { | |
printk(KERN_WARNING "Fail transfer to new freq %d\n", freqs.new); | |
return -EAGAIN; | |
} | |
- for_each_cpu(j, &online_policy_cpus) | |
+ for_each_cpu (j, &online_policy_cpus) | |
cpufreq_statistic_update(j, perf->state, next_perf_state); | |
perf->state = next_perf_state; | |
@@ -465,48 +484,52 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | |
struct acpi_cpufreq_data *data; | |
struct processor_performance *perf; | |
- if (!policy || !(data = cpufreq_drv_data[policy->cpu]) || | |
- !processor_pminfo[policy->cpu]) | |
+ if ( !policy || !(data = cpufreq_drv_data[policy->cpu]) || | |
+ !processor_pminfo[policy->cpu] ) | |
return -EINVAL; | |
perf = &processor_pminfo[policy->cpu]->perf; | |
- cpufreq_verify_within_limits(policy, 0, | |
- perf->states[perf->platform_limit].core_frequency * 1000); | |
+ cpufreq_verify_within_limits( | |
+ policy, 0, perf->states[perf->platform_limit].core_frequency * 1000); | |
return cpufreq_frequency_table_verify(policy, data->freq_table); | |
} | |
-static unsigned long | |
-acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | |
+static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, | |
+ unsigned int cpu) | |
{ | |
struct processor_performance *perf = data->acpi_data; | |
- if (cpu_khz) { | |
+ if ( cpu_khz ) | |
+ { | |
        /* search for the closest match to cpu_khz */ | |
unsigned int i; | |
unsigned long freq; | |
unsigned long freqn = perf->states[0].core_frequency * 1000; | |
- for (i=0; i<(perf->state_count-1); i++) { | |
+ for ( i = 0; i < (perf->state_count - 1); i++ ) | |
+ { | |
freq = freqn; | |
- freqn = perf->states[i+1].core_frequency * 1000; | |
- if ((2 * cpu_khz) > (freqn + freq)) { | |
+ freqn = perf->states[i + 1].core_frequency * 1000; | |
+ if ( (2 * cpu_khz) > (freqn + freq) ) | |
+ { | |
perf->state = i; | |
return freq; | |
} | |
} | |
- perf->state = perf->state_count-1; | |
+ perf->state = perf->state_count - 1; | |
return freqn; | |
- } else { | |
+ } | |
+ else | |
+ { | |
/* assume CPU is at P0... */ | |
perf->state = 0; | |
return perf->states[0].core_frequency * 1000; | |
} | |
} | |
-static int | |
-acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
{ | |
unsigned int i; | |
unsigned int valid_states = 0; | |
@@ -517,7 +540,7 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
struct processor_performance *perf; | |
data = xzalloc(struct acpi_cpufreq_data); | |
- if (!data) | |
+ if ( !data ) | |
return -ENOMEM; | |
cpufreq_drv_data[cpu] = data; | |
@@ -527,18 +550,20 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
perf = data->acpi_data; | |
policy->shared_type = perf->shared_type; | |
- switch (perf->control_register.space_id) { | |
+ switch (perf->control_register.space_id) | |
+ { | |
case ACPI_ADR_SPACE_SYSTEM_IO: | |
- if (cpufreq_verbose) | |
+ if ( cpufreq_verbose ) | |
printk("xen_pminfo: @acpi_cpufreq_cpu_init," | |
"SYSTEM IO addr space\n"); | |
data->arch_cpu_flags = SYSTEM_IO_CAPABLE; | |
break; | |
case ACPI_ADR_SPACE_FIXED_HARDWARE: | |
- if (cpufreq_verbose) | |
+ if ( cpufreq_verbose ) | |
printk("xen_pminfo: @acpi_cpufreq_cpu_init," | |
"HARDWARE addr space\n"); | |
- if (!check_est_cpu(cpu)) { | |
+ if ( !check_est_cpu(cpu) ) | |
+ { | |
result = -ENODEV; | |
goto err_unreg; | |
} | |
@@ -549,28 +574,31 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
goto err_unreg; | |
} | |
- data->freq_table = xmalloc_array(struct cpufreq_frequency_table, | |
- (perf->state_count+1)); | |
- if (!data->freq_table) { | |
+ data->freq_table = | |
+ xmalloc_array(struct cpufreq_frequency_table, (perf->state_count + 1)); | |
+ if ( !data->freq_table ) | |
+ { | |
result = -ENOMEM; | |
goto err_unreg; | |
} | |
/* detect transition latency */ | |
policy->cpuinfo.transition_latency = 0; | |
- for (i=0; i<perf->state_count; i++) { | |
- if ((perf->states[i].transition_latency * 1000) > | |
- policy->cpuinfo.transition_latency) | |
+ for ( i = 0; i < perf->state_count; i++ ) | |
+ { | |
+ if ( (perf->states[i].transition_latency * 1000) > | |
+ policy->cpuinfo.transition_latency ) | |
policy->cpuinfo.transition_latency = | |
perf->states[i].transition_latency * 1000; | |
} | |
- policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR; | |
+ policy->governor = cpufreq_opt_governor ?: CPUFREQ_DEFAULT_GOVERNOR; | |
/* table init */ | |
- for (i=0; i<perf->state_count; i++) { | |
- if (i>0 && perf->states[i].core_frequency >= | |
- data->freq_table[valid_states-1].frequency / 1000) | |
+ for ( i = 0; i < perf->state_count; i++ ) | |
+ { | |
+ if ( i > 0 && perf->states[i].core_frequency >= | |
+ data->freq_table[valid_states - 1].frequency / 1000 ) | |
continue; | |
data->freq_table[valid_states].index = i; | |
@@ -582,10 +610,11 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
perf->state = 0; | |
result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | |
- if (result) | |
+ if ( result ) | |
goto err_freqfree; | |
- switch (perf->control_register.space_id) { | |
+ switch (perf->control_register.space_id) | |
+ { | |
case ACPI_ADR_SPACE_SYSTEM_IO: | |
/* Current speed is unknown and not detectable by IO port */ | |
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | |
@@ -600,7 +629,7 @@ acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
/* Check for APERF/MPERF support in hardware | |
* also check for boost support */ | |
- if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) | |
+ if ( c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6 ) | |
on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1); | |
/* | |
@@ -624,7 +653,8 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
{ | |
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; | |
- if (data) { | |
+ if ( data ) | |
+ { | |
cpufreq_drv_data[policy->cpu] = NULL; | |
xfree(data->freq_table); | |
xfree(data); | |
@@ -634,23 +664,23 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
} | |
static const struct cpufreq_driver __initconstrel acpi_cpufreq_driver = { | |
- .name = "acpi-cpufreq", | |
+ .name = "acpi-cpufreq", | |
.verify = acpi_cpufreq_verify, | |
.target = acpi_cpufreq_target, | |
- .init = acpi_cpufreq_cpu_init, | |
- .exit = acpi_cpufreq_cpu_exit, | |
+ .init = acpi_cpufreq_cpu_init, | |
+ .exit = acpi_cpufreq_cpu_exit, | |
}; | |
static int __init cpufreq_driver_init(void) | |
{ | |
int ret = 0; | |
- if ((cpufreq_controller == FREQCTL_xen) && | |
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) | |
+ if ( (cpufreq_controller == FREQCTL_xen) && | |
+ (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ) | |
ret = cpufreq_register_driver(&acpi_cpufreq_driver); | |
- else if ((cpufreq_controller == FREQCTL_xen) && | |
- (boot_cpu_data.x86_vendor & | |
- (X86_VENDOR_AMD | X86_VENDOR_HYGON))) | |
+ else if ( (cpufreq_controller == FREQCTL_xen) && | |
+ (boot_cpu_data.x86_vendor & | |
+ (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ) | |
ret = powernow_register_driver(); | |
return ret; | |
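
For context on the control flow these hunks restyle: acpi_cpufreq_target()
first maps the requested frequency onto an entry of the driver-built
freq_table, then programs the selected P-state through MSR_IA32_PERF_CTL or
the ACPI I/O port. A minimal, self-contained sketch of the lookup step
follows; the table layout, end marker and helper names are illustrative
assumptions, not the Xen definitions.

    /* Simplified model of the lookup done by cpufreq_frequency_table_target(). */
    #include <stdio.h>

    #define TABLE_END ~0u

    struct freq_entry {
        unsigned int index;     /* index into the ACPI perf-state array */
        unsigned int frequency; /* kHz; TABLE_END terminates the table */
    };

    /* Pick the highest listed frequency not above the target. */
    static int pick_state(const struct freq_entry *t, unsigned int target,
                          unsigned int *next_state)
    {
        unsigned int i, best = TABLE_END;

        for ( i = 0; t[i].frequency != TABLE_END; i++ )
            if ( t[i].frequency <= target &&
                 (best == TABLE_END || t[i].frequency > t[best].frequency) )
                best = i;

        if ( best == TABLE_END )
            return -1; /* the driver returns -ENODEV here */

        *next_state = best;
        return 0;
    }

    int main(void)
    {
        /* Descending P-states, as acpi_cpufreq_cpu_init() builds them. */
        const struct freq_entry table[] = {
            { 0, 2400000 }, { 1, 2000000 }, { 2, 1600000 }, { 0, TABLE_END },
        };
        unsigned int state;

        if ( !pick_state(table, 2100000, &state) )
            printf("next_state=%u (%u kHz)\n", state, table[state].frequency);

        return 0;
    }
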
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c | |
index 72ab6a1eba..e782b3833d 100644 | |
--- a/xen/arch/x86/acpi/cpufreq/powernow.c | |
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c | |
@@ -37,20 +37,20 @@ | |
#include <acpi/acpi.h> | |
#include <acpi/cpufreq/cpufreq.h> | |
-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 | |
-#define CPB_CAPABLE 0x00000200 | |
-#define USE_HW_PSTATE 0x00000080 | |
-#define HW_PSTATE_MASK 0x00000007 | |
-#define HW_PSTATE_VALID_MASK 0x80000000 | |
-#define HW_PSTATE_MAX_MASK 0x000000f0 | |
-#define HW_PSTATE_MAX_SHIFT 4 | |
-#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ | |
-#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ | |
-#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ | |
-#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ | |
-#define MSR_HWCR_CPBDIS_MASK 0x02000000ULL | |
- | |
-#define ARCH_CPU_FLAG_RESUME 1 | |
+#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 | |
+#define CPB_CAPABLE 0x00000200 | |
+#define USE_HW_PSTATE 0x00000080 | |
+#define HW_PSTATE_MASK 0x00000007 | |
+#define HW_PSTATE_VALID_MASK 0x80000000 | |
+#define HW_PSTATE_MAX_MASK 0x000000f0 | |
+#define HW_PSTATE_MAX_SHIFT 4 | |
+#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ | |
+#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ | |
+#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ | |
+#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ | |
+#define MSR_HWCR_CPBDIS_MASK 0x02000000ULL | |
+ | |
+#define ARCH_CPU_FLAG_RESUME 1 | |
static void transition_pstate(void *pstate) | |
{ | |
@@ -61,24 +61,24 @@ static void update_cpb(void *data) | |
{ | |
struct cpufreq_policy *policy = (struct cpufreq_policy *)data; | |
- if (policy->turbo != CPUFREQ_TURBO_UNSUPPORTED) { | |
+ if ( policy->turbo != CPUFREQ_TURBO_UNSUPPORTED ) | |
+ { | |
uint64_t msr_content; | |
- | |
+ | |
rdmsrl(MSR_K8_HWCR, msr_content); | |
- if (policy->turbo == CPUFREQ_TURBO_ENABLED) | |
+ if ( policy->turbo == CPUFREQ_TURBO_ENABLED ) | |
msr_content &= ~MSR_HWCR_CPBDIS_MASK; | |
else | |
- msr_content |= MSR_HWCR_CPBDIS_MASK; | |
+ msr_content |= MSR_HWCR_CPBDIS_MASK; | |
wrmsrl(MSR_K8_HWCR, msr_content); | |
} | |
} | |
-static int powernow_cpufreq_update (int cpuid, | |
- struct cpufreq_policy *policy) | |
+static int powernow_cpufreq_update(int cpuid, struct cpufreq_policy *policy) | |
{ | |
- if (!cpumask_test_cpu(cpuid, &cpu_online_map)) | |
+ if ( !cpumask_test_cpu(cpuid, &cpu_online_map) ) | |
return -EINVAL; | |
on_selected_cpus(cpumask_of(cpuid), update_cpb, policy, 1); | |
@@ -87,53 +87,57 @@ static int powernow_cpufreq_update (int cpuid, | |
} | |
static int powernow_cpufreq_target(struct cpufreq_policy *policy, | |
- unsigned int target_freq, unsigned int relation) | |
+ unsigned int target_freq, | |
+ unsigned int relation) | |
{ | |
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; | |
struct processor_performance *perf; | |
- unsigned int next_state; /* Index into freq_table */ | |
+ unsigned int next_state; /* Index into freq_table */ | |
unsigned int next_perf_state; /* Index into perf table */ | |
int result; | |
- if (unlikely(data == NULL || | |
- data->acpi_data == NULL || data->freq_table == NULL)) { | |
+ if ( unlikely(data == NULL || data->acpi_data == NULL || | |
+ data->freq_table == NULL) ) | |
+ { | |
return -ENODEV; | |
} | |
perf = data->acpi_data; | |
- result = cpufreq_frequency_table_target(policy, | |
- data->freq_table, | |
- target_freq, | |
- relation, &next_state); | |
- if (unlikely(result)) | |
+ result = cpufreq_frequency_table_target( | |
+ policy, data->freq_table, target_freq, relation, &next_state); | |
+ if ( unlikely(result) ) | |
return result; | |
next_perf_state = data->freq_table[next_state].index; | |
- if (perf->state == next_perf_state) { | |
- if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME)) | |
+ if ( perf->state == next_perf_state ) | |
+ { | |
+ if ( unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME) ) | |
data->arch_cpu_flags &= ~ARCH_CPU_FLAG_RESUME; | |
else | |
return 0; | |
} | |
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW && | |
- likely(policy->cpu == smp_processor_id())) { | |
+ if ( policy->shared_type == CPUFREQ_SHARED_TYPE_HW && | |
+ likely(policy->cpu == smp_processor_id()) ) | |
+ { | |
transition_pstate(&next_perf_state); | |
cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state); | |
- } else { | |
+ } | |
+ else | |
+ { | |
cpumask_t online_policy_cpus; | |
unsigned int cpu; | |
cpumask_and(&online_policy_cpus, policy->cpus, &cpu_online_map); | |
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | |
- unlikely(policy->cpu != smp_processor_id())) | |
+ if ( policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | |
+ unlikely(policy->cpu != smp_processor_id()) ) | |
on_selected_cpus(&online_policy_cpus, transition_pstate, | |
&next_perf_state, 1); | |
else | |
transition_pstate(&next_perf_state); | |
- for_each_cpu(cpu, &online_policy_cpus) | |
+ for_each_cpu (cpu, &online_policy_cpus) | |
cpufreq_statistic_update(cpu, perf->state, next_perf_state); | |
} | |
@@ -149,7 +153,7 @@ static void amd_fixup_frequency(struct xen_processor_px *px) | |
int index = px->control & 0x00000007; | |
const struct cpuinfo_x86 *c = ¤t_cpu_data; | |
- if ((c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11) | |
+ if ( (c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11 ) | |
return; | |
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); | |
@@ -157,18 +161,19 @@ static void amd_fixup_frequency(struct xen_processor_px *px) | |
* MSR C001_0064+: | |
* Bit 63: PstateEn. Read-write. If set, the P-state is valid. | |
*/ | |
- if (!(hi & (1U << 31))) | |
+ if ( !(hi & (1U << 31)) ) | |
return; | |
fid = lo & 0x3f; | |
did = (lo >> 6) & 7; | |
- if (c->x86 == 0x10) | |
+ if ( c->x86 == 0x10 ) | |
px->core_frequency = (100 * (fid + 16)) >> did; | |
else | |
px->core_frequency = (100 * (fid + 8)) >> did; | |
} | |
-struct amd_cpu_data { | |
+struct amd_cpu_data | |
+{ | |
struct processor_performance *perf; | |
u32 max_hw_pstate; | |
}; | |
@@ -181,10 +186,10 @@ static void get_cpu_data(void *arg) | |
unsigned int i; | |
rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content); | |
- data->max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> | |
- HW_PSTATE_MAX_SHIFT; | |
+ data->max_hw_pstate = | |
+ (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; | |
- for (i = 0; i < perf->state_count && i <= data->max_hw_pstate; i++) | |
+ for ( i = 0; i < perf->state_count && i <= data->max_hw_pstate; i++ ) | |
amd_fixup_frequency(&perf->states[i]); | |
} | |
@@ -193,14 +198,14 @@ static int powernow_cpufreq_verify(struct cpufreq_policy *policy) | |
struct acpi_cpufreq_data *data; | |
struct processor_performance *perf; | |
- if (!policy || !(data = cpufreq_drv_data[policy->cpu]) || | |
- !processor_pminfo[policy->cpu]) | |
+ if ( !policy || !(data = cpufreq_drv_data[policy->cpu]) || | |
+ !processor_pminfo[policy->cpu] ) | |
return -EINVAL; | |
perf = &processor_pminfo[policy->cpu]->perf; | |
- cpufreq_verify_within_limits(policy, 0, | |
- perf->states[perf->platform_limit].core_frequency * 1000); | |
+ cpufreq_verify_within_limits( | |
+ policy, 0, perf->states[perf->platform_limit].core_frequency * 1000); | |
return cpufreq_frequency_table_verify(policy, data->freq_table); | |
} | |
@@ -217,9 +222,10 @@ static void feature_detect(void *info) | |
} | |
edx = cpuid_edx(CPUID_FREQ_VOLT_CAPABILITIES); | |
- if ((edx & CPB_CAPABLE) == CPB_CAPABLE) { | |
+ if ( (edx & CPB_CAPABLE) == CPB_CAPABLE ) | |
+ { | |
policy->turbo = CPUFREQ_TURBO_ENABLED; | |
- if (cpufreq_verbose) | |
+ if ( cpufreq_verbose ) | |
printk(XENLOG_INFO | |
"CPU%u: Core Boost/Turbo detected and enabled\n", | |
smp_processor_id()); | |
@@ -238,7 +244,7 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; | |
data = xzalloc(struct acpi_cpufreq_data); | |
- if (!data) | |
+ if ( !data ) | |
return -ENOMEM; | |
cpufreq_drv_data[cpu] = data; | |
@@ -248,58 +254,68 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
info.perf = perf = data->acpi_data; | |
policy->shared_type = perf->shared_type; | |
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | |
- policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { | |
+ if ( policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | |
+ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ) | |
+ { | |
cpumask_set_cpu(cpu, policy->cpus); | |
- if (cpumask_weight(policy->cpus) != 1) { | |
+ if ( cpumask_weight(policy->cpus) != 1 ) | |
+ { | |
printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n", | |
policy->shared_type, cpumask_weight(policy->cpus)); | |
result = -ENODEV; | |
goto err_unreg; | |
} | |
- } else { | |
+ } | |
+ else | |
+ { | |
cpumask_copy(policy->cpus, cpumask_of(cpu)); | |
} | |
/* capability check */ | |
- if (perf->state_count <= 1) { | |
+ if ( perf->state_count <= 1 ) | |
+ { | |
printk("No P-States\n"); | |
result = -ENODEV; | |
goto err_unreg; | |
} | |
- if (perf->control_register.space_id != perf->status_register.space_id) { | |
+ if ( perf->control_register.space_id != perf->status_register.space_id ) | |
+ { | |
result = -ENODEV; | |
goto err_unreg; | |
} | |
- data->freq_table = xmalloc_array(struct cpufreq_frequency_table, | |
- (perf->state_count+1)); | |
- if (!data->freq_table) { | |
+ data->freq_table = | |
+ xmalloc_array(struct cpufreq_frequency_table, (perf->state_count + 1)); | |
+ if ( !data->freq_table ) | |
+ { | |
result = -ENOMEM; | |
goto err_unreg; | |
} | |
/* detect transition latency */ | |
policy->cpuinfo.transition_latency = 0; | |
- for (i=0; i<perf->state_count; i++) { | |
- if ((perf->states[i].transition_latency * 1000) > | |
- policy->cpuinfo.transition_latency) | |
+ for ( i = 0; i < perf->state_count; i++ ) | |
+ { | |
+ if ( (perf->states[i].transition_latency * 1000) > | |
+ policy->cpuinfo.transition_latency ) | |
policy->cpuinfo.transition_latency = | |
perf->states[i].transition_latency * 1000; | |
} | |
- policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR; | |
+ policy->governor = cpufreq_opt_governor ?: CPUFREQ_DEFAULT_GOVERNOR; | |
on_selected_cpus(cpumask_of(cpu), get_cpu_data, &info, 1); | |
/* table init */ | |
- for (i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++) { | |
- if (i > 0 && perf->states[i].core_frequency >= | |
- data->freq_table[valid_states-1].frequency / 1000) | |
+ for ( i = 0; i < perf->state_count && i <= info.max_hw_pstate; i++ ) | |
+ { | |
+ if ( i > 0 && perf->states[i].core_frequency >= | |
+ data->freq_table[valid_states - 1].frequency / 1000 ) | |
continue; | |
- data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK; | |
+ data->freq_table[valid_states].index = | |
+ perf->states[i].control & HW_PSTATE_MASK; | |
data->freq_table[valid_states].frequency = | |
perf->states[i].core_frequency * 1000; | |
valid_states++; | |
@@ -308,12 +324,12 @@ static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
perf->state = 0; | |
result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | |
- if (result) | |
+ if ( result ) | |
goto err_freqfree; | |
- if (c->cpuid_level >= 6) | |
+ if ( c->cpuid_level >= 6 ) | |
on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1); | |
- | |
+ | |
/* | |
* the first call to ->target() should result in us actually | |
* writing something to the appropriate registers. | |
@@ -336,7 +352,8 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
{ | |
struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu]; | |
- if (data) { | |
+ if ( data ) | |
+ { | |
cpufreq_drv_data[policy->cpu] = NULL; | |
xfree(data->freq_table); | |
xfree(data); | |
@@ -346,30 +363,30 @@ static int powernow_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
} | |
static const struct cpufreq_driver __initconstrel powernow_cpufreq_driver = { | |
- .name = "powernow", | |
+ .name = "powernow", | |
.verify = powernow_cpufreq_verify, | |
.target = powernow_cpufreq_target, | |
- .init = powernow_cpufreq_cpu_init, | |
- .exit = powernow_cpufreq_cpu_exit, | |
- .update = powernow_cpufreq_update | |
-}; | |
+ .init = powernow_cpufreq_cpu_init, | |
+ .exit = powernow_cpufreq_cpu_exit, | |
+ .update = powernow_cpufreq_update}; | |
unsigned int __init powernow_register_driver() | |
{ | |
unsigned int i, ret = 0; | |
- for_each_online_cpu(i) { | |
+ for_each_online_cpu (i) | |
+ { | |
struct cpuinfo_x86 *c = &cpu_data[i]; | |
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) | |
+ if ( !(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ) | |
ret = -ENODEV; | |
else | |
{ | |
u32 eax, ebx, ecx, edx; | |
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | |
- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) | |
+ if ( (edx & USE_HW_PSTATE) != USE_HW_PSTATE ) | |
ret = -ENODEV; | |
} | |
- if (ret) | |
+ if ( ret ) | |
return ret; | |
} | |
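
The fid/did arithmetic in the amd_fixup_frequency() hunk above decodes the
P-state MSR: FID lives in bits 5:0, DID in bits 8:6, and the core frequency
in MHz is 100 * (FID + 16) >> DID on family 0x10, with an offset of 8 on
family 0x11. A standalone rendering of that math, using a made-up MSR value
rather than a hardware read:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int core_freq_mhz(uint32_t lo, unsigned int family)
    {
        unsigned int fid = lo & 0x3f;     /* bits 5:0 */
        unsigned int did = (lo >> 6) & 7; /* bits 8:6 */

        return (100 * (fid + (family == 0x10 ? 16 : 8))) >> did;
    }

    int main(void)
    {
        /* fid = 8, did = 1: (100 * (8 + 16)) >> 1 = 1200 MHz on family 0x10 */
        printf("%u MHz\n", core_freq_mhz(0x48, 0x10));
        return 0;
    }
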
diff --git a/xen/arch/x86/acpi/cpuidle_menu.c b/xen/arch/x86/acpi/cpuidle_menu.c | |
index 6ff5fb8ff2..488197ae40 100644 | |
--- a/xen/arch/x86/acpi/cpuidle_menu.c | |
+++ b/xen/arch/x86/acpi/cpuidle_menu.c | |
@@ -1,6 +1,6 @@ | |
/* | |
* cpuidle_menu - menu governor for cpu idle, main idea come from Linux. | |
- * drivers/cpuidle/governors/menu.c | |
+ * drivers/cpuidle/governors/menu.c | |
* | |
* Copyright (C) 2006-2007 Adam Belay <[email protected]> | |
* Copyright (C) 2007, 2008 Intel Corporation | |
@@ -92,23 +92,24 @@ | |
* measured idle time. | |
*/ | |
-struct perf_factor{ | |
- s_time_t time_stamp; | |
- s_time_t duration; | |
+struct perf_factor | |
+{ | |
+ s_time_t time_stamp; | |
+ s_time_t duration; | |
unsigned int irq_count_stamp; | |
unsigned int irq_sum; | |
}; | |
struct menu_device | |
{ | |
- int last_state_idx; | |
- unsigned int expected_us; | |
- u64 predicted_us; | |
- u64 latency_factor; | |
- unsigned int measured_us; | |
- unsigned int exit_us; | |
- unsigned int bucket; | |
- u64 correction_factor[BUCKETS]; | |
+ int last_state_idx; | |
+ unsigned int expected_us; | |
+ u64 predicted_us; | |
+ u64 latency_factor; | |
+ unsigned int measured_us; | |
+ unsigned int exit_us; | |
+ unsigned int bucket; | |
+ u64 correction_factor[BUCKETS]; | |
struct perf_factor pf; | |
}; | |
@@ -116,19 +117,19 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); | |
static inline int which_bucket(unsigned int duration) | |
{ | |
- int bucket = 0; | |
- | |
- if (duration < 10) | |
- return bucket; | |
- if (duration < 100) | |
- return bucket + 1; | |
- if (duration < 1000) | |
- return bucket + 2; | |
- if (duration < 10000) | |
- return bucket + 3; | |
- if (duration < 100000) | |
- return bucket + 4; | |
- return bucket + 5; | |
+ int bucket = 0; | |
+ | |
+ if ( duration < 10 ) | |
+ return bucket; | |
+ if ( duration < 100 ) | |
+ return bucket + 1; | |
+ if ( duration < 1000 ) | |
+ return bucket + 2; | |
+ if ( duration < 10000 ) | |
+ return bucket + 3; | |
+ if ( duration < 100000 ) | |
+ return bucket + 4; | |
+ return bucket + 5; | |
} | |
/* | |
@@ -139,35 +140,39 @@ static inline int which_bucket(unsigned int duration) | |
*/ | |
 /* 5 millisecond sampling period */ | |
-#define SAMPLING_PERIOD 5000000 | |
+#define SAMPLING_PERIOD 5000000 | |
 /* for I/O interrupts, we give an 8x multiplier compared to C-state latency */ | |
-#define IO_MULTIPLIER 8 | |
+#define IO_MULTIPLIER 8 | |
static inline s_time_t avg_intr_interval_us(void) | |
{ | |
struct menu_device *data = &this_cpu(menu_devices); | |
- s_time_t duration, now; | |
- s_time_t avg_interval; | |
+ s_time_t duration, now; | |
+ s_time_t avg_interval; | |
unsigned int irq_sum; | |
now = NOW(); | |
- duration = (data->pf.duration + (now - data->pf.time_stamp) | |
- * (DECAY - 1)) / DECAY; | |
+ duration = | |
+ (data->pf.duration + (now - data->pf.time_stamp) * (DECAY - 1)) / | |
+ DECAY; | |
- irq_sum = (data->pf.irq_sum + (this_cpu(irq_count) - data->pf.irq_count_stamp) | |
- * (DECAY - 1)) / DECAY; | |
+ irq_sum = | |
+ (data->pf.irq_sum + | |
+ (this_cpu(irq_count) - data->pf.irq_count_stamp) * (DECAY - 1)) / | |
+ DECAY; | |
- if (irq_sum == 0) | |
+ if ( irq_sum == 0 ) | |
/* no irq recently, so return a big enough interval: 1 sec */ | |
avg_interval = 1000000; | |
else | |
avg_interval = duration / irq_sum / 1000; /* in us */ | |
- if ( duration >= SAMPLING_PERIOD){ | |
+ if ( duration >= SAMPLING_PERIOD ) | |
+ { | |
data->pf.time_stamp = now; | |
data->pf.duration = duration; | |
- data->pf.irq_count_stamp= this_cpu(irq_count); | |
+ data->pf.irq_count_stamp = this_cpu(irq_count); | |
data->pf.irq_sum = irq_sum; | |
} | |
@@ -189,7 +194,7 @@ static int menu_select(struct acpi_processor_power *power) | |
{ | |
struct menu_device *data = &this_cpu(menu_devices); | |
int i; | |
- s_time_t io_interval; | |
+ s_time_t io_interval; | |
     /* TBD: Change to 0 if C0 (polling mode) support is added later */ | |
data->last_state_idx = CPUIDLE_DRIVER_STATE_START; | |
@@ -203,31 +208,30 @@ static int menu_select(struct acpi_processor_power *power) | |
io_interval = avg_intr_interval_us(); | |
data->latency_factor = DIV_ROUND( | |
- data->latency_factor * (DECAY - 1) + data->measured_us, | |
- DECAY); | |
+ data->latency_factor * (DECAY - 1) + data->measured_us, DECAY); | |
/* | |
* if the correction factor is 0 (eg first time init or cpu hotplug | |
* etc), we actually want to start out with a unity factor. | |
*/ | |
- if (data->correction_factor[data->bucket] == 0) | |
+ if ( data->correction_factor[data->bucket] == 0 ) | |
data->correction_factor[data->bucket] = RESOLUTION * DECAY; | |
/* Make sure to round up for half microseconds */ | |
- data->predicted_us = DIV_ROUND( | |
- data->expected_us * data->correction_factor[data->bucket], | |
- RESOLUTION * DECAY); | |
+ data->predicted_us = | |
+ DIV_ROUND(data->expected_us * data->correction_factor[data->bucket], | |
+ RESOLUTION * DECAY); | |
/* find the deepest idle state that satisfies our constraints */ | |
for ( i = CPUIDLE_DRIVER_STATE_START + 1; i < power->count; i++ ) | |
{ | |
struct acpi_processor_cx *s = &power->states[i]; | |
- if (s->target_residency > data->predicted_us) | |
+ if ( s->target_residency > data->predicted_us ) | |
break; | |
- if (s->latency * IO_MULTIPLIER > io_interval) | |
+ if ( s->latency * IO_MULTIPLIER > io_interval ) | |
break; | |
- if (s->latency * LATENCY_MULTIPLIER > data->latency_factor) | |
+ if ( s->latency * LATENCY_MULTIPLIER > data->latency_factor ) | |
break; | |
         /* TBD: we need to check the QoS requirement in future */ | |
data->exit_us = s->latency; | |
@@ -248,15 +252,14 @@ static void menu_reflect(struct acpi_processor_power *power) | |
* We correct for the exit latency; we are assuming here that the | |
* exit latency happens after the event that we're interested in. | |
*/ | |
- if (data->measured_us > data->exit_us) | |
+ if ( data->measured_us > data->exit_us ) | |
data->measured_us -= data->exit_us; | |
/* update our correction ratio */ | |
- new_factor = data->correction_factor[data->bucket] | |
- * (DECAY - 1) / DECAY; | |
+ new_factor = data->correction_factor[data->bucket] * (DECAY - 1) / DECAY; | |
- if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) | |
+ if ( data->expected_us > 0 && data->measured_us < MAX_INTERESTING ) | |
new_factor += RESOLUTION * data->measured_us / data->expected_us; | |
else | |
/* | |
@@ -269,7 +272,7 @@ static void menu_reflect(struct acpi_processor_power *power) | |
* We don't want 0 as factor; we always want at least | |
* a tiny bit of estimated time. | |
*/ | |
- if (new_factor == 0) | |
+ if ( new_factor == 0 ) | |
new_factor = 1; | |
data->correction_factor[data->bucket] = new_factor; | |
@@ -282,13 +285,12 @@ static int menu_enable_device(struct acpi_processor_power *power) | |
return 0; | |
} | |
-static struct cpuidle_governor menu_governor = | |
-{ | |
- .name = "menu", | |
- .rating = 20, | |
- .enable = menu_enable_device, | |
- .select = menu_select, | |
- .reflect = menu_reflect, | |
+static struct cpuidle_governor menu_governor = { | |
+ .name = "menu", | |
+ .rating = 20, | |
+ .enable = menu_enable_device, | |
+ .select = menu_select, | |
+ .reflect = menu_reflect, | |
}; | |
struct cpuidle_governor *cpuidle_current_governor = &menu_governor; | |
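
The menu governor hunks above all orbit one piece of arithmetic: a per-bucket
correction factor that decays by 1/DECAY per sleep and absorbs the ratio of
measured to expected idle time, scaled by RESOLUTION, so that predicted_us
learns how past sleeps compared with the timer-based estimate. A sketch of
that bookkeeping with illustrative constants (the real values live in
cpuidle_menu.c, and the MAX_INTERESTING guard is omitted):

    #include <stdio.h>
    #include <stdint.h>

    #define RESOLUTION 1024
    #define DECAY 8
    #define DIV_ROUND(n, d) (((n) + (d) / 2) / (d))

    static uint64_t update_factor(uint64_t factor, unsigned int measured_us,
                                  unsigned int expected_us)
    {
        factor = factor * (DECAY - 1) / DECAY;            /* forget old samples */
        factor += RESOLUTION * measured_us / expected_us; /* weigh in the new one */
        return factor ?: 1;                               /* never allow 0 */
    }

    int main(void)
    {
        uint64_t factor = RESOLUTION * DECAY; /* unity, as on first use */
        unsigned int expected_us = 1000;
        int i;

        /* The CPU keeps waking after ~250us of a predicted 1000us sleep... */
        for ( i = 0; i < 16; i++ )
            factor = update_factor(factor, 250, expected_us);

        /* ...so the prediction drifts from 1000us down towards 250us. */
        printf("predicted_us = %llu\n",
               (unsigned long long)DIV_ROUND(expected_us * factor,
                                             RESOLUTION * DECAY));
        return 0;
    }
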
diff --git a/xen/arch/x86/acpi/lib.c b/xen/arch/x86/acpi/lib.c | |
index 265b9ad819..ecd31066ec 100644 | |
--- a/xen/arch/x86/acpi/lib.c | |
+++ b/xen/arch/x86/acpi/lib.c | |
@@ -29,8 +29,8 @@ u32 __read_mostly acpi_smi_cmd; | |
u8 __read_mostly acpi_enable_value; | |
u8 __read_mostly acpi_disable_value; | |
-u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = | |
- {[0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID }; | |
+u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = { | |
+ [0 ... MAX_MADT_ENTRIES - 1] = BAD_APICID}; | |
/* | |
* Important Safety Note: The fixed ACPI page numbers are *subtracted* | |
@@ -39,87 +39,88 @@ u32 __read_mostly x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = | |
*/ | |
char *__acpi_map_table(paddr_t phys, unsigned long size) | |
{ | |
- unsigned long base, offset, mapped_size; | |
- int idx; | |
- | |
- /* XEN: RAM holes above 1MB are not permanently mapped. */ | |
- if ((phys + size) <= (1 * 1024 * 1024)) | |
- return __va(phys); | |
- | |
- offset = phys & (PAGE_SIZE - 1); | |
- mapped_size = PAGE_SIZE - offset; | |
- set_fixmap(FIX_ACPI_END, phys); | |
- base = __fix_to_virt(FIX_ACPI_END); | |
- | |
- /* | |
- * Most cases can be covered by the below. | |
- */ | |
- idx = FIX_ACPI_END; | |
- while (mapped_size < size) { | |
- if (--idx < FIX_ACPI_BEGIN) | |
- return NULL; /* cannot handle this */ | |
- phys += PAGE_SIZE; | |
- set_fixmap(idx, phys); | |
- mapped_size += PAGE_SIZE; | |
- } | |
- | |
- return ((char *) base + offset); | |
+ unsigned long base, offset, mapped_size; | |
+ int idx; | |
+ | |
+ /* XEN: RAM holes above 1MB are not permanently mapped. */ | |
+ if ( (phys + size) <= (1 * 1024 * 1024) ) | |
+ return __va(phys); | |
+ | |
+ offset = phys & (PAGE_SIZE - 1); | |
+ mapped_size = PAGE_SIZE - offset; | |
+ set_fixmap(FIX_ACPI_END, phys); | |
+ base = __fix_to_virt(FIX_ACPI_END); | |
+ | |
+ /* | |
+ * Most cases can be covered by the below. | |
+ */ | |
+ idx = FIX_ACPI_END; | |
+ while ( mapped_size < size ) | |
+ { | |
+ if ( --idx < FIX_ACPI_BEGIN ) | |
+ return NULL; /* cannot handle this */ | |
+ phys += PAGE_SIZE; | |
+ set_fixmap(idx, phys); | |
+ mapped_size += PAGE_SIZE; | |
+ } | |
+ | |
+ return ((char *)base + offset); | |
} | |
unsigned int acpi_get_processor_id(unsigned int cpu) | |
{ | |
- unsigned int acpiid, apicid; | |
+ unsigned int acpiid, apicid; | |
- if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID) | |
- return INVALID_ACPIID; | |
+ if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID ) | |
+ return INVALID_ACPIID; | |
- for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++) | |
- if (x86_acpiid_to_apicid[acpiid] == apicid) | |
- return acpiid; | |
+ for ( acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++ ) | |
+ if ( x86_acpiid_to_apicid[acpiid] == apicid ) | |
+ return acpiid; | |
- return INVALID_ACPIID; | |
+ return INVALID_ACPIID; | |
} | |
static void get_mwait_ecx(void *info) | |
{ | |
- *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF); | |
+ *(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF); | |
} | |
int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask) | |
{ | |
- unsigned int cpu = get_cpu_id(acpi_id); | |
- struct cpuinfo_x86 *c; | |
- u32 ecx; | |
- | |
- if (!(acpi_id + 1)) | |
- c = &boot_cpu_data; | |
- else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) | |
- return -EINVAL; | |
- else | |
- c = cpu_data + cpu; | |
- | |
- pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask; | |
- | |
- if (cpu_has(c, X86_FEATURE_EIST)) | |
- pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask; | |
- | |
- if (cpu_has(c, X86_FEATURE_ACPI)) | |
- pdc[2] |= ACPI_PDC_T_FFH & mask; | |
- | |
- /* | |
- * If mwait/monitor or its break-on-interrupt extension are | |
- * unsupported, Cx_FFH will be disabled. | |
- */ | |
- if (!cpu_has(c, X86_FEATURE_MONITOR) || | |
- c->cpuid_level < CPUID_MWAIT_LEAF) | |
- ecx = 0; | |
- else if (c == &boot_cpu_data || cpu == smp_processor_id()) | |
- ecx = cpuid_ecx(CPUID_MWAIT_LEAF); | |
- else | |
- on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1); | |
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | |
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | |
- pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH); | |
- | |
- return 0; | |
+ unsigned int cpu = get_cpu_id(acpi_id); | |
+ struct cpuinfo_x86 *c; | |
+ u32 ecx; | |
+ | |
+ if ( !(acpi_id + 1) ) | |
+ c = &boot_cpu_data; | |
+ else if ( cpu >= nr_cpu_ids || !cpu_online(cpu) ) | |
+ return -EINVAL; | |
+ else | |
+ c = cpu_data + cpu; | |
+ | |
+ pdc[2] |= ACPI_PDC_C_CAPABILITY_SMP & mask; | |
+ | |
+ if ( cpu_has(c, X86_FEATURE_EIST) ) | |
+ pdc[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP & mask; | |
+ | |
+ if ( cpu_has(c, X86_FEATURE_ACPI) ) | |
+ pdc[2] |= ACPI_PDC_T_FFH & mask; | |
+ | |
+ /* | |
+ * If mwait/monitor or its break-on-interrupt extension are | |
+ * unsupported, Cx_FFH will be disabled. | |
+ */ | |
+ if ( !cpu_has(c, X86_FEATURE_MONITOR) || | |
+ c->cpuid_level < CPUID_MWAIT_LEAF ) | |
+ ecx = 0; | |
+ else if ( c == &boot_cpu_data || cpu == smp_processor_id() ) | |
+ ecx = cpuid_ecx(CPUID_MWAIT_LEAF); | |
+ else | |
+ on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1); | |
+ if ( !(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | |
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ) | |
+ pdc[2] &= ~(ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH); | |
+ | |
+ return 0; | |
} | |
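
The __acpi_map_table() rewrite above keeps the same page-walking logic: map
the page containing phys into FIX_ACPI_END, then claim further fixmap slots
downwards until the whole table is covered. The slot arithmetic on its own,
with an illustrative page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long fixmap_slots_needed(unsigned long phys,
                                             unsigned long size)
    {
        unsigned long offset = phys & (PAGE_SIZE - 1);
        unsigned long mapped_size = PAGE_SIZE - offset; /* first slot's share */
        unsigned long slots = 1;

        while ( mapped_size < size )
        {
            slots++; /* the real code fails once idx drops below FIX_ACPI_BEGIN */
            mapped_size += PAGE_SIZE;
        }
        return slots;
    }

    int main(void)
    {
        /* A 5000-byte table starting 100 bytes before a page boundary
           straddles three pages, hence three fixmap slots. */
        printf("%lu slots\n", fixmap_slots_needed(PAGE_SIZE - 100, 5000));
        return 0;
    }
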
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c | |
index aecc754fdb..7a1d0e92d5 100644 | |
--- a/xen/arch/x86/acpi/power.c | |
+++ b/xen/arch/x86/acpi/power.c | |
@@ -80,7 +80,7 @@ static int device_power_down(void) | |
static void device_power_up(enum dev_power_saved saved) | |
{ | |
- switch ( saved ) | |
+ switch (saved) | |
{ | |
case SAVED_ALL: | |
case SAVED_LAPIC: | |
@@ -119,7 +119,7 @@ static void freeze_domains(void) | |
* first which is required for correctness (as only dom0 can add domains to | |
* the domain list). Otherwise we could miss concurrently-created domains. | |
*/ | |
- for_each_domain ( d ) | |
+ for_each_domain (d) | |
domain_pause(d); | |
rcu_read_unlock(&domlist_read_lock); | |
} | |
@@ -129,7 +129,7 @@ static void thaw_domains(void) | |
struct domain *d; | |
rcu_read_lock(&domlist_read_lock); | |
- for_each_domain ( d ) | |
+ for_each_domain (d) | |
{ | |
restore_vcpu_affinity(d); | |
domain_unpause(d); | |
@@ -144,8 +144,8 @@ static void acpi_sleep_prepare(u32 state) | |
if ( state != ACPI_STATE_S3 ) | |
return; | |
- wakeup_vector_va = __acpi_map_table( | |
- acpi_sinfo.wakeup_vector, sizeof(uint64_t)); | |
+ wakeup_vector_va = | |
+ __acpi_map_table(acpi_sinfo.wakeup_vector, sizeof(uint64_t)); | |
/* TBoot will set resume vector itself (when it is safe to do so). */ | |
if ( tboot_in_measured_env() ) | |
@@ -157,7 +157,9 @@ static void acpi_sleep_prepare(u32 state) | |
*(uint64_t *)wakeup_vector_va = bootsym_phys(wakeup_start); | |
} | |
-static void acpi_sleep_post(u32 state) {} | |
+static void acpi_sleep_post(u32 state) | |
+{ | |
+} | |
/* Main interface to do xen specific suspend/resume */ | |
static int enter_state(u32 state) | |
@@ -219,7 +221,7 @@ static int enter_state(u32 state) | |
ACPI_FLUSH_CPU_CACHE(); | |
- switch ( state ) | |
+ switch (state) | |
{ | |
case ACPI_STATE_S3: | |
do_suspend_lowlevel(); | |
@@ -262,7 +264,7 @@ static int enter_state(u32 state) | |
ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr); | |
spec_ctrl_exit_idle(ci); | |
- done: | |
+done: | |
spin_debug_enable(); | |
local_irq_restore(flags); | |
acpi_sleep_post(state); | |
@@ -270,7 +272,7 @@ static int enter_state(u32 state) | |
BUG(); | |
cpufreq_add_cpu(0); | |
- enable_cpu: | |
+enable_cpu: | |
rcu_barrier(); | |
mtrr_aps_sync_begin(); | |
enable_nonboot_cpus(); | |
@@ -367,11 +369,11 @@ static void tboot_sleep(u8 sleep_state) | |
{ | |
uint32_t shutdown_type; | |
-#define TB_COPY_GAS(tbg, g) \ | |
- tbg.space_id = g.space_id; \ | |
- tbg.bit_width = g.bit_width; \ | |
- tbg.bit_offset = g.bit_offset; \ | |
- tbg.access_width = g.access_width; \ | |
+#define TB_COPY_GAS(tbg, g) \ | |
+ tbg.space_id = g.space_id; \ | |
+ tbg.bit_width = g.bit_width; \ | |
+ tbg.bit_offset = g.bit_offset; \ | |
+ tbg.access_width = g.access_width; \ | |
tbg.address = g.address; | |
/* sizes are not same (due to packing) so copy each one */ | |
@@ -388,21 +390,21 @@ static void tboot_sleep(u8 sleep_state) | |
g_tboot_shared->acpi_sinfo.wakeup_vector = acpi_sinfo.wakeup_vector; | |
g_tboot_shared->acpi_sinfo.vector_width = acpi_sinfo.vector_width; | |
g_tboot_shared->acpi_sinfo.kernel_s3_resume_vector = | |
- bootsym_phys(wakeup_start); | |
+ bootsym_phys(wakeup_start); | |
- switch ( sleep_state ) | |
+ switch (sleep_state) | |
{ | |
- case ACPI_STATE_S3: | |
- shutdown_type = TB_SHUTDOWN_S3; | |
- break; | |
- case ACPI_STATE_S4: | |
- shutdown_type = TB_SHUTDOWN_S4; | |
- break; | |
- case ACPI_STATE_S5: | |
- shutdown_type = TB_SHUTDOWN_S5; | |
- break; | |
- default: | |
- return; | |
+ case ACPI_STATE_S3: | |
+ shutdown_type = TB_SHUTDOWN_S3; | |
+ break; | |
+ case ACPI_STATE_S4: | |
+ shutdown_type = TB_SHUTDOWN_S4; | |
+ break; | |
+ case ACPI_STATE_S5: | |
+ shutdown_type = TB_SHUTDOWN_S5; | |
+ break; | |
+ default: | |
+ return; | |
} | |
tboot_shutdown(shutdown_type); | |
@@ -432,7 +434,8 @@ acpi_status acpi_enter_sleep_state(u8 sleep_state) | |
*/ | |
u8 sleep_type_value = | |
((acpi_sinfo.sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & | |
- ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE; | |
+ ACPI_X_SLEEP_TYPE_MASK) | | |
+ ACPI_X_SLEEP_ENABLE; | |
status = acpi_hw_register_write(ACPI_REGISTER_SLEEP_CONTROL, | |
sleep_type_value); | |
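
The sleep_type_value expression that closes the hunk above packs SLP_TYPa and
SLP_EN into ACPI's sleep control register. Assuming the conventional layout
(SLP_TYP in bits 4:2, SLP_EN in bit 5), which is what the ACPI_X_* constants
are expected to expand to, the computation reduces to:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed values, mirroring the ACPI sleep control register layout. */
    #define SLEEP_TYPE_POSITION 2
    #define SLEEP_TYPE_MASK     0x1c
    #define SLEEP_ENABLE        0x20

    static uint8_t sleep_control(uint8_t sleep_type_a)
    {
        return ((sleep_type_a << SLEEP_TYPE_POSITION) & SLEEP_TYPE_MASK) |
               SLEEP_ENABLE;
    }

    int main(void)
    {
        /* SLP_TYP 5 (a common S3 encoding) -> 0x34 */
        printf("0x%02x\n", sleep_control(5));
        return 0;
    }
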
diff --git a/xen/arch/x86/acpi/suspend.c b/xen/arch/x86/acpi/suspend.c | |
index ba9d2e13a7..55aa775bf2 100644 | |
--- a/xen/arch/x86/acpi/suspend.c | |
+++ b/xen/arch/x86/acpi/suspend.c | |
@@ -38,7 +38,6 @@ void save_rest_processor_state(void) | |
saved_xcr0 = get_xcr0(); | |
} | |
- | |
void restore_rest_processor_state(void) | |
{ | |
load_TR(); | |
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c | |
index ce2b4302e6..56cda5b667 100644 | |
--- a/xen/arch/x86/alternative.c | |
+++ b/xen/arch/x86/alternative.c | |
@@ -27,80 +27,68 @@ | |
#include <asm/nops.h> | |
#include <xen/livepatch.h> | |
-#define MAX_PATCH_LEN (255-1) | |
+#define MAX_PATCH_LEN (255 - 1) | |
extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | |
#ifdef K8_NOP1 | |
static const unsigned char k8nops[] init_or_livepatch_const = { | |
- K8_NOP1, | |
- K8_NOP2, | |
- K8_NOP3, | |
- K8_NOP4, | |
- K8_NOP5, | |
- K8_NOP6, | |
- K8_NOP7, | |
- K8_NOP8, | |
- K8_NOP9, | |
+ K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4, K8_NOP5, | |
+ K8_NOP6, K8_NOP7, K8_NOP8, K8_NOP9, | |
}; | |
-static const unsigned char * const k8_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = { | |
- NULL, | |
- k8nops, | |
- k8nops + 1, | |
- k8nops + 1 + 2, | |
- k8nops + 1 + 2 + 3, | |
- k8nops + 1 + 2 + 3 + 4, | |
- k8nops + 1 + 2 + 3 + 4 + 5, | |
- k8nops + 1 + 2 + 3 + 4 + 5 + 6, | |
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | |
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | |
+static const unsigned char | |
+ *const k8_nops[ASM_NOP_MAX + 1] init_or_livepatch_constrel = { | |
+ NULL, | |
+ k8nops, | |
+ k8nops + 1, | |
+ k8nops + 1 + 2, | |
+ k8nops + 1 + 2 + 3, | |
+ k8nops + 1 + 2 + 3 + 4, | |
+ k8nops + 1 + 2 + 3 + 4 + 5, | |
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6, | |
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | |
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | |
}; | |
#endif | |
#ifdef P6_NOP1 | |
static const unsigned char p6nops[] init_or_livepatch_const = { | |
- P6_NOP1, | |
- P6_NOP2, | |
- P6_NOP3, | |
- P6_NOP4, | |
- P6_NOP5, | |
- P6_NOP6, | |
- P6_NOP7, | |
- P6_NOP8, | |
- P6_NOP9, | |
+ P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4, P6_NOP5, | |
+ P6_NOP6, P6_NOP7, P6_NOP8, P6_NOP9, | |
}; | |
-static const unsigned char * const p6_nops[ASM_NOP_MAX+1] init_or_livepatch_constrel = { | |
- NULL, | |
- p6nops, | |
- p6nops + 1, | |
- p6nops + 1 + 2, | |
- p6nops + 1 + 2 + 3, | |
- p6nops + 1 + 2 + 3 + 4, | |
- p6nops + 1 + 2 + 3 + 4 + 5, | |
- p6nops + 1 + 2 + 3 + 4 + 5 + 6, | |
- p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | |
- p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | |
+static const unsigned char | |
+ *const p6_nops[ASM_NOP_MAX + 1] init_or_livepatch_constrel = { | |
+ NULL, | |
+ p6nops, | |
+ p6nops + 1, | |
+ p6nops + 1 + 2, | |
+ p6nops + 1 + 2 + 3, | |
+ p6nops + 1 + 2 + 3 + 4, | |
+ p6nops + 1 + 2 + 3 + 4 + 5, | |
+ p6nops + 1 + 2 + 3 + 4 + 5 + 6, | |
+ p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | |
+ p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, | |
}; | |
#endif | |
-static const unsigned char * const *ideal_nops init_or_livepatch_data = p6_nops; | |
+static const unsigned char *const *ideal_nops init_or_livepatch_data = p6_nops; | |
#ifdef HAVE_AS_NOPS_DIRECTIVE | |
/* Nops in .init.rodata to compare against the runtime ideal nops. */ | |
-asm ( ".pushsection .init.rodata, \"a\", @progbits\n\t" | |
- "toolchain_nops: .nops " __stringify(ASM_NOP_MAX) "\n\t" | |
- ".popsection\n\t"); | |
+asm(".pushsection .init.rodata, \"a\", @progbits\n\t" | |
+ "toolchain_nops: .nops " __stringify(ASM_NOP_MAX) "\n\t" | |
+ ".popsection\n\t"); | |
extern char toolchain_nops[ASM_NOP_MAX]; | |
static bool init_or_livepatch_read_mostly toolchain_nops_are_ideal; | |
#else | |
-# define toolchain_nops_are_ideal false | |
+#define toolchain_nops_are_ideal false | |
#endif | |
static void __init arch_init_ideal_nops(void) | |
{ | |
- switch ( boot_cpu_data.x86_vendor ) | |
+ switch (boot_cpu_data.x86_vendor) | |
{ | |
case X86_VENDOR_INTEL: | |
/* | |
@@ -111,7 +99,7 @@ static void __init arch_init_ideal_nops(void) | |
if ( boot_cpu_data.x86 != 6 ) | |
break; | |
- switch ( boot_cpu_data.x86_model ) | |
+ switch (boot_cpu_data.x86_model) | |
{ | |
case 0x0f ... 0x1b: | |
case 0x1d ... 0x25: | |
@@ -165,8 +153,9 @@ void init_or_livepatch add_nops(void *insns, unsigned int len) | |
* "noinline" to cause control flow change and thus invalidate I$ and | |
* cause refetch after modification. | |
*/ | |
-static void *init_or_livepatch noinline | |
-text_poke(void *addr, const void *opcode, size_t len) | |
+static void *init_or_livepatch noinline text_poke(void *addr, | |
+ const void *opcode, | |
+ size_t len) | |
{ | |
return memcpy(addr, opcode, len); | |
} | |
@@ -214,9 +203,9 @@ static void init_or_livepatch _apply_alternatives(struct alt_instr *start, | |
* Detect sequences of alt_instr's patching the same origin site, and | |
* keep base pointing at the first alt_instr entry. This is so we can | |
* refer to a single ->priv field for some of our patching decisions, | |
- * in particular the NOP optimization. We deliberately use the alt_instr | |
- * itself rather than a local variable in case we end up making multiple | |
- * passes. | |
+ * in particular the NOP optimization. We deliberately use the | |
+ * alt_instr itself rather than a local variable in case we end up | |
+ * making multiple passes. | |
* | |
* ->priv being nonzero means that the origin site has already been | |
* modified, and we shouldn't try to optimise the nops again. | |
@@ -231,7 +220,8 @@ static void init_or_livepatch _apply_alternatives(struct alt_instr *start, | |
continue; | |
} | |
- /* If there is no replacement to make, see about optimising the nops. */ | |
+ /* If there is no replacement to make, see about optimising the nops. | |
+ */ | |
if ( !boot_cpu_has(a->cpuid) ) | |
{ | |
             /* Origin site already touched? Don't nop anything. */ | |
@@ -269,10 +259,8 @@ static void init_or_livepatch _apply_alternatives(struct alt_instr *start, | |
* (for ease of recognition) instead of CALL/JMP. | |
*/ | |
if ( a->cpuid == X86_FEATURE_ALWAYS && | |
- *(int32_t *)(buf + 1) == -5 && | |
- a->orig_len >= 6 && | |
- orig[0] == 0xff && | |
- orig[1] == (*buf & 1 ? 0x25 : 0x15) ) | |
+ *(int32_t *)(buf + 1) == -5 && a->orig_len >= 6 && | |
+ orig[0] == 0xff && orig[1] == (*buf & 1 ? 0x25 : 0x15) ) | |
{ | |
long disp = *(int32_t *)(orig + 2); | |
const uint8_t *dest = *(void **)(orig + 6 + disp); | |
@@ -299,7 +287,7 @@ static void init_or_livepatch _apply_alternatives(struct alt_instr *start, | |
else | |
*(int32_t *)(buf + 1) += repl - orig; | |
} | |
- else if ( force && system_state < SYS_STATE_active ) | |
+ else if ( force && system_state < SYS_STATE_active ) | |
ASSERT_UNREACHABLE(); | |
a->priv = 1; | |
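
The k8_nops/p6_nops tables reindented above are keyed by length: entry n
points at an n-byte NOP encoding, and add_nops() (referenced in this file)
pads a patch site by emitting the longest NOP it can on each pass. A
host-side sketch of that fill loop, substituting repeated 0x90 bytes for the
real multi-byte encodings:

    #include <stdio.h>
    #include <string.h>

    #define ASM_NOP_MAX 9

    /* Stand-in filler; the real tables hold distinct 1..9 byte encodings. */
    static const unsigned char nopbytes[ASM_NOP_MAX] = {
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    };

    static void add_nops(unsigned char *insns, unsigned int len)
    {
        while ( len > 0 )
        {
            unsigned int noplen = len < ASM_NOP_MAX ? len : ASM_NOP_MAX;

            memcpy(insns, nopbytes, noplen); /* Xen: ideal_nops[noplen] */
            insns += noplen;
            len -= noplen;
        }
    }

    int main(void)
    {
        unsigned char buf[12];

        add_nops(buf, sizeof(buf)); /* one 9-byte pass, then a 3-byte pass */
        printf("%02x ... %02x\n", buf[0], buf[11]);
        return 0;
    }
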
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c | |
index 9c3c998d34..4dd876b8ff 100644 | |
--- a/xen/arch/x86/apic.c | |
+++ b/xen/arch/x86/apic.c | |
@@ -44,7 +44,8 @@ static bool __read_mostly tdt_enabled; | |
static bool __initdata tdt_enable = true; | |
boolean_param("tdt", tdt_enable); | |
-static struct { | |
+static struct | |
+{ | |
int active; | |
/* r/w apic fields */ | |
unsigned int apic_id; | |
@@ -89,12 +90,12 @@ static int modern_apic(void) | |
{ | |
unsigned int lvr, version; | |
/* AMD systems use old APIC versions, so check the CPU */ | |
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | |
- boot_cpu_data.x86 >= 0xf) | |
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | |
+ boot_cpu_data.x86 >= 0xf ) | |
return 1; | |
/* Hygon systems use modern APIC */ | |
- if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) | |
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ) | |
return 1; | |
lvr = apic_read(APIC_LVR); | |
@@ -118,7 +119,7 @@ void ack_bad_irq(unsigned int irq) | |
* completely. | |
* But only ack when the APIC is enabled -AK | |
*/ | |
- if (cpu_has_apic) | |
+ if ( cpu_has_apic ) | |
ack_APIC_irq(); | |
} | |
@@ -144,7 +145,7 @@ static bool __read_mostly enabled_via_apicbase; | |
int get_physical_broadcast(void) | |
{ | |
- if (modern_apic()) | |
+ if ( modern_apic() ) | |
return 0xff; | |
else | |
return 0xf; | |
@@ -171,7 +172,8 @@ void clear_local_APIC(void) | |
* Masking an LVT entry on a P6 can trigger a local APIC error | |
* if the vector is zero. Mask LVTERR first to prevent this. | |
*/ | |
- if (maxlvt >= 3) { | |
+ if ( maxlvt >= 3 ) | |
+ { | |
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ | |
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | |
} | |
@@ -185,20 +187,23 @@ void clear_local_APIC(void) | |
apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | |
v = apic_read(APIC_LVT1); | |
apic_write(APIC_LVT1, v | APIC_LVT_MASKED); | |
- if (maxlvt >= 4) { | |
+ if ( maxlvt >= 4 ) | |
+ { | |
v = apic_read(APIC_LVTPC); | |
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); | |
} | |
     /* let's not touch this if we didn't frob it */ | |
#ifdef CONFIG_X86_MCE_THERMAL | |
- if (maxlvt >= 5) { | |
+ if ( maxlvt >= 5 ) | |
+ { | |
v = apic_read(APIC_LVTTHMR); | |
apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); | |
} | |
#endif | |
- if (maxlvt >= 6) { | |
+ if ( maxlvt >= 6 ) | |
+ { | |
v = apic_read(APIC_CMCI); | |
apic_write(APIC_CMCI, v | APIC_LVT_MASKED); | |
} | |
@@ -208,26 +213,27 @@ void clear_local_APIC(void) | |
apic_write(APIC_LVTT, APIC_LVT_MASKED); | |
apic_write(APIC_LVT0, APIC_LVT_MASKED); | |
apic_write(APIC_LVT1, APIC_LVT_MASKED); | |
- if (maxlvt >= 3) | |
+ if ( maxlvt >= 3 ) | |
apic_write(APIC_LVTERR, APIC_LVT_MASKED); | |
- if (maxlvt >= 4) | |
+ if ( maxlvt >= 4 ) | |
apic_write(APIC_LVTPC, APIC_LVT_MASKED); | |
#ifdef CONFIG_X86_MCE_THERMAL | |
- if (maxlvt >= 5) | |
+ if ( maxlvt >= 5 ) | |
apic_write(APIC_LVTTHMR, APIC_LVT_MASKED); | |
#endif | |
- if (maxlvt >= 6) | |
+ if ( maxlvt >= 6 ) | |
apic_write(APIC_CMCI, APIC_LVT_MASKED); | |
- if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */ | |
+ if ( maxlvt > 3 ) /* Due to Pentium errata 3AP and 11AP. */ | |
apic_write(APIC_ESR, 0); | |
apic_read(APIC_ESR); | |
} | |
void __init connect_bsp_APIC(void) | |
{ | |
- if (pic_mode) { | |
+ if ( pic_mode ) | |
+ { | |
/* | |
* Do not trust the local APIC being empty at bootup. | |
*/ | |
@@ -237,7 +243,7 @@ void __init connect_bsp_APIC(void) | |
* connect BSP's local APIC to INT and NMI lines. | |
*/ | |
apic_printk(APIC_VERBOSE, "leaving PIC mode, " | |
- "enabling APIC mode.\n"); | |
+ "enabling APIC mode.\n"); | |
outb(0x70, 0x22); | |
outb(0x01, 0x23); | |
} | |
@@ -246,7 +252,8 @@ void __init connect_bsp_APIC(void) | |
void disconnect_bsp_APIC(int virt_wire_setup) | |
{ | |
- if (pic_mode) { | |
+ if ( pic_mode ) | |
+ { | |
/* | |
* Put the board back into PIC mode (has an effect | |
* only on certain older boards). Note that APIC | |
@@ -254,11 +261,12 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |
* this point! The only exception are INIT IPIs. | |
*/ | |
apic_printk(APIC_VERBOSE, "disabling APIC mode, " | |
- "entering PIC mode.\n"); | |
+ "entering PIC mode.\n"); | |
outb(0x70, 0x22); | |
outb(0x00, 0x23); | |
} | |
- else { | |
+ else | |
+ { | |
/* Go back to Virtual Wire compatibility mode */ | |
unsigned long value; | |
@@ -269,27 +277,29 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |
value |= 0xf; | |
apic_write(APIC_SPIV, value); | |
- if (!virt_wire_setup) { | |
- /* For LVT0 make it edge triggered, active high, external and enabled */ | |
+ if ( !virt_wire_setup ) | |
+ { | |
+ /* For LVT0 make it edge triggered, active high, external and | |
+ * enabled */ | |
value = apic_read(APIC_LVT0); | |
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | | |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | |
- APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED ); | |
+ APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | |
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | |
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); | |
apic_write(APIC_LVT0, value); | |
} | |
- else { | |
+ else | |
+ { | |
/* Disable LVT0 */ | |
apic_write(APIC_LVT0, APIC_LVT_MASKED); | |
} | |
/* For LVT1 make it edge triggered, active high, nmi and enabled */ | |
value = apic_read(APIC_LVT1); | |
- value &= ~( | |
- APIC_MODE_MASK | APIC_SEND_PENDING | | |
- APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | |
- APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | |
+ value &= | |
+ ~(APIC_MODE_MASK | APIC_SEND_PENDING | APIC_INPUT_POLARITY | | |
+ APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | |
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | |
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); | |
apic_write(APIC_LVT1, value); | |
@@ -306,11 +316,12 @@ void disable_local_APIC(void) | |
*/ | |
apic_write(APIC_SPIV, apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED); | |
- if (enabled_via_apicbase) { | |
+ if ( enabled_via_apicbase ) | |
+ { | |
uint64_t msr_content; | |
rdmsrl(MSR_APIC_BASE, msr_content); | |
- wrmsrl(MSR_APIC_BASE, msr_content & | |
- ~(APIC_BASE_ENABLE | APIC_BASE_EXTD)); | |
+ wrmsrl(MSR_APIC_BASE, | |
+ msr_content & ~(APIC_BASE_ENABLE | APIC_BASE_EXTD)); | |
} | |
if ( kexecing && (current_local_apic_mode() != apic_boot_mode) ) | |
@@ -320,7 +331,7 @@ void disable_local_APIC(void) | |
msr_content &= ~(APIC_BASE_ENABLE | APIC_BASE_EXTD); | |
wrmsrl(MSR_APIC_BASE, msr_content); | |
- switch ( apic_boot_mode ) | |
+ switch (apic_boot_mode) | |
{ | |
case APIC_MODE_DISABLED: | |
break; /* Nothing to do - we did this above */ | |
@@ -340,7 +351,6 @@ void disable_local_APIC(void) | |
break; | |
} | |
} | |
- | |
} | |
/* | |
@@ -369,17 +379,17 @@ int __init verify_local_APIC(void) | |
* numbers. If the second one is different, then we | |
* poke at a non-APIC. | |
*/ | |
- if (reg1 != reg0) | |
+ if ( reg1 != reg0 ) | |
return 0; | |
/* | |
      * Check if the version looks reasonable. | |
*/ | |
reg1 = GET_APIC_VERSION(reg0); | |
- if (reg1 == 0x00 || reg1 == 0xff) | |
+ if ( reg1 == 0x00 || reg1 == 0xff ) | |
return 0; | |
reg1 = get_maxlvt(); | |
- if (reg1 < 0x02 || reg1 == 0xff) | |
+ if ( reg1 < 0x02 || reg1 == 0xff ) | |
return 0; | |
/* | |
@@ -423,7 +433,7 @@ void __init sync_Arb_IDs(void) | |
{ | |
/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 | |
        and not needed on AMD */ | |
- if (modern_apic()) | |
+ if ( modern_apic() ) | |
return; | |
/* | |
* Wait for idle. | |
@@ -445,23 +455,24 @@ void __init init_bsp_APIC(void) | |
* Don't do the setup now if we have a SMP BIOS as the | |
* through-I/O-APIC virtual wire mode might be active. | |
*/ | |
- if (smp_found_config || !cpu_has_apic) | |
+ if ( smp_found_config || !cpu_has_apic ) | |
return; | |
/* | |
* Do not trust the local APIC being empty at bootup. | |
*/ | |
clear_local_APIC(); | |
- | |
+ | |
/* | |
* Enable APIC. | |
*/ | |
value = apic_read(APIC_SPIV); | |
value &= ~APIC_VECTOR_MASK; | |
value |= APIC_SPIV_APIC_ENABLED; | |
- | |
+ | |
/* This bit is reserved on P4/Xeon and should be cleared */ | |
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15)) | |
+ if ( (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | |
+ (boot_cpu_data.x86 == 15) ) | |
value &= ~APIC_SPIV_FOCUS_DISABLED; | |
else | |
value |= APIC_SPIV_FOCUS_DISABLED; | |
@@ -532,7 +543,8 @@ void setup_local_APIC(void) | |
int i, j; | |
/* Pound the ESR really hard over the head with a big hammer - mbligh */ | |
- if (esr_disable) { | |
+ if ( esr_disable ) | |
+ { | |
apic_write(APIC_ESR, 0); | |
apic_write(APIC_ESR, 0); | |
apic_write(APIC_ESR, 0); | |
@@ -544,7 +556,7 @@ void setup_local_APIC(void) | |
/* | |
* Double-check whether this APIC is really registered. | |
*/ | |
- if (!apic_id_registered()) | |
+ if ( !apic_id_registered() ) | |
BUG(); | |
/* | |
@@ -570,10 +582,12 @@ void setup_local_APIC(void) | |
* the interrupt. Hence a vector might get locked. It was noticed | |
* for timer irq (vector 0x31). Issue an extra EOI to clear ISR. | |
*/ | |
- for (i = APIC_ISR_NR - 1; i >= 0; i--) { | |
- value = apic_read(APIC_ISR + i*0x10); | |
- for (j = 31; j >= 0; j--) { | |
- if (value & (1u << j)) | |
+ for ( i = APIC_ISR_NR - 1; i >= 0; i-- ) | |
+ { | |
+ value = apic_read(APIC_ISR + i * 0x10); | |
+ for ( j = 31; j >= 0; j-- ) | |
+ { | |
+ if ( value & (1u << j) ) | |
ack_APIC_irq(); | |
} | |
} | |
@@ -642,11 +656,14 @@ void setup_local_APIC(void) | |
* TODO: set up through-local-APIC from through-I/O-APIC? --macro | |
*/ | |
value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; | |
- if (!smp_processor_id() && (pic_mode || !value)) { | |
+ if ( !smp_processor_id() && (pic_mode || !value) ) | |
+ { | |
value = APIC_DM_EXTINT; | |
apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", | |
smp_processor_id()); | |
- } else { | |
+ } | |
+ else | |
+ { | |
value = APIC_DM_EXTINT | APIC_LVT_MASKED; | |
apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", | |
smp_processor_id()); | |
@@ -656,31 +673,35 @@ void setup_local_APIC(void) | |
/* | |
* only the BP should see the LINT1 NMI signal, obviously. | |
*/ | |
- if (!smp_processor_id()) | |
+ if ( !smp_processor_id() ) | |
value = APIC_DM_NMI; | |
else | |
value = APIC_DM_NMI | APIC_LVT_MASKED; | |
apic_write(APIC_LVT1, value); | |
- if (!esr_disable) { | |
+ if ( !esr_disable ) | |
+ { | |
maxlvt = get_maxlvt(); | |
- if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | |
+ if ( maxlvt > 3 ) /* Due to the Pentium erratum 3AP. */ | |
apic_write(APIC_ESR, 0); | |
oldvalue = apic_read(APIC_ESR); | |
- value = ERROR_APIC_VECTOR; // enables sending errors | |
+ value = ERROR_APIC_VECTOR; // enables sending errors | |
apic_write(APIC_LVTERR, value); | |
/* | |
* spec says clear errors after enabling vector. | |
*/ | |
- if (maxlvt > 3) | |
+ if ( maxlvt > 3 ) | |
apic_write(APIC_ESR, 0); | |
value = apic_read(APIC_ESR); | |
- if (value != oldvalue) | |
- apic_printk(APIC_VERBOSE, "ESR value before enabling " | |
+ if ( value != oldvalue ) | |
+ apic_printk(APIC_VERBOSE, | |
+ "ESR value before enabling " | |
"vector: %#lx after: %#lx\n", | |
oldvalue, value); | |
- } else { | |
+ } | |
+ else | |
+ { | |
/* | |
          * Something untraceable is creating bad interrupts on | |
* secondary quads ... for the moment, just leave the | |
@@ -690,7 +711,7 @@ void setup_local_APIC(void) | |
printk("Leaving ESR disabled.\n"); | |
} | |
- if (nmi_watchdog == NMI_LOCAL_APIC && smp_processor_id()) | |
+ if ( nmi_watchdog == NMI_LOCAL_APIC && smp_processor_id() ) | |
setup_apic_nmi_watchdog(); | |
apic_pm_activate(); | |
} | |
@@ -699,7 +720,7 @@ int lapic_suspend(void) | |
{ | |
unsigned long flags; | |
int maxlvt = get_maxlvt(); | |
- if (!apic_pm_state.active) | |
+ if ( !apic_pm_state.active ) | |
return 0; | |
apic_pm_state.apic_id = apic_read(APIC_ID); | |
@@ -708,10 +729,11 @@ int lapic_suspend(void) | |
apic_pm_state.apic_dfr = apic_read(APIC_DFR); | |
apic_pm_state.apic_spiv = apic_read(APIC_SPIV); | |
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); | |
- if (maxlvt >= 4) | |
+ if ( maxlvt >= 4 ) | |
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); | |
- if (maxlvt >= 6) { | |
+ if ( maxlvt >= 6 ) | |
+ { | |
apic_pm_state.apic_lvtcmci = apic_read(APIC_CMCI); | |
} | |
@@ -720,7 +742,7 @@ int lapic_suspend(void) | |
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); | |
apic_pm_state.apic_tmict = apic_read(APIC_TMICT); | |
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); | |
- if (maxlvt >= 5) | |
+ if ( maxlvt >= 5 ) | |
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); | |
local_irq_save(flags); | |
@@ -736,7 +758,7 @@ int lapic_resume(void) | |
unsigned long flags; | |
int maxlvt; | |
- if (!apic_pm_state.active) | |
+ if ( !apic_pm_state.active ) | |
return 0; | |
local_irq_save(flags); | |
@@ -751,8 +773,7 @@ int lapic_resume(void) | |
{ | |
rdmsrl(MSR_APIC_BASE, msr_content); | |
msr_content &= ~APIC_BASE_ADDR_MASK; | |
- wrmsrl(MSR_APIC_BASE, | |
- msr_content | APIC_BASE_ENABLE | mp_lapic_addr); | |
+ wrmsrl(MSR_APIC_BASE, msr_content | APIC_BASE_ENABLE | mp_lapic_addr); | |
} | |
else | |
resume_x2apic(); | |
@@ -766,14 +787,15 @@ int lapic_resume(void) | |
apic_write(APIC_SPIV, apic_pm_state.apic_spiv); | |
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); | |
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); | |
- if (maxlvt >= 5) | |
+ if ( maxlvt >= 5 ) | |
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); | |
- if (maxlvt >= 6) { | |
+ if ( maxlvt >= 6 ) | |
+ { | |
apic_write(APIC_CMCI, apic_pm_state.apic_lvtcmci); | |
} | |
- if (maxlvt >= 4) | |
+ if ( maxlvt >= 4 ) | |
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); | |
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); | |
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); | |
@@ -787,7 +809,6 @@ int lapic_resume(void) | |
return 0; | |
} | |
- | |
/* | |
* Detect and enable local APICs on non-SMP boards. | |
* Original code written by Keir Fraser. | |
@@ -804,9 +825,9 @@ boolean_param("lapic", enable_local_apic); | |
static int __init apic_set_verbosity(const char *str) | |
{ | |
- if (strcmp("debug", str) == 0) | |
+ if ( strcmp("debug", str) == 0 ) | |
apic_verbosity = APIC_DEBUG; | |
- else if (strcmp("verbose", str) == 0) | |
+ else if ( strcmp("verbose", str) == 0 ) | |
apic_verbosity = APIC_VERBOSE; | |
else | |
return -EINVAL; | |
@@ -815,12 +836,12 @@ static int __init apic_set_verbosity(const char *str) | |
} | |
custom_param("apic_verbosity", apic_set_verbosity); | |
-static int __init detect_init_APIC (void) | |
+static int __init detect_init_APIC(void) | |
{ | |
uint64_t msr_content; | |
/* Disabled by kernel option? */ | |
- if (enable_local_apic < 0) | |
+ if ( enable_local_apic < 0 ) | |
return -1; | |
if ( rdmsr_safe(MSR_APIC_BASE, msr_content) ) | |
@@ -829,12 +850,14 @@ static int __init detect_init_APIC (void) | |
return -1; | |
} | |
- if (!cpu_has_apic) { | |
+ if ( !cpu_has_apic ) | |
+ { | |
/* | |
* Over-ride BIOS and try to enable the local | |
* APIC only if "lapic" specified. | |
*/ | |
- if (enable_local_apic <= 0) { | |
+ if ( enable_local_apic <= 0 ) | |
+ { | |
printk("Local APIC disabled by BIOS -- " | |
"you can enable it with \"lapic\"\n"); | |
return -1; | |
@@ -858,7 +881,8 @@ static int __init detect_init_APIC (void) | |
* The APIC feature bit should now be enabled | |
* in `cpuid' | |
*/ | |
- if (!(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC))) { | |
+ if ( !(cpuid_edx(1) & cpufeat_mask(X86_FEATURE_APIC)) ) | |
+ { | |
printk("Could not enable APIC!\n"); | |
return -1; | |
} | |
@@ -870,7 +894,7 @@ static int __init detect_init_APIC (void) | |
if ( msr_content & APIC_BASE_ENABLE ) | |
mp_lapic_addr = msr_content & APIC_BASE_ADDR_MASK; | |
- if (nmi_watchdog != NMI_NONE) | |
+ if ( nmi_watchdog != NMI_NONE ) | |
nmi_watchdog = NMI_LOCAL_APIC; | |
printk("Found and enabled local APIC!\n"); | |
@@ -900,7 +924,7 @@ void __init x2apic_bsp_setup(void) | |
{ | |
printk("Not enabling x2APIC: disabled by cmdline.\n"); | |
return; | |
- } | |
+ } | |
printk("x2APIC: Already enabled by BIOS: Ignoring cmdline disable.\n"); | |
} | |
@@ -929,7 +953,7 @@ void __init x2apic_bsp_setup(void) | |
mask_8259A(); | |
mask_IO_APIC_setup(ioapic_entries); | |
- switch ( iommu_enable_x2apic() ) | |
+ switch (iommu_enable_x2apic()) | |
{ | |
case 0: | |
break; | |
@@ -945,8 +969,9 @@ void __init x2apic_bsp_setup(void) | |
panic("Interrupt remapping could not be enabled while " | |
"x2APIC is already enabled by BIOS\n"); | |
- printk(XENLOG_ERR | |
- "Failed to enable Interrupt Remapping: Will not enable x2APIC.\n"); | |
+ printk( | |
+ XENLOG_ERR | |
+ "Failed to enable Interrupt Remapping: Will not enable x2APIC.\n"); | |
goto restore_out; | |
} | |
@@ -983,10 +1008,12 @@ void __init init_apic_mappings(void) | |
* zeroes page to simulate the local APIC and another | |
* one for the IO-APIC. | |
*/ | |
- if (!smp_found_config && detect_init_APIC()) { | |
+ if ( !smp_found_config && detect_init_APIC() ) | |
+ { | |
apic_phys = __pa(alloc_xenheap_page()); | |
clear_page(__va(apic_phys)); | |
- } else | |
+ } | |
+ else | |
apic_phys = mp_lapic_addr; | |
set_fixmap_nocache(FIX_APIC_BASE, apic_phys); | |
@@ -998,7 +1025,7 @@ __next: | |
* Fetch the APIC ID of the BSP in case we have a | |
* default configuration (or the MP table is broken). | |
*/ | |
- if (boot_cpu_physical_apicid == -1U) | |
+ if ( boot_cpu_physical_apicid == -1U ) | |
boot_cpu_physical_apicid = get_apic_id(); | |
x86_cpu_to_apicid[0] = get_apic_id(); | |
@@ -1007,15 +1034,15 @@ __next: | |
/***************************************************************************** | |
* APIC calibration | |
- * | |
+ * | |
* The APIC is programmed in bus cycles. | |
* Timeout values should be specified in real time units. | |
* The "cheapest" time source is the cyclecounter. | |
- * | |
+ * | |
* Thus, we need a mapping from: bus cycles <- cycle counter <- system time | |
- * | |
+ * | |
* The calibration is currently a bit shoddy since it requires the external | |
- * timer chip to generate periodic timer interrupts. | |
+ * timer chip to generate periodic timer interrupts. | |
*****************************************************************************/ | |
/* used for system time scaling */ | |
@@ -1048,17 +1075,17 @@ static unsigned int __init get_8254_timer_count(void) | |
static void __init wait_8254_wraparound(void) | |
{ | |
unsigned int curr_count, prev_count; | |
- | |
+ | |
curr_count = get_8254_timer_count(); | |
do { | |
prev_count = curr_count; | |
curr_count = get_8254_timer_count(); | |
/* workaround for broken Mercury/Neptune */ | |
- if (prev_count >= curr_count + 0x100) | |
+ if ( prev_count >= curr_count + 0x100 ) | |
curr_count = get_8254_timer_count(); | |
- | |
- } while (prev_count >= curr_count); | |
+ | |
+ } while ( prev_count >= curr_count ); | |
} | |
/* | |
@@ -1094,7 +1121,7 @@ static void __setup_APIC_LVTT(unsigned int clocks) | |
* writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. | |
* According to Intel, MFENCE can do the serialization here. | |
*/ | |
- asm volatile( "mfence" : : : "memory" ); | |
+ asm volatile("mfence" : : : "memory"); | |
tmp_value = apic_read(APIC_TDCR); | |
apic_write(APIC_TDCR, tmp_value | APIC_TDR_DIV_1); | |
@@ -1110,17 +1137,21 @@ static void setup_APIC_timer(void) | |
local_irq_restore(flags); | |
} | |
-#define DEADLINE_MODEL_MATCH(m, fr) \ | |
- { .vendor = X86_VENDOR_INTEL, .family = 6, .model = (m), \ | |
- .feature = X86_FEATURE_TSC_DEADLINE, \ | |
- .driver_data = (void *)(unsigned long)(fr) } | |
+#define DEADLINE_MODEL_MATCH(m, fr) \ | |
+ { \ | |
+ .vendor = X86_VENDOR_INTEL, .family = 6, .model = (m), \ | |
+ .feature = X86_FEATURE_TSC_DEADLINE, \ | |
+ .driver_data = (void *)(unsigned long)(fr) \ | |
+ } | |
static unsigned int __init hsx_deadline_rev(void) | |
{ | |
- switch ( boot_cpu_data.x86_mask ) | |
+ switch (boot_cpu_data.x86_mask) | |
{ | |
- case 0x02: return 0x3a; /* EP */ | |
- case 0x04: return 0x0f; /* EX */ | |
+ case 0x02: | |
+ return 0x3a; /* EP */ | |
+ case 0x04: | |
+ return 0x0f; /* EX */ | |
} | |
return ~0U; | |
@@ -1128,12 +1159,16 @@ static unsigned int __init hsx_deadline_rev(void) | |
static unsigned int __init bdx_deadline_rev(void) | |
{ | |
- switch ( boot_cpu_data.x86_mask ) | |
+ switch (boot_cpu_data.x86_mask) | |
{ | |
- case 0x02: return 0x00000011; | |
- case 0x03: return 0x0700000e; | |
- case 0x04: return 0x0f00000c; | |
- case 0x05: return 0x0e000003; | |
+ case 0x02: | |
+ return 0x00000011; | |
+ case 0x03: | |
+ return 0x0700000e; | |
+ case 0x04: | |
+ return 0x0f00000c; | |
+ case 0x05: | |
+ return 0x0e000003; | |
} | |
return ~0U; | |
@@ -1141,11 +1176,14 @@ static unsigned int __init bdx_deadline_rev(void) | |
static unsigned int __init skx_deadline_rev(void) | |
{ | |
- switch ( boot_cpu_data.x86_mask ) | |
+ switch (boot_cpu_data.x86_mask) | |
{ | |
- case 0x00 ... 0x02: return ~0U; | |
- case 0x03: return 0x01000136; | |
- case 0x04: return 0x02000014; | |
+ case 0x00 ... 0x02: | |
+ return ~0U; | |
+ case 0x03: | |
+ return 0x01000136; | |
+ case 0x04: | |
+ return 0x02000014; | |
} | |
return 0; | |
@@ -1166,11 +1204,10 @@ static const struct x86_cpu_id __initconstrel deadline_match[] = { | |
DEADLINE_MODEL_MATCH(0x55, skx_deadline_rev), /* Skylake X */ | |
DEADLINE_MODEL_MATCH(0x5e, 0xb2), /* Skylake D */ | |
- DEADLINE_MODEL_MATCH(0x8e, 0x52), /* Kabylake M */ | |
- DEADLINE_MODEL_MATCH(0x9e, 0x52), /* Kabylake D */ | |
+ DEADLINE_MODEL_MATCH(0x8e, 0x52), /* Kabylake M */ | |
+ DEADLINE_MODEL_MATCH(0x9e, 0x52), /* Kabylake D */ | |
- {} | |
-}; | |
+ {}}; | |
static void __init check_deadline_errata(void) | |
{ | |
@@ -1197,8 +1234,10 @@ static void __init check_deadline_errata(void) | |
return; | |
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE); | |
- printk(XENLOG_WARNING "TSC_DEADLINE disabled due to Errata; " | |
- "please update microcode to version %#x (or later)\n", rev); | |
+ printk(XENLOG_WARNING | |
+ "TSC_DEADLINE disabled due to Errata; " | |
+ "please update microcode to version %#x (or later)\n", | |
+ rev); | |
} | |
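The deadline_match table and the *_deadline_rev helpers above map an Intel model (and, per model, a stepping) to the minimum microcode revision that fixes the TSC_DEADLINE errata; check_deadline_errata clears the feature when the running microcode is older. A minimal standalone sketch of that lookup shape follows; it is plain C rather than the real x86_cpu_id matching, the CPU identity and microcode inputs are hypothetical, and the Skylake-X revision numbers are taken from the hunk above. It uses GNU case ranges, as the hunk itself does.

    #include <stdio.h>

    struct deadline_fix {
        unsigned int model;
        unsigned int (*rev_of_stepping)(unsigned int); /* NULL: fixed rev */
        unsigned int fixed_rev;
    };

    /* Per-stepping minimum microcode revision, mirroring skx_deadline_rev. */
    static unsigned int skx_rev(unsigned int stepping)
    {
        switch ( stepping )
        {
        case 0x00 ... 0x02: return ~0U;       /* no fix on these steppings */
        case 0x03:          return 0x01000136;
        case 0x04:          return 0x02000014;
        }
        return 0;                             /* later steppings are fine */
    }

    static const struct deadline_fix table[] = {
        { 0x55, skx_rev, 0 },    /* Skylake X */
        { 0x5e, NULL, 0xb2 },    /* Skylake D */
        { 0 },
    };

    int main(void)
    {
        /* Hypothetical CPU: model 0x55, stepping 3, microcode 0x01000135. */
        unsigned int model = 0x55, stepping = 0x03, ucode = 0x01000135;

        for ( const struct deadline_fix *f = table; f->model; f++ )
        {
            unsigned int rev;

            if ( f->model != model )
                continue;
            rev = f->rev_of_stepping ? f->rev_of_stepping(stepping)
                                     : f->fixed_rev;
            if ( ucode >= rev )
                printf("TSC deadline usable\n");
            else
                printf("disable TSC deadline; need microcode %#x\n", rev);
        }
        return 0;
    }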
static void wait_tick_pvh(void) | |
@@ -1236,7 +1275,7 @@ static int __init calibrate_APIC_clock(void) | |
int i; | |
unsigned long bus_freq; /* KAF: pointer-size avoids compile warns. */ | |
u32 bus_cycle; /* length of one bus cycle in pico-seconds */ | |
- const int LOOPS = HZ/10; | |
+ const int LOOPS = HZ / 10; | |
apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n"); | |
@@ -1266,7 +1305,7 @@ static int __init calibrate_APIC_clock(void) | |
/* | |
* Let's wait LOOPS ticks: | |
*/ | |
- for (i = 0; i < LOOPS; i++) | |
+ for ( i = 0; i < LOOPS; i++ ) | |
if ( !xen_guest ) | |
wait_8254_wraparound(); | |
else | |
@@ -1283,7 +1322,7 @@ static int __init calibrate_APIC_clock(void) | |
* underflown to be exact, as the timer counts down ;) | |
*/ | |
- result = (tt1-tt2)*APIC_DIVISOR/LOOPS; | |
+ result = (tt1 - tt2) * APIC_DIVISOR / LOOPS; | |
apic_printk(APIC_VERBOSE, "..... CPU clock speed is %ld.%04ld MHz.\n", | |
((long)(t2 - t1) / LOOPS) / (1000000 / HZ), | |
@@ -1293,9 +1332,9 @@ static int __init calibrate_APIC_clock(void) | |
result / (1000000 / HZ), result % (1000000 / HZ)); | |
/* set up multipliers for accurate timer code */ | |
- bus_freq = result*HZ; | |
- bus_cycle = (u32) (1000000000000LL/bus_freq); /* in pico seconds */ | |
- bus_scale = (1000*262144)/bus_cycle; | |
+ bus_freq = result * HZ; | |
+ bus_cycle = (u32)(1000000000000LL / bus_freq); /* in pico seconds */ | |
+ bus_scale = (1000 * 262144) / bus_cycle; | |
apic_printk(APIC_VERBOSE, "..... bus_scale = %#x\n", bus_scale); | |
/* reset APIC to zero timeout value */ | |
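The calibration comment earlier in this hunk describes the chain bus cycles <- cycle counter <- system time, and the code above turns the measured TMCCT delta into bus_freq, bus_cycle, and the fixed-point bus_scale multiplier. A hedged worked example of just that arithmetic: standalone C, where HZ, APIC_DIVISOR, and the two counter readings are made-up inputs rather than values from a real run.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Example inputs; real values come from the 8254-bounded loop. */
        const long HZ = 100;             /* assumed timer tick rate */
        const long APIC_DIVISOR = 1;     /* assumed divide-by-1 setup */
        const int LOOPS = HZ / 10;
        long tt1 = 1000000, tt2 = 750000; /* hypothetical TMCCT readings */

        /* APIC timer counts per OS tick (the timer counts down). */
        long result = (tt1 - tt2) * APIC_DIVISOR / LOOPS;

        /* Bus frequency in Hz, one bus cycle in picoseconds, then the
         * (1000 * 262144) / bus_cycle multiplier used by the timer code. */
        long bus_freq = result * HZ;
        uint32_t bus_cycle = (uint32_t)(1000000000000LL / bus_freq);
        uint32_t bus_scale = (1000 * 262144) / bus_cycle;

        printf("result=%ld bus_freq=%ld bus_cycle=%u bus_scale=%#x\n",
               result, bus_freq, (unsigned)bus_cycle, (unsigned)bus_scale);
        return 0;
    }

With these inputs: result is 25000 counts per tick, bus_freq 2.5 MHz, bus_cycle 400000 ps, bus_scale 0x28f.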
@@ -1323,7 +1362,7 @@ void __init setup_boot_APIC_clock(void) | |
} | |
setup_APIC_timer(); | |
- | |
+ | |
local_irq_restore(flags); | |
} | |
@@ -1334,7 +1373,8 @@ void setup_secondary_APIC_clock(void) | |
void disable_APIC_timer(void) | |
{ | |
- if (using_apic_timer) { | |
+ if ( using_apic_timer ) | |
+ { | |
unsigned long v; | |
/* Work around AMD Erratum 411. This is a nice thing to do anyway. */ | |
@@ -1347,9 +1387,10 @@ void disable_APIC_timer(void) | |
void enable_APIC_timer(void) | |
{ | |
- if (using_apic_timer) { | |
+ if ( using_apic_timer ) | |
+ { | |
unsigned long v; | |
- | |
+ | |
v = apic_read(APIC_LVTT); | |
apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED); | |
} | |
@@ -1385,7 +1426,7 @@ int reprogram_timer(s_time_t timeout) | |
return apic_tmict || !timeout; | |
} | |
-void apic_timer_interrupt(struct cpu_user_regs * regs) | |
+void apic_timer_interrupt(struct cpu_user_regs *regs) | |
{ | |
ack_APIC_irq(); | |
perfc_incr(apic_timer); | |
@@ -1411,9 +1452,11 @@ void spurious_interrupt(struct cpu_user_regs *regs) | |
* a request to dump local CPU state). Vectored interrupts are ACKed; | |
* spurious interrupts are not. | |
*/ | |
- if (apic_isr_read(SPURIOUS_APIC_VECTOR)) { | |
+ if ( apic_isr_read(SPURIOUS_APIC_VECTOR) ) | |
+ { | |
ack_APIC_irq(); | |
- if (this_cpu(state_dump_pending)) { | |
+ if ( this_cpu(state_dump_pending) ) | |
+ { | |
this_cpu(state_dump_pending) = false; | |
dump_execstate(regs); | |
return; | |
@@ -1422,7 +1465,8 @@ void spurious_interrupt(struct cpu_user_regs *regs) | |
/* see sw-dev-man vol 3, chapter 7.4.13.5 */ | |
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should " | |
- "never happen.\n", smp_processor_id()); | |
+ "never happen.\n", | |
+ smp_processor_id()); | |
} | |
/* | |
@@ -1432,14 +1476,10 @@ void spurious_interrupt(struct cpu_user_regs *regs) | |
void error_interrupt(struct cpu_user_regs *regs) | |
{ | |
static const char *const esr_fields[] = { | |
- "Send CS error", | |
- "Receive CS error", | |
- "Send accept error", | |
- "Receive accept error", | |
- "Redirectable IPI", | |
- "Send illegal vector", | |
- "Received illegal vector", | |
- "Illegal register address", | |
+ "Send CS error", "Receive CS error", | |
+ "Send accept error", "Receive accept error", | |
+ "Redirectable IPI", "Send illegal vector", | |
+ "Received illegal vector", "Illegal register address", | |
}; | |
unsigned int v, v1; | |
int i; | |
@@ -1450,8 +1490,8 @@ void error_interrupt(struct cpu_user_regs *regs) | |
v1 = apic_read(APIC_ESR); | |
ack_APIC_irq(); | |
- printk(XENLOG_DEBUG "APIC error on CPU%u: %02x(%02x)", | |
- smp_processor_id(), v , v1); | |
+ printk(XENLOG_DEBUG "APIC error on CPU%u: %02x(%02x)", smp_processor_id(), | |
+ v, v1); | |
for ( i = 7; i >= 0; --i ) | |
if ( v1 & (1 << i) ) | |
printk(", %s", esr_fields[i]); | |
@@ -1472,12 +1512,13 @@ void pmu_apic_interrupt(struct cpu_user_regs *regs) | |
* This initializes the IO-APIC and APIC hardware if this is | |
* a UP kernel. | |
*/ | |
-int __init APIC_init_uniprocessor (void) | |
+int __init APIC_init_uniprocessor(void) | |
{ | |
- if (enable_local_apic < 0) | |
+ if ( enable_local_apic < 0 ) | |
setup_clear_cpu_cap(X86_FEATURE_APIC); | |
- if (!smp_found_config && !cpu_has_apic) { | |
+ if ( !smp_found_config && !cpu_has_apic ) | |
+ { | |
skip_ioapic_setup = true; | |
return -1; | |
} | |
@@ -1485,7 +1526,8 @@ int __init APIC_init_uniprocessor (void) | |
/* | |
* Complain if the BIOS pretends there is one. | |
*/ | |
- if (!cpu_has_apic) { | |
+ if ( !cpu_has_apic ) | |
+ { | |
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | |
boot_cpu_physical_apicid); | |
skip_ioapic_setup = true; | |
@@ -1509,11 +1551,11 @@ int __init APIC_init_uniprocessor (void) | |
setup_local_APIC(); | |
- if (nmi_watchdog == NMI_LOCAL_APIC) | |
+ if ( nmi_watchdog == NMI_LOCAL_APIC ) | |
check_nmi_watchdog(); | |
- if (smp_found_config) | |
- if (!skip_ioapic_setup && nr_ioapics) | |
+ if ( smp_found_config ) | |
+ if ( !skip_ioapic_setup && nr_ioapics ) | |
setup_IO_APIC(); | |
setup_boot_APIC_clock(); | |
@@ -1521,20 +1563,20 @@ int __init APIC_init_uniprocessor (void) | |
return 0; | |
} | |
-static const char * __init apic_mode_to_str(const enum apic_mode mode) | |
+static const char *__init apic_mode_to_str(const enum apic_mode mode) | |
{ | |
- switch ( mode ) | |
+ switch (mode) | |
{ | |
- case APIC_MODE_INVALID: | |
- return "invalid"; | |
- case APIC_MODE_DISABLED: | |
- return "disabled"; | |
- case APIC_MODE_XAPIC: | |
- return "xapic"; | |
- case APIC_MODE_X2APIC: | |
- return "x2apic"; | |
- default: | |
- return "unrecognised"; | |
+ case APIC_MODE_INVALID: | |
+ return "invalid"; | |
+ case APIC_MODE_DISABLED: | |
+ return "disabled"; | |
+ case APIC_MODE_XAPIC: | |
+ return "xapic"; | |
+ case APIC_MODE_X2APIC: | |
+ return "x2apic"; | |
+ default: | |
+ return "unrecognised"; | |
} | |
} | |
@@ -1574,7 +1616,6 @@ enum apic_mode current_local_apic_mode(void) | |
return APIC_MODE_DISABLED; | |
} | |
- | |
void check_for_unexpected_msi(unsigned int vector) | |
{ | |
BUG_ON(apic_isr_read(vector)); | |
diff --git a/xen/arch/x86/bitops.c b/xen/arch/x86/bitops.c | |
index f6ee71512c..7da3714b8b 100644 | |
--- a/xen/arch/x86/bitops.c | |
+++ b/xen/arch/x86/bitops.c | |
@@ -2,30 +2,30 @@ | |
#include <xen/bitops.h> | |
#include <xen/lib.h> | |
-unsigned int __find_first_bit( | |
- const unsigned long *addr, unsigned int size) | |
+unsigned int __find_first_bit(const unsigned long *addr, unsigned int size) | |
{ | |
unsigned long d0, d1, res; | |
- asm volatile ( | |
- "1: xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */ | |
- " repe; scas"__OS"\n\t" | |
- " je 2f\n\t" | |
- " bsf -"STR(BITS_PER_LONG/8)"(%2),%0\n\t" | |
- " jz 1b\n\t" | |
- " lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t" | |
- "2: sub %%ebx,%%edi\n\t" | |
- " shl $3,%%edi\n\t" | |
- " add %%edi,%%eax" | |
- : "=&a" (res), "=&c" (d0), "=&D" (d1) | |
- : "1" (BITS_TO_LONGS(size)), "2" (addr), "b" ((int)(long)addr) | |
- : "memory" ); | |
+ asm volatile("1: xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */ | |
+ " repe; scas" __OS "\n\t" | |
+ " je 2f\n\t" | |
+ " bsf -" STR(BITS_PER_LONG / | |
+ 8) "(%2),%0\n\t" | |
+ " jz 1b\n\t" | |
+ " lea -" STR(BITS_PER_LONG / | |
+ 8) "(%2),%2\n\t" | |
+ "2: sub %%ebx,%%edi\n\t" | |
+ " shl $3,%%edi\n\t" | |
+ " add %%edi,%%eax" | |
+ : "=&a"(res), "=&c"(d0), "=&D"(d1) | |
+ : "1"(BITS_TO_LONGS(size)), "2"(addr), "b"((int)(long)addr) | |
+ : "memory"); | |
return res; | |
} | |
-unsigned int __find_next_bit( | |
- const unsigned long *addr, unsigned int size, unsigned int offset) | |
+unsigned int __find_next_bit(const unsigned long *addr, unsigned int size, | |
+ unsigned int offset) | |
{ | |
const unsigned long *p = addr + (offset / BITS_PER_LONG); | |
unsigned int set, bit = offset & (BITS_PER_LONG - 1); | |
@@ -50,32 +50,33 @@ unsigned int __find_next_bit( | |
return (offset + set); | |
} | |
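The hand-written repe scas / bsf asm in __find_first_bit above scans the bitmap a word at a time, and __find_next_bit handles the unaligned head in C. As a hedged reference point, here is a portable version of the find-first contract: the index of the first set bit, or a value not below size when nothing is set in range. It is a sketch for illustration, not the tuned implementation.

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

    /* Portable equivalent: first set bit's index, or >= size if none. */
    static unsigned int find_first_bit_ref(const unsigned long *addr,
                                           unsigned int size)
    {
        unsigned int i, b;

        for ( i = 0; i * BITS_PER_LONG < size; i++ )
            if ( addr[i] )
                for ( b = 0; b < BITS_PER_LONG; b++ )
                    if ( addr[i] & (1UL << b) )
                        return i * BITS_PER_LONG + b;
        return size;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0x10 };  /* first set bit: word 1, bit 4 */

        printf("%u\n", find_first_bit_ref(map, 128)); /* 68 on LP64 targets */
        return 0;
    }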
-unsigned int __find_first_zero_bit( | |
- const unsigned long *addr, unsigned int size) | |
+unsigned int __find_first_zero_bit(const unsigned long *addr, | |
+ unsigned int size) | |
{ | |
unsigned long d0, d1, d2, res; | |
- asm volatile ( | |
- "1: xor %%eax,%%eax ; not %3\n\t" /* rAX == ~0ul */ | |
- " xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */ | |
- " repe; scas"__OS"\n\t" | |
- " je 2f\n\t" | |
- " xor -"STR(BITS_PER_LONG/8)"(%2),%3\n\t" | |
- " jz 1b\n\t" | |
- " rep; bsf %3,%0\n\t" | |
- " lea -"STR(BITS_PER_LONG/8)"(%2),%2\n\t" | |
- "2: sub %%ebx,%%edi\n\t" | |
- " shl $3,%%edi\n\t" | |
- " add %%edi,%%edx" | |
- : "=&d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) | |
- : "1" (BITS_TO_LONGS(size)), "2" (addr), "b" ((int)(long)addr) | |
- : "memory" ); | |
+ asm volatile("1: xor %%eax,%%eax ; not %3\n\t" /* rAX == ~0ul */ | |
+ " xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */ | |
+ " repe; scas" __OS "\n\t" | |
+ " je 2f\n\t" | |
+ " xor -" STR(BITS_PER_LONG / | |
+ 8) "(%2),%3\n\t" | |
+ " jz 1b\n\t" | |
+ " rep; bsf %3,%0\n\t" | |
+ " lea -" STR(BITS_PER_LONG / | |
+ 8) "(%2),%2\n\t" | |
+ "2: sub %%ebx,%%edi\n\t" | |
+ " shl $3,%%edi\n\t" | |
+ " add %%edi,%%edx" | |
+ : "=&d"(res), "=&c"(d0), "=&D"(d1), "=&a"(d2) | |
+ : "1"(BITS_TO_LONGS(size)), "2"(addr), "b"((int)(long)addr) | |
+ : "memory"); | |
return res; | |
} | |
-unsigned int __find_next_zero_bit( | |
- const unsigned long *addr, unsigned int size, unsigned int offset) | |
+unsigned int __find_next_zero_bit(const unsigned long *addr, unsigned int size, | |
+ unsigned int offset) | |
{ | |
const unsigned long *p = addr + (offset / BITS_PER_LONG); | |
unsigned int set, bit = offset & (BITS_PER_LONG - 1); | |
diff --git a/xen/arch/x86/boot/cmdline.c b/xen/arch/x86/boot/cmdline.c | |
index fc11c6d3c5..432b467ed3 100644 | |
--- a/xen/arch/x86/boot/cmdline.c | |
+++ b/xen/arch/x86/boot/cmdline.c | |
@@ -23,19 +23,18 @@ | |
* - 0x4(%esp) = &cmdline, | |
* - 0x8(%esp) = &early_boot_opts. | |
*/ | |
-asm ( | |
- " .text \n" | |
+asm(" .text \n" | |
" .globl _start \n" | |
"_start: \n" | |
- " jmp cmdline_parse_early \n" | |
- ); | |
+ " jmp cmdline_parse_early \n"); | |
#include <xen/kconfig.h> | |
#include "defs.h" | |
#include "video.h" | |
/* Keep in sync with trampoline.S:early_boot_opts label! */ | |
-typedef struct __packed { | |
+typedef struct __packed | |
+{ | |
u8 skip_realmode; | |
u8 opt_edd; | |
u8 opt_edid; | |
@@ -55,7 +54,7 @@ typedef struct __packed { | |
*/ | |
static const char delim_chars_comma[] = ", \n\r\t"; | |
-#define delim_chars (delim_chars_comma + 1) | |
+#define delim_chars (delim_chars_comma + 1) | |
static size_t strlen(const char *s) | |
{ | |
@@ -129,14 +128,14 @@ static size_t strcspn(const char *s, const char *reject) | |
return count; | |
} | |
-static unsigned int __maybe_unused strtoui( | |
- const char *s, const char *stop, const char **next) | |
+static unsigned int __maybe_unused strtoui(const char *s, const char *stop, | |
+ const char **next) | |
{ | |
char base = 10, l; | |
unsigned long long res = 0; | |
if ( *s == '0' ) | |
- base = (tolower(*++s) == 'x') ? (++s, 16) : 8; | |
+ base = (tolower(*++s) == 'x') ? (++s, 16) : 8; | |
for ( ; *s != '\0'; ++s ) | |
{ | |
@@ -167,9 +166,9 @@ static unsigned int __maybe_unused strtoui( | |
} | |
} | |
- out: | |
+out: | |
if ( next ) | |
- *next = s; | |
+ *next = s; | |
return res; | |
} | |
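strtoui above picks its base from the prefix: 0x means hex, a bare leading 0 means octal, anything else decimal, and it reports the stop position through *next. The standard strtoul with base 0 applies the same prefix rule, so a short demonstration of that behaviour (with hypothetical inputs) looks like this:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Base auto-detection, as in the boot parser: 0x.. is hex,
         * 0.. is octal, otherwise decimal. */
        const char *examples[] = { "0x1a", "017", "42" };
        char *next;
        int i;

        for ( i = 0; i < 3; i++ )
        {
            unsigned long v = strtoul(examples[i], &next, 0);

            printf("%-5s -> %lu (stopped at \"%s\")\n", examples[i], v, next);
        }
        return 0;
    }

This prints 26, 15, and 42 respectively, with parsing stopping at the terminating NUL in each case.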
@@ -190,7 +189,7 @@ static const char *find_opt(const char *cmdline, const char *opt, bool arg) | |
lo = strlen(opt); | |
- for ( ; ; ) | |
+ for ( ;; ) | |
{ | |
cmdline += strspn(cmdline, delim_chars); | |
@@ -211,7 +210,8 @@ static const char *find_opt(const char *cmdline, const char *opt, bool arg) | |
static bool skip_realmode(const char *cmdline) | |
{ | |
- return find_opt(cmdline, "no-real-mode", false) || find_opt(cmdline, "tboot=", true); | |
+ return find_opt(cmdline, "no-real-mode", false) || | |
+ find_opt(cmdline, "tboot=", true); | |
} | |
static u8 edd_parse(const char *cmdline) | |
@@ -247,7 +247,7 @@ static u8 edid_parse(const char *cmdline) | |
#ifdef CONFIG_VIDEO | |
static u16 rows2vmode(unsigned int rows) | |
{ | |
- switch ( rows ) | |
+ switch (rows) | |
{ | |
case 25: | |
return VIDEO_80x25; | |
diff --git a/xen/arch/x86/boot/defs.h b/xen/arch/x86/boot/defs.h | |
index 21d292cd73..8683cd8607 100644 | |
--- a/xen/arch/x86/boot/defs.h | |
+++ b/xen/arch/x86/boot/defs.h | |
@@ -22,30 +22,33 @@ | |
#include "../../../include/xen/stdbool.h" | |
-#define __maybe_unused __attribute__((__unused__)) | |
-#define __packed __attribute__((__packed__)) | |
-#define __stdcall __attribute__((__stdcall__)) | |
+#define __maybe_unused __attribute__((__unused__)) | |
+#define __packed __attribute__((__packed__)) | |
+#define __stdcall __attribute__((__stdcall__)) | |
-#define NULL ((void *)0) | |
+#define NULL ((void *)0) | |
-#define ALIGN_UP(arg, align) \ | |
- (((arg) + (align) - 1) & ~((typeof(arg))(align) - 1)) | |
+#define ALIGN_UP(arg, align) (((arg) + (align)-1) & ~((typeof(arg))(align)-1)) | |
-#define min(x,y) ({ \ | |
- const typeof(x) _x = (x); \ | |
- const typeof(y) _y = (y); \ | |
- (void) (&_x == &_y); \ | |
- _x < _y ? _x : _y; }) | |
+#define min(x, y) \ | |
+ ({ \ | |
+ const typeof(x) _x = (x); \ | |
+ const typeof(y) _y = (y); \ | |
+ (void)(&_x == &_y); \ | |
+ _x < _y ? _x : _y; \ | |
+ }) | |
-#define max(x,y) ({ \ | |
- const typeof(x) _x = (x); \ | |
- const typeof(y) _y = (y); \ | |
- (void) (&_x == &_y); \ | |
- _x > _y ? _x : _y; }) | |
+#define max(x, y) \ | |
+ ({ \ | |
+ const typeof(x) _x = (x); \ | |
+ const typeof(y) _y = (y); \ | |
+ (void)(&_x == &_y); \ | |
+ _x > _y ? _x : _y; \ | |
+ }) | |
-#define _p(val) ((void *)(unsigned long)(val)) | |
+#define _p(val) ((void *)(unsigned long)(val)) | |
-#define tolower(c) ((c) | 0x20) | |
+#define tolower(c) ((c) | 0x20) | |
typedef unsigned char u8; | |
typedef unsigned short u16; | |
@@ -56,7 +59,7 @@ typedef u8 uint8_t; | |
typedef u32 uint32_t; | |
typedef u64 uint64_t; | |
-#define U16_MAX ((u16)(~0U)) | |
-#define UINT_MAX (~0U) | |
+#define U16_MAX ((u16)(~0U)) | |
+#define UINT_MAX (~0U) | |
#endif /* __BOOT_DEFS_H__ */ | |
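The min/max macros in this header rely on GNU statement expressions so each argument is evaluated exactly once, and the (void)(&_x == &_y) line exists only to make the compiler warn when the two arguments have incompatible types, since comparing pointers to different types is diagnosable. A small demonstration, assuming GNU C for typeof and ({ ... }):

    #include <stdio.h>

    /* GNU C statement-expression min: each argument evaluated exactly once;
     * (void)(&_x == &_y) triggers a warning on mismatched argument types. */
    #define min(x, y)                            \
        ({                                       \
            const typeof(x) _x = (x);            \
            const typeof(y) _y = (y);            \
            (void)(&_x == &_y);                  \
            _x < _y ? _x : _y;                   \
        })

    int main(void)
    {
        int a = 3, b = 7;

        printf("%d\n", min(a++, b));   /* prints 3; a++ evaluated once */
        printf("%d\n", a);             /* prints 4, not 5 */
        return 0;
    }

A naive #define min(x, y) ((x) < (y) ? (x) : (y)) would evaluate a++ twice here, which is the hazard the statement-expression form avoids.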
diff --git a/xen/arch/x86/boot/mkelf32.c b/xen/arch/x86/boot/mkelf32.c | |
index bcbde1a056..fa771985a9 100644 | |
--- a/xen/arch/x86/boot/mkelf32.c | |
+++ b/xen/arch/x86/boot/mkelf32.c | |
@@ -1,10 +1,10 @@ | |
/****************************************************************************** | |
* mkelf32.c | |
- * | |
+ * | |
* Usage: elf-prefix <in-image> <out-image> <load-base> | |
- * | |
+ * | |
* Converts an Elf64 executable binary <in-image> into a simple Elf32 | |
- * image <out-image> comprising a single chunk to be loaded at <load-base>. | |
+ * image <out-image> comprising a single chunk to be loaded at <load-base>. | |
*/ | |
#include <errno.h> | |
@@ -17,59 +17,61 @@ | |
#include <unistd.h> | |
#include <inttypes.h> | |
-#define u8 uint8_t | |
+#define u8 uint8_t | |
#define u16 uint16_t | |
#define u32 uint32_t | |
#define u64 uint64_t | |
-#define s8 int8_t | |
+#define s8 int8_t | |
#define s16 int16_t | |
#define s32 int32_t | |
#define s64 int64_t | |
#include "../../../include/xen/elfstructs.h" | |
-#define DYNAMICALLY_FILLED 0 | |
-#define RAW_OFFSET 128 | |
+#define DYNAMICALLY_FILLED 0 | |
+#define RAW_OFFSET 128 | |
static Elf32_Ehdr out_ehdr = { | |
- { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */ | |
- ELFCLASS32, /* EI_CLASS */ | |
- ELFDATA2LSB, /* EI_DATA */ | |
- EV_CURRENT, /* EI_VERSION */ | |
- 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* e_ident */ | |
- ET_EXEC, /* e_type */ | |
- EM_386, /* e_machine */ | |
- EV_CURRENT, /* e_version */ | |
- DYNAMICALLY_FILLED, /* e_entry */ | |
- sizeof(Elf32_Ehdr), /* e_phoff */ | |
- DYNAMICALLY_FILLED, /* e_shoff */ | |
- 0, /* e_flags */ | |
- sizeof(Elf32_Ehdr), /* e_ehsize */ | |
- sizeof(Elf32_Phdr), /* e_phentsize */ | |
- 1, /* modify based on num_phdrs */ /* e_phnum */ | |
- sizeof(Elf32_Shdr), /* e_shentsize */ | |
- 3, /* modify based on num_phdrs */ /* e_shnum */ | |
- 2 /* e_shstrndx */ | |
+ {ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, /* EI_MAG{0-3} */ | |
+ ELFCLASS32, /* EI_CLASS */ | |
+ ELFDATA2LSB, /* EI_DATA */ | |
+ EV_CURRENT, /* EI_VERSION */ | |
+ 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* e_ident */ | |
+ ET_EXEC, /* e_type */ | |
+ EM_386, /* e_machine */ | |
+ EV_CURRENT, /* e_version */ | |
+ DYNAMICALLY_FILLED, /* e_entry */ | |
+ sizeof(Elf32_Ehdr), /* e_phoff */ | |
+ DYNAMICALLY_FILLED, /* e_shoff */ | |
+ 0, /* e_flags */ | |
+ sizeof(Elf32_Ehdr), /* e_ehsize */ | |
+ sizeof(Elf32_Phdr), /* e_phentsize */ | |
+ 1, | |
+ /* modify based on num_phdrs */ /* e_phnum */ | |
+ sizeof(Elf32_Shdr), /* e_shentsize */ | |
+ 3, | |
+ /* modify based on num_phdrs */ /* e_shnum */ | |
+ 2 /* e_shstrndx */ | |
}; | |
static Elf32_Phdr out_phdr = { | |
- PT_LOAD, /* p_type */ | |
- RAW_OFFSET, /* p_offset */ | |
- DYNAMICALLY_FILLED, /* p_vaddr */ | |
- DYNAMICALLY_FILLED, /* p_paddr */ | |
- DYNAMICALLY_FILLED, /* p_filesz */ | |
- DYNAMICALLY_FILLED, /* p_memsz */ | |
- PF_R|PF_W|PF_X, /* p_flags */ | |
- 64 /* p_align */ | |
+ PT_LOAD, /* p_type */ | |
+ RAW_OFFSET, /* p_offset */ | |
+ DYNAMICALLY_FILLED, /* p_vaddr */ | |
+ DYNAMICALLY_FILLED, /* p_paddr */ | |
+ DYNAMICALLY_FILLED, /* p_filesz */ | |
+ DYNAMICALLY_FILLED, /* p_memsz */ | |
+ PF_R | PF_W | PF_X, /* p_flags */ | |
+ 64 /* p_align */ | |
}; | |
static Elf32_Phdr note_phdr = { | |
- PT_NOTE, /* p_type */ | |
- DYNAMICALLY_FILLED, /* p_offset */ | |
- DYNAMICALLY_FILLED, /* p_vaddr */ | |
- DYNAMICALLY_FILLED, /* p_paddr */ | |
- DYNAMICALLY_FILLED, /* p_filesz */ | |
- DYNAMICALLY_FILLED, /* p_memsz */ | |
- PF_R, /* p_flags */ | |
- 4 /* p_align */ | |
+ PT_NOTE, /* p_type */ | |
+ DYNAMICALLY_FILLED, /* p_offset */ | |
+ DYNAMICALLY_FILLED, /* p_vaddr */ | |
+ DYNAMICALLY_FILLED, /* p_paddr */ | |
+ DYNAMICALLY_FILLED, /* p_filesz */ | |
+ DYNAMICALLY_FILLED, /* p_memsz */ | |
+ PF_R, /* p_flags */ | |
+ 4 /* p_align */ | |
}; | |
static u8 out_shstrtab[] = "\0.text\0.shstrtab"; | |
@@ -77,46 +79,47 @@ static u8 out_shstrtab[] = "\0.text\0.shstrtab"; | |
static u8 out_shstrtab_extra[] = ".note\0"; | |
static Elf32_Shdr out_shdr[] = { | |
- { 0 }, | |
- { 1, /* sh_name */ | |
- SHT_PROGBITS, /* sh_type */ | |
- SHF_WRITE|SHF_ALLOC|SHF_EXECINSTR, /* sh_flags */ | |
- DYNAMICALLY_FILLED, /* sh_addr */ | |
- RAW_OFFSET, /* sh_offset */ | |
- DYNAMICALLY_FILLED, /* sh_size */ | |
- 0, /* sh_link */ | |
- 0, /* sh_info */ | |
- 64, /* sh_addralign */ | |
- 0 /* sh_entsize */ | |
+ {0}, | |
+ { | |
+ 1, /* sh_name */ | |
+ SHT_PROGBITS, /* sh_type */ | |
+ SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR, /* sh_flags */ | |
+ DYNAMICALLY_FILLED, /* sh_addr */ | |
+ RAW_OFFSET, /* sh_offset */ | |
+ DYNAMICALLY_FILLED, /* sh_size */ | |
+ 0, /* sh_link */ | |
+ 0, /* sh_info */ | |
+ 64, /* sh_addralign */ | |
+ 0 /* sh_entsize */ | |
}, | |
- { 7, /* sh_name */ | |
- SHT_STRTAB, /* sh_type */ | |
- 0, /* sh_flags */ | |
- 0, /* sh_addr */ | |
- DYNAMICALLY_FILLED, /* sh_offset */ | |
- sizeof(out_shstrtab), /* sh_size */ | |
- 0, /* sh_link */ | |
- 0, /* sh_info */ | |
- 1, /* sh_addralign */ | |
- 0 /* sh_entsize */ | |
- } | |
-}; | |
+ { | |
+ 7, /* sh_name */ | |
+ SHT_STRTAB, /* sh_type */ | |
+ 0, /* sh_flags */ | |
+ 0, /* sh_addr */ | |
+ DYNAMICALLY_FILLED, /* sh_offset */ | |
+ sizeof(out_shstrtab), /* sh_size */ | |
+ 0, /* sh_link */ | |
+ 0, /* sh_info */ | |
+ 1, /* sh_addralign */ | |
+ 0 /* sh_entsize */ | |
+ }}; | |
/* | |
* The 17 points to the '.note' in the out_shstrtab and out_shstrtab_extra | |
* laid out in the file. | |
*/ | |
static Elf32_Shdr out_shdr_note = { | |
- 17, /* sh_name */ | |
- SHT_NOTE, /* sh_type */ | |
- 0, /* sh_flags */ | |
- DYNAMICALLY_FILLED, /* sh_addr */ | |
- DYNAMICALLY_FILLED, /* sh_offset */ | |
- DYNAMICALLY_FILLED, /* sh_size */ | |
- 0, /* sh_link */ | |
- 0, /* sh_info */ | |
- 4, /* sh_addralign */ | |
- 0 /* sh_entsize */ | |
+ 17, /* sh_name */ | |
+ SHT_NOTE, /* sh_type */ | |
+ 0, /* sh_flags */ | |
+ DYNAMICALLY_FILLED, /* sh_addr */ | |
+ DYNAMICALLY_FILLED, /* sh_offset */ | |
+ DYNAMICALLY_FILLED, /* sh_size */ | |
+ 0, /* sh_link */ | |
+ 0, /* sh_info */ | |
+ 4, /* sh_addralign */ | |
+ 0 /* sh_entsize */ | |
}; | |
/* Some system header files define these macros and pollute our namespace. */ | |
@@ -124,9 +127,11 @@ static Elf32_Shdr out_shdr_note = { | |
#undef swap32 | |
#undef swap64 | |
-#define swap16(_v) ((((u16)(_v)>>8)&0xff)|(((u16)(_v)&0xff)<<8)) | |
-#define swap32(_v) (((u32)swap16((u16)(_v))<<16)|(u32)swap16((u32)((_v)>>16))) | |
-#define swap64(_v) (((u64)swap32((u32)(_v))<<32)|(u64)swap32((u32)((_v)>>32))) | |
+#define swap16(_v) ((((u16)(_v) >> 8) & 0xff) | (((u16)(_v)&0xff) << 8)) | |
+#define swap32(_v) \ | |
+ (((u32)swap16((u16)(_v)) << 16) | (u32)swap16((u32)((_v) >> 16))) | |
+#define swap64(_v) \ | |
+ (((u64)swap32((u32)(_v)) << 32) | (u64)swap32((u32)((_v) >> 32))) | |
static int big_endian; | |
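The swap16/32/64 macros above byte-reverse header fields on big-endian build hosts so that the emitted ELF image stays little-endian; swap32 is composed from two swap16 halves, and swap64 from two swap32 halves. A standalone check of the composition:

    #include <stdio.h>
    #include <stdint.h>

    #define swap16(_v) ((((uint16_t)(_v) >> 8) & 0xff) | \
                        (((uint16_t)(_v) & 0xff) << 8))
    #define swap32(_v) (((uint32_t)swap16((uint16_t)(_v)) << 16) | \
                        (uint32_t)swap16((uint32_t)((_v) >> 16)))

    int main(void)
    {
        uint32_t x = 0x12345678;

        /* Low half 0x5678 becomes 0x7856 in the high half, and the high
         * half 0x1234 becomes 0x3412 in the low half. */
        printf("%#x -> %#x\n", (unsigned)x, (unsigned)swap32(x));
        return 0;
    }

This prints "0x12345678 -> 0x78563412", confirming the full byte reversal.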
@@ -134,87 +139,87 @@ static void endianadjust_ehdr32(Elf32_Ehdr *eh) | |
{ | |
if ( !big_endian ) | |
return; | |
- eh->e_type = swap16(eh->e_type); | |
- eh->e_machine = swap16(eh->e_machine); | |
- eh->e_version = swap32(eh->e_version); | |
- eh->e_entry = swap32(eh->e_entry); | |
- eh->e_phoff = swap32(eh->e_phoff); | |
- eh->e_shoff = swap32(eh->e_shoff); | |
- eh->e_flags = swap32(eh->e_flags); | |
- eh->e_ehsize = swap16(eh->e_ehsize); | |
+ eh->e_type = swap16(eh->e_type); | |
+ eh->e_machine = swap16(eh->e_machine); | |
+ eh->e_version = swap32(eh->e_version); | |
+ eh->e_entry = swap32(eh->e_entry); | |
+ eh->e_phoff = swap32(eh->e_phoff); | |
+ eh->e_shoff = swap32(eh->e_shoff); | |
+ eh->e_flags = swap32(eh->e_flags); | |
+ eh->e_ehsize = swap16(eh->e_ehsize); | |
eh->e_phentsize = swap16(eh->e_phentsize); | |
- eh->e_phnum = swap16(eh->e_phnum); | |
+ eh->e_phnum = swap16(eh->e_phnum); | |
eh->e_shentsize = swap16(eh->e_shentsize); | |
- eh->e_shnum = swap16(eh->e_shnum); | |
- eh->e_shstrndx = swap16(eh->e_shstrndx); | |
+ eh->e_shnum = swap16(eh->e_shnum); | |
+ eh->e_shstrndx = swap16(eh->e_shstrndx); | |
} | |
static void endianadjust_ehdr64(Elf64_Ehdr *eh) | |
{ | |
if ( !big_endian ) | |
return; | |
- eh->e_type = swap16(eh->e_type); | |
- eh->e_machine = swap16(eh->e_machine); | |
- eh->e_version = swap32(eh->e_version); | |
- eh->e_entry = swap64(eh->e_entry); | |
- eh->e_phoff = swap64(eh->e_phoff); | |
- eh->e_shoff = swap64(eh->e_shoff); | |
- eh->e_flags = swap32(eh->e_flags); | |
- eh->e_ehsize = swap16(eh->e_ehsize); | |
+ eh->e_type = swap16(eh->e_type); | |
+ eh->e_machine = swap16(eh->e_machine); | |
+ eh->e_version = swap32(eh->e_version); | |
+ eh->e_entry = swap64(eh->e_entry); | |
+ eh->e_phoff = swap64(eh->e_phoff); | |
+ eh->e_shoff = swap64(eh->e_shoff); | |
+ eh->e_flags = swap32(eh->e_flags); | |
+ eh->e_ehsize = swap16(eh->e_ehsize); | |
eh->e_phentsize = swap16(eh->e_phentsize); | |
- eh->e_phnum = swap16(eh->e_phnum); | |
+ eh->e_phnum = swap16(eh->e_phnum); | |
eh->e_shentsize = swap16(eh->e_shentsize); | |
- eh->e_shnum = swap16(eh->e_shnum); | |
- eh->e_shstrndx = swap16(eh->e_shstrndx); | |
+ eh->e_shnum = swap16(eh->e_shnum); | |
+ eh->e_shstrndx = swap16(eh->e_shstrndx); | |
} | |
static void endianadjust_phdr32(Elf32_Phdr *ph) | |
{ | |
if ( !big_endian ) | |
return; | |
- ph->p_type = swap32(ph->p_type); | |
- ph->p_offset = swap32(ph->p_offset); | |
- ph->p_vaddr = swap32(ph->p_vaddr); | |
- ph->p_paddr = swap32(ph->p_paddr); | |
- ph->p_filesz = swap32(ph->p_filesz); | |
- ph->p_memsz = swap32(ph->p_memsz); | |
- ph->p_flags = swap32(ph->p_flags); | |
- ph->p_align = swap32(ph->p_align); | |
+ ph->p_type = swap32(ph->p_type); | |
+ ph->p_offset = swap32(ph->p_offset); | |
+ ph->p_vaddr = swap32(ph->p_vaddr); | |
+ ph->p_paddr = swap32(ph->p_paddr); | |
+ ph->p_filesz = swap32(ph->p_filesz); | |
+ ph->p_memsz = swap32(ph->p_memsz); | |
+ ph->p_flags = swap32(ph->p_flags); | |
+ ph->p_align = swap32(ph->p_align); | |
} | |
static void endianadjust_phdr64(Elf64_Phdr *ph) | |
{ | |
if ( !big_endian ) | |
return; | |
- ph->p_type = swap32(ph->p_type); | |
- ph->p_flags = swap32(ph->p_flags); | |
- ph->p_offset = swap64(ph->p_offset); | |
- ph->p_vaddr = swap64(ph->p_vaddr); | |
- ph->p_paddr = swap64(ph->p_paddr); | |
- ph->p_filesz = swap64(ph->p_filesz); | |
- ph->p_memsz = swap64(ph->p_memsz); | |
- ph->p_align = swap64(ph->p_align); | |
+ ph->p_type = swap32(ph->p_type); | |
+ ph->p_flags = swap32(ph->p_flags); | |
+ ph->p_offset = swap64(ph->p_offset); | |
+ ph->p_vaddr = swap64(ph->p_vaddr); | |
+ ph->p_paddr = swap64(ph->p_paddr); | |
+ ph->p_filesz = swap64(ph->p_filesz); | |
+ ph->p_memsz = swap64(ph->p_memsz); | |
+ ph->p_align = swap64(ph->p_align); | |
} | |
static void endianadjust_shdr32(Elf32_Shdr *sh) | |
{ | |
if ( !big_endian ) | |
return; | |
- sh->sh_name = swap32(sh->sh_name); | |
- sh->sh_type = swap32(sh->sh_type); | |
- sh->sh_flags = swap32(sh->sh_flags); | |
- sh->sh_addr = swap32(sh->sh_addr); | |
- sh->sh_offset = swap32(sh->sh_offset); | |
- sh->sh_size = swap32(sh->sh_size); | |
- sh->sh_link = swap32(sh->sh_link); | |
- sh->sh_info = swap32(sh->sh_info); | |
+ sh->sh_name = swap32(sh->sh_name); | |
+ sh->sh_type = swap32(sh->sh_type); | |
+ sh->sh_flags = swap32(sh->sh_flags); | |
+ sh->sh_addr = swap32(sh->sh_addr); | |
+ sh->sh_offset = swap32(sh->sh_offset); | |
+ sh->sh_size = swap32(sh->sh_size); | |
+ sh->sh_link = swap32(sh->sh_link); | |
+ sh->sh_info = swap32(sh->sh_info); | |
sh->sh_addralign = swap32(sh->sh_addralign); | |
- sh->sh_entsize = swap32(sh->sh_entsize); | |
+ sh->sh_entsize = swap32(sh->sh_entsize); | |
} | |
static void do_write(int fd, void *data, int len) | |
{ | |
- int done, left = len; | |
+ int done, left = len; | |
char *p = data; | |
while ( left != 0 ) | |
@@ -223,19 +228,19 @@ static void do_write(int fd, void *data, int len) | |
{ | |
if ( errno == EINTR ) | |
continue; | |
- fprintf(stderr, "Error writing output image: %d (%s).\n", | |
- errno, strerror(errno)); | |
+ fprintf(stderr, "Error writing output image: %d (%s).\n", errno, | |
+ strerror(errno)); | |
exit(1); | |
} | |
left -= done; | |
- p += done; | |
+ p += done; | |
} | |
} | |
static void do_read(int fd, void *data, int len) | |
{ | |
- int done, left = len; | |
+ int done, left = len; | |
char *p = data; | |
while ( left != 0 ) | |
@@ -244,25 +249,25 @@ static void do_read(int fd, void *data, int len) | |
{ | |
if ( errno == EINTR ) | |
continue; | |
- fprintf(stderr, "Error reading input image: %d (%s).\n", | |
- errno, strerror(errno)); | |
+ fprintf(stderr, "Error reading input image: %d (%s).\n", errno, | |
+ strerror(errno)); | |
exit(1); | |
} | |
left -= done; | |
- p += done; | |
+ p += done; | |
} | |
} | |
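do_write and do_read above loop until the requested byte count has fully transferred, because write(2) and read(2) may move fewer bytes than asked and may fail transiently with EINTR. The same pattern in a self-contained form (write_all is a hypothetical name, not from the source):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Write exactly len bytes or exit: short writes advance the cursor,
     * EINTR retries the call. */
    static void write_all(int fd, const void *data, size_t len)
    {
        const char *p = data;

        while ( len != 0 )
        {
            ssize_t done = write(fd, p, len);

            if ( done < 0 )
            {
                if ( errno == EINTR )
                    continue;
                fprintf(stderr, "write: %d (%s)\n", errno, strerror(errno));
                exit(1);
            }
            p += done;
            len -= done;
        }
    }

    int main(void)
    {
        write_all(1, "hello\n", 6);
        return 0;
    }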
int main(int argc, char **argv) | |
{ | |
- u64 final_exec_addr; | |
- u32 loadbase, dat_siz, mem_siz, note_base, note_sz, offset; | |
- char *inimage, *outimage; | |
- int infd, outfd; | |
- char buffer[1024] = {}; | |
- int bytes, todo, i = 1; | |
- int num_phdrs = 1; | |
+ u64 final_exec_addr; | |
+ u32 loadbase, dat_siz, mem_siz, note_base, note_sz, offset; | |
+ char *inimage, *outimage; | |
+ int infd, outfd; | |
+ char buffer[1024] = {}; | |
+ int bytes, todo, i = 1; | |
+ int num_phdrs = 1; | |
Elf32_Ehdr in32_ehdr; | |
@@ -272,7 +277,7 @@ int main(int argc, char **argv) | |
if ( argc < 5 ) | |
{ | |
fprintf(stderr, "Usage: mkelf32 [--notes] <in-image> <out-image> " | |
- "<load-base> <final-exec-addr>\n"); | |
+ "<load-base> <final-exec-addr>\n"); | |
return 1; | |
} | |
@@ -281,7 +286,7 @@ int main(int argc, char **argv) | |
i = 2; | |
num_phdrs = 2; | |
} | |
- inimage = argv[i++]; | |
+ inimage = argv[i++]; | |
outimage = argv[i++]; | |
loadbase = strtoul(argv[i++], NULL, 16); | |
final_exec_addr = strtoull(argv[i++], NULL, 16); | |
@@ -289,14 +294,13 @@ int main(int argc, char **argv) | |
infd = open(inimage, O_RDONLY); | |
if ( infd == -1 ) | |
{ | |
- fprintf(stderr, "Failed to open input image '%s': %d (%s).\n", | |
- inimage, errno, strerror(errno)); | |
+ fprintf(stderr, "Failed to open input image '%s': |