diff -r 837ea1f0aa8a xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/arch/x86/setup.c	Fri Apr 18 14:12:29 2008 +0200
@@ -1021,7 +1021,7 @@ void __init __start_xen(unsigned long mb
 
     iommu_setup();
 
-    amd_iommu_detect();
+    amd_iov_detect();
 
     /*
      * We're going to setup domain0 using the module(s) that we stashed safely
diff -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c	Fri Apr 18 14:12:29 2008 +0200
@@ -139,7 +139,7 @@ static int __init register_exclusion_ran
     iommu = find_iommu_for_device(bus, devfn);
     if ( !iommu )
     {
-        dprintk(XENLOG_ERR, "IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
+        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
         return -ENODEV;
     }
     req = ivrs_mappings[bdf].dte_requestor_id;
@@ -221,7 +221,7 @@ static int __init parse_ivmd_device_sele
     bdf = ivmd_block->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
+        amd_iov_error("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
         return -ENODEV;
     }
 
@@ -238,21 +238,18 @@ static int __init parse_ivmd_device_rang
     first_bdf = ivmd_block->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
    {
-        dprintk(XENLOG_ERR, "IVMD Error: "
-                "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
+        amd_iov_error(
+            "IVMD Error: Invalid Range_First Dev_Id 0x%x\n", first_bdf);
         return -ENODEV;
     }
 
     last_bdf = ivmd_block->last_dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        dprintk(XENLOG_ERR, "IVMD Error: "
-                "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
-        return -ENODEV;
-    }
-
-    dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
-            first_bdf, last_bdf);
+        amd_iov_error(
+            "IVMD Error: Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
+        return -ENODEV;
+    }
 
     for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
         error = register_exclusion_range_for_device(
@@ -272,8 +269,7 @@ static int __init parse_ivmd_device_iomm
                                    ivmd_block->cap_offset);
     if ( !iommu )
     {
-        dprintk(XENLOG_ERR,
-                "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
                 ivmd_block->header.dev_id, ivmd_block->cap_offset);
         return -ENODEV;
     }
@@ -290,7 +286,7 @@ static int __init parse_ivmd_block(struc
     if ( ivmd_block->header.length <
          sizeof(struct acpi_ivmd_block_header) )
     {
-        dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
+        amd_iov_error("IVMD Error: Invalid Block Length!\n");
         return -ENODEV;
     }
 
@@ -299,10 +295,9 @@ static int __init parse_ivmd_block(struc
     base = start_addr & PAGE_MASK;
     limit = (start_addr + mem_length - 1) & PAGE_MASK;
 
-    dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
-            ivmd_block->header.type);
-    dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
-    dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
+    amd_iov_info("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
+    amd_iov_info(" Start_Addr_Phys 0x%lx\n", start_addr);
+    amd_iov_info(" Mem_Length 0x%lx\n", mem_length);
 
     if ( get_field_from_byte(ivmd_block->header.flags,
                              AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
@@ -321,7 +316,7 @@ static int __init parse_ivmd_block(struc
     }
     else
     {
-        dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
+        amd_iov_error("IVMD Error: Invalid Flag Field!\n");
         return -ENODEV;
     }
 
@@ -344,7 +339,7 @@ static int __init parse_ivmd_block(struc
                 base, limit, iw, ir);
 
     default:
-        dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
+        amd_iov_error("IVMD Error: Invalid Block Type!\n");
         return -ENODEV;
     }
 }
@@ -354,7 +349,7 @@ static u16 __init parse_ivhd_device_padd
 {
     if ( header_length < (block_length + pad_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
@@ -369,8 +364,7 @@ static u16 __init parse_ivhd_device_sele
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
         return 0;
     }
 
@@ -393,14 +387,14 @@ static u16 __init parse_ivhd_device_rang
     dev_length = sizeof(struct acpi_ivhd_device_range);
     if ( header_length < (block_length + dev_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
+        amd_iov_error("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->range.trailer.type);
         return 0;
@@ -409,21 +403,20 @@ static u16 __init parse_ivhd_device_rang
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+        amd_iov_error(
+            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
 
     last_bdf = ivhd_device->range.trailer.dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
-        return 0;
-    }
-
-    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
-            first_bdf, last_bdf);
+        amd_iov_error(
+            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+        return 0;
+    }
+
+    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
 
     /* override flags for range of devices */
     sys_mgt = get_field_from_byte(ivhd_device->header.flags,
@@ -444,28 +437,25 @@ static u16 __init parse_ivhd_device_alia
     dev_length = sizeof(struct acpi_ivhd_device_alias);
     if ( header_length < (block_length + dev_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
         return 0;
     }
 
     alias_id = ivhd_device->alias.dev_id;
     if ( alias_id >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Alias Dev_Id 0x%x\n", alias_id);
-        return 0;
-    }
-
-    dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
+        return 0;
+    }
+
+    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
 
     /* override requestor_id and flags for device */
     ivrs_mappings[bdf].dte_requestor_id = alias_id;
@@ -490,15 +480,14 @@ static u16 __init parse_ivhd_device_alia
     dev_length = sizeof(struct acpi_ivhd_device_alias_range);
     if ( header_length < (block_length + dev_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->alias_range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
+        amd_iov_error("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->alias_range.trailer.type);
         return 0;
@@ -507,30 +496,28 @@ static u16 __init parse_ivhd_device_alia
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR,"IVHD Error: "
-                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+        amd_iov_error(
+            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
 
     last_bdf = ivhd_device->alias_range.trailer.dev_id;
     if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+        amd_iov_error(
+            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
         return 0;
     }
 
     alias_id = ivhd_device->alias_range.alias.dev_id;
     if ( alias_id >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Alias Dev_Id 0x%x\n", alias_id);
-        return 0;
-    }
-
-    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
-            first_bdf, last_bdf);
-    dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
+        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
+        return 0;
+    }
+
+    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
+    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);
 
     /* override requestor_id and flags for range of devices */
     sys_mgt = get_field_from_byte(ivhd_device->header.flags,
@@ -555,16 +542,14 @@ static u16 __init parse_ivhd_device_exte
     dev_length = sizeof(struct acpi_ivhd_device_extended);
     if ( header_length < (block_length + dev_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     bdf = ivhd_device->header.dev_id;
     if ( bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Dev_Id 0x%x\n", bdf);
+        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
         return 0;
     }
 
@@ -587,15 +572,14 @@ static u16 __init parse_ivhd_device_exte
     dev_length = sizeof(struct acpi_ivhd_device_extended_range);
     if ( header_length < (block_length + dev_length) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device_Entry Length!\n");
+        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
         return 0;
     }
 
     if ( ivhd_device->extended_range.trailer.type !=
          AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
+        amd_iov_error("IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->extended_range.trailer.type);
         return 0;
@@ -604,20 +588,20 @@ static u16 __init parse_ivhd_device_exte
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+        amd_iov_error(
+            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
         return 0;
     }
 
     last_bdf = ivhd_device->extended_range.trailer.dev_id;
     if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
-        return 0;
-    }
-
-    dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
+        amd_iov_error(
+            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+        return 0;
+    }
+
+    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n",
                  first_bdf, last_bdf);
 
     /* override flags for range of devices */
@@ -639,7 +623,7 @@ static int __init parse_ivhd_block(struc
     if ( ivhd_block->header.length <
          sizeof(struct acpi_ivhd_block_header) )
     {
-        dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
+        amd_iov_error("IVHD Error: Invalid Block Length!\n");
        return -ENODEV;
     }
 
@@ -647,21 +631,16 @@ static int __init parse_ivhd_block(struc
                                 ivhd_block->cap_offset);
     if ( !iommu )
     {
-        dprintk(XENLOG_ERR,
-                "IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+        amd_iov_error("IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
                 ivhd_block->header.dev_id, ivhd_block->cap_offset);
         return -ENODEV;
     }
 
-    dprintk(XENLOG_INFO, "IVHD Block:\n");
-    dprintk(XENLOG_INFO, " Cap_Offset 0x%x\n",
-            ivhd_block->cap_offset);
-    dprintk(XENLOG_INFO, " MMIO_BAR_Phys 0x%lx\n",
-            (unsigned long)ivhd_block->mmio_base);
-    dprintk(XENLOG_INFO, " PCI_Segment 0x%x\n",
-            ivhd_block->pci_segment);
-    dprintk(XENLOG_INFO, " IOMMU_Info 0x%x\n",
-            ivhd_block->iommu_info);
+    amd_iov_info("IVHD Block:\n");
+    amd_iov_info(" Cap_Offset 0x%x\n", ivhd_block->cap_offset);
+    amd_iov_info(" MMIO_BAR_Phys 0x%"PRIx64"\n",ivhd_block->mmio_base);
+    amd_iov_info( " PCI_Segment 0x%x\n", ivhd_block->pci_segment);
+    amd_iov_info( " IOMMU_Info 0x%x\n", ivhd_block->iommu_info);
 
     /* override IOMMU support flags */
     iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
@@ -692,13 +671,10 @@ static int __init parse_ivhd_block(struc
         ivhd_device = (union acpi_ivhd_device *)
             ((u8 *)ivhd_block + block_length);
 
-        dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
-        dprintk(XENLOG_INFO, " Type 0x%x\n",
-                ivhd_device->header.type);
-        dprintk(XENLOG_INFO, " Dev_Id 0x%x\n",
-                ivhd_device->header.dev_id);
-        dprintk(XENLOG_INFO, " Flags 0x%x\n",
-                ivhd_device->header.flags);
+        amd_iov_info( "IVHD Device Entry:\n");
+        amd_iov_info( " Type 0x%x\n", ivhd_device->header.type);
+        amd_iov_info( " Dev_Id 0x%x\n", ivhd_device->header.dev_id);
+        amd_iov_info( " Flags 0x%x\n", ivhd_device->header.flags);
 
         switch ( ivhd_device->header.type )
         {
@@ -741,8 +717,7 @@ static int __init parse_ivhd_block(struc
                 ivhd_block->header.length, block_length);
             break;
         default:
-            dprintk(XENLOG_ERR, "IVHD Error: "
-                    "Invalid Device Type!\n");
+            amd_iov_error("IVHD Error: Invalid Device Type!\n");
             dev_length = 0;
             break;
         }
@@ -774,46 +749,49 @@ static int __init parse_ivrs_block(struc
         return parse_ivmd_block(ivmd_block);
 
     default:
-        dprintk(XENLOG_ERR, "IVRS Error: Invalid Block Type!\n");
+        amd_iov_error("IVRS Error: Invalid Block Type!\n");
        return -ENODEV;
     }
 
     return 0;
 }
 
-void __init dump_acpi_table_header(struct acpi_table_header *table)
-{
+static void __init dump_acpi_table_header(struct acpi_table_header *table)
+{
+#ifdef AMD_IOV_DEBUG
     int i;
 
-    printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
-    printk(XENLOG_INFO " Signature ");
+    amd_iov_info("ACPI Table:\n");
+    amd_iov_info(" Signature ");
     for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->signature[i]);
     printk("\n");
 
-    printk(" Length 0x%x\n", table->length);
-    printk(" Revision 0x%x\n", table->revision);
-    printk(" CheckSum 0x%x\n", table->checksum);
-
-    printk(" OEM_Id ");
+    amd_iov_info(" Length 0x%x\n", table->length);
+    amd_iov_info(" Revision 0x%x\n", table->revision);
+    amd_iov_info(" CheckSum 0x%x\n", table->checksum);
+
+    amd_iov_info(" OEM_Id ");
     for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
         printk("%c", table->oem_id[i]);
     printk("\n");
 
-    printk(" OEM_Table_Id ");
+    amd_iov_info(" OEM_Table_Id ");
     for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
         printk("%c", table->oem_table_id[i]);
     printk("\n");
 
-    printk(" OEM_Revision 0x%x\n", table->oem_revision);
-
-    printk(" Creator_Id ");
+    amd_iov_info(" OEM_Revision 0x%x\n", table->oem_revision);
+
+    amd_iov_info(" Creator_Id ");
     for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->asl_compiler_id[i]);
     printk("\n");
 
-    printk(" Creator_Revision 0x%x\n",
+    amd_iov_info(" Creator_Revision 0x%x\n",
            table->asl_compiler_revision);
+#endif
+
 }
 
 int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
@@ -827,9 +805,7 @@ int __init parse_ivrs_table(unsigned lon
 
     BUG_ON(!table);
 
-#if 0
     dump_acpi_table_header(table);
-#endif
 
     /* validate checksum: sum of entire table == 0 */
     checksum = 0;
@@ -838,7 +814,7 @@ int __init parse_ivrs_table(unsigned lon
         checksum += raw_table[i];
     if ( checksum )
     {
-        dprintk(XENLOG_ERR, "IVRS Error: "
+        amd_iov_error("IVRS Error: "
                 "Invalid Checksum 0x%x\n", checksum);
         return -ENODEV;
     }
@@ -850,15 +826,15 @@ int __init parse_ivrs_table(unsigned lon
         ivrs_block = (struct acpi_ivrs_block_header *)
             ((u8 *)table + length);
 
-        dprintk(XENLOG_INFO, "IVRS Block:\n");
-        dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
-        dprintk(XENLOG_INFO, " Flags 0x%x\n", ivrs_block->flags);
-        dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
-        dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
+        amd_iov_info("IVRS Block:\n");
+        amd_iov_info(" Type 0x%x\n", ivrs_block->type);
+        amd_iov_info(" Flags 0x%x\n", ivrs_block->flags);
+        amd_iov_info(" Length 0x%x\n", ivrs_block->length);
+        amd_iov_info(" Dev_Id 0x%x\n", ivrs_block->dev_id);
 
         if ( table->length < (length + ivrs_block->length) )
         {
-            dprintk(XENLOG_ERR, "IVRS Error: "
+            amd_iov_error("IVRS Error: "
                     "Table Length Exceeded: 0x%x -> 0x%lx\n",
                     table->length,
                     (length + ivrs_block->length));
diff -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_detect.c	Fri Apr 18 14:12:29 2008 +0200
@@ -117,7 +117,7 @@ static int __init get_iommu_msi_capabili
     if ( !iommu->msi_cap )
         return -ENODEV;
 
-    dprintk(XENLOG_INFO, "AMD IOMMU: Found MSI capability block \n");
+    amd_iov_info("Found MSI capability block \n");
     control = pci_conf_read16(bus, dev, func,
                               iommu->msi_cap + PCI_MSI_FLAGS);
     iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
@@ -138,8 +138,7 @@ int __init get_iommu_capabilities(u8 bus
 
     if ( ((mmio_bar & 0x1) == 0) || (iommu->mmio_base_phys == 0) )
     {
-        dprintk(XENLOG_ERR ,
-                "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
+        amd_iov_error("Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
         return -ENODEV;
     }
 
diff -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Fri Apr 18 14:12:29 2008 +0200
@@ -35,8 +35,7 @@ int __init map_iommu_mmio_region(struct
 
     if ( nr_amd_iommus > MAX_AMD_IOMMUS )
     {
-        gdprintk(XENLOG_ERR,
-                 "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+        amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
         return -ENOMEM;
     }
 
@@ -395,7 +394,7 @@ static void parse_event_log_entry(u32 en
     if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
          (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
     {
-        dprintk(XENLOG_ERR, "Invalid event log entry!\n");
+        amd_iov_error("Invalid event log entry!\n");
         return;
     }
 
@@ -408,8 +407,8 @@ static void parse_event_log_entry(u32 en
                                            IOMMU_EVENT_DOMAIN_ID_MASK,
                                            IOMMU_EVENT_DOMAIN_ID_SHIFT);
         addr= (u64*) (entry + 2);
-        dprintk(XENLOG_ERR,
-                "%s: domain = %d, device id = 0x%x, fault address = 0x%"PRIx64"\n",
+        printk(XENLOG_ERR "AMD_IOV: "
+               "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
                 event_str[code-1], domain_id, device_id, *addr);
     }
 }
@@ -445,7 +444,7 @@ static int set_iommu_interrupt_handler(s
 
     if ( !vector )
     {
-        gdprintk(XENLOG_ERR, "AMD IOMMU: no vectors\n");
+        amd_iov_error("no vectors\n");
         return 0;
     }
 
@@ -453,7 +452,7 @@ static int set_iommu_interrupt_handler(s
     ret = request_irq(vector, amd_iommu_page_fault, 0, "dmar", iommu);
     if ( ret )
     {
-        gdprintk(XENLOG_ERR, "AMD IOMMU: can't request irq\n");
+        amd_iov_error("can't request irq\n");
         return 0;
     }
 
@@ -483,5 +482,5 @@ void __init enable_iommu(struct amd_iomm
 
     spin_unlock_irqrestore(&iommu->lock, flags);
 
-    printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
-}
+    printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus);
+}
diff -r 837ea1f0aa8a xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 18 14:12:29 2008 +0200
@@ -154,8 +154,7 @@ void flush_command_buffer(struct amd_iom
         }
         else
         {
-            dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
-                    " ComWaitInt bit did not assert!\n");
+            amd_iov_warning("Warning: ComWaitInt bit did not assert!\n");
         }
     }
 }
@@ -402,10 +401,9 @@ int amd_iommu_map_page(struct domain *d,
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
-        dprintk(XENLOG_ERR,
-                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
-        return -EIO;
+        return -EFAULT;
     }
 
     set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
@@ -439,10 +437,9 @@ int amd_iommu_unmap_page(struct domain *
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
-        dprintk(XENLOG_ERR,
-                "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
         spin_unlock_irqrestore(&hd->mapping_lock, flags);
-        return -EIO;
+        return -EFAULT;
     }
 
     /* mark PTE as 'page not present' */
@@ -479,9 +476,8 @@ int amd_iommu_reserve_domain_unity_map(
             hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
         if ( pte == NULL )
         {
-            dprintk(XENLOG_ERR,
-                    "AMD IOMMU: Invalid IO pagetable entry "
-                    "phys_addr = %lx\n", phys_addr);
+            amd_iov_error(
+                "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
         }
@@ -528,8 +524,7 @@ int amd_iommu_sync_p2m(struct domain *d)
         pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
         if ( pte == NULL )
         {
-            dprintk(XENLOG_ERR,
-                    "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
         }
diff -r 837ea1f0aa8a xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 18 14:12:29 2008 +0200
@@ -39,7 +39,7 @@ int amd_iommu_enabled = 0;
 int amd_iommu_enabled = 0;
 
 static int enable_amd_iommu = 0;
-boolean_param("enable_amd_iommu", enable_amd_iommu);
+boolean_param("enable_amd_iov", enable_amd_iommu);
 
 static void deallocate_domain_page_tables(struct hvm_iommu *hd)
 {
@@ -104,7 +104,7 @@ static int __init allocate_iommu_table_s
 
     if ( !table->buffer )
     {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name);
+        amd_iov_error("Error allocating %s\n", name);
         return -ENOMEM;
     }
 
@@ -169,7 +169,7 @@ int iommu_detect_callback(u8 bus, u8 dev
     iommu = (struct amd_iommu *) xmalloc(struct amd_iommu);
     if ( !iommu )
     {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n");
+        amd_iov_error("Error allocating amd_iommu\n");
         return -ENOMEM;
     }
     memset(iommu, 0, sizeof(struct amd_iommu));
@@ -237,7 +237,7 @@ static int __init amd_iommu_init(void)
     }
 
     if ( acpi_table_parse(ACPI_IVRS, parse_ivrs_table) != 0 )
-        dprintk(XENLOG_INFO, "AMD IOMMU: Did not find IVRS table!\n");
+        amd_iov_error("Did not find IVRS table!\n");
 
     for_each_amd_iommu ( iommu )
     {
@@ -308,7 +308,7 @@ void amd_iommu_setup_domain_device(
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
 
-        dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, "
+        amd_iov_info("Enable DTE:0x%x, "
                 "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
                 req_id, root_ptr, hd->domain_id, hd->paging_mode);
@@ -354,15 +354,18 @@ void __init amd_iommu_setup_dom0_devices
         }
     }
 }
 
-int amd_iommu_detect(void)
+int amd_iov_detect(void)
 {
     unsigned long i;
     int last_bus;
     struct amd_iommu *iommu;
 
+    if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
+        return 0;
+
     if ( !enable_amd_iommu )
     {
-        printk("AMD IOMMU: Disabled\n");
+        printk("AMD_IOV: Disabled.\n");
         return 0;
     }
 
@@ -370,13 +373,13 @@ int amd_iommu_detect(void)
 
     if ( scan_for_iommu(iommu_detect_callback) != 0 )
     {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n");
+        amd_iov_error("Error detection\n");
         goto error_out;
     }
 
     if ( !iommu_found() )
     {
-        printk("AMD IOMMU: Not found!\n");
+        printk("AMD_IOV: IOMMU not found!\n");
         return 0;
     }
     else
@@ -394,8 +397,7 @@ int amd_iommu_detect(void)
 
     if ( !ivrs_mappings )
     {
-        dprintk(XENLOG_ERR, "AMD IOMMU:"
-                " Error allocating IVRS DevMappings table\n");
+        amd_iov_error("Error allocating IVRS DevMappings table\n");
         goto error_out;
     }
     memset(ivrs_mappings, 0,
@@ -404,7 +406,7 @@ int amd_iommu_detect(void)
 
     if ( amd_iommu_init() != 0 )
     {
-        dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n");
+        amd_iov_error("Error initialization\n");
         goto error_out;
     }
 
@@ -462,8 +464,6 @@ static int get_paging_mode(unsigned long
             return -ENOMEM;
     }
 
-    dprintk(XENLOG_INFO, "AMD IOMMU: paging mode = %d\n", level);
-
     return level;
 }
 
@@ -505,7 +505,7 @@ static void amd_iommu_disable_domain_dev
         memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
-        dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x,"
+        amd_iov_info("Disable DTE:0x%x,"
                 " domain_id:%d, paging_mode:%d\n",
                 req_id, domain_hvm_iommu(domain)->domain_id,
                 domain_hvm_iommu(domain)->paging_mode);
@@ -540,7 +540,7 @@ static int reassign_device( struct domai
 
         if ( !iommu )
         {
-            gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu."
+            amd_iov_error("Fail to find iommu."
                      " %x:%x.%x cannot be assigned to domain %d\n",
                      bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
             return -ENODEV;
         }
@@ -555,8 +555,7 @@ static int reassign_device( struct domai
        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
 
         amd_iommu_setup_domain_device(target, iommu, bdf);
-        gdprintk(XENLOG_INFO ,
-                 "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
+        amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                  source->domain_id, target->domain_id);
 
@@ -595,8 +594,7 @@ static void release_domain_devices(struc
     {
         pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
         pdev_flr(pdev->bus, pdev->devfn);
-        gdprintk(XENLOG_INFO ,
-                 "AMD IOMMU: release devices %x:%x.%x\n",
+        amd_iov_info("release domain %d devices %x:%x.%x\n", d->domain_id,
             pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
         reassign_device(d, dom0, pdev->bus, pdev->devfn);
     }
diff -r 837ea1f0aa8a xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/include/asm-x86/amd-iommu.h	Fri Apr 18 14:12:29 2008 +0200
@@ -31,7 +31,7 @@ extern int amd_iommu_enabled;
 extern int amd_iommu_enabled;
 
 extern struct list_head amd_iommu_head;
-extern int __init amd_iommu_detect(void);
+extern int __init amd_iov_detect(void);
 
 struct table_struct {
     void *buffer;
diff -r 837ea1f0aa8a xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Apr 16 13:43:23 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 18 14:12:29 2008 +0200
@@ -34,6 +34,19 @@
 #define DMA_32BIT_MASK  0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#ifdef AMD_IOV_DEBUG
+#define amd_iov_info(fmt, args...) \
+    printk(XENLOG_INFO "AMD_IOV: " fmt, ## args)
+#define amd_iov_warning(fmt, args...) \
+    printk(XENLOG_WARNING "AMD_IOV: " fmt, ## args)
+#define amd_iov_error(fmt, args...) \
+    printk(XENLOG_ERR "AMD_IOV: %s:%d: " fmt, __FILE__ , __LINE__ , ## args)
+#else
+#define amd_iov_info(fmt, args...)
+#define amd_iov_warning(fmt, args...)
+#define amd_iov_error(fmt, args...)
+#endif
 
 typedef int (*iommu_detect_callback_ptr_t)(
     u8 bus, u8 dev, u8 func, u8 cap_ptr);