[Xen-changelog] [xen-unstable] AMD IOMMU: Fix up coding style issue in amd iommu files
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1205943384 0
# Node ID c927f758fcba590f9a4e443057b7f7b467a120bb
# Parent  7d8892a90c9020f7ff8742edab33a6bf7afa7026
AMD IOMMU: Fix up coding style issue in amd iommu files

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_acpi.c    | 297 +++++++++++++---------------
 xen/drivers/passthrough/amd/iommu_detect.c  | 166 +++++++--------
 xen/drivers/passthrough/amd/iommu_init.c    |  90 ++++----
 xen/drivers/passthrough/amd/iommu_map.c     |  30 +-
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  78 +++----
 xen/drivers/passthrough/iommu.c             |  10
 6 files changed, 338 insertions(+), 333 deletions(-)
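Every hunk below applies the same Xen conventions rather than changing behaviour. A minimal stand-alone illustration of the style being enforced (a hypothetical function, not code from this changeset): spaces inside the parentheses of if/for/while/switch, each half of a compound condition parenthesised, postfix increment in loop headers, and long parameter lists broken after the opening parenthesis.

/* style_demo.c -- hypothetical example, not part of the changeset.
 * Build: gcc -Wall -o style_demo style_demo.c */
#include <stdio.h>

/* Long parameter lists are broken after the opening parenthesis. */
static int find_in_range(
    unsigned int first, unsigned int last, unsigned int target)
{
    unsigned int i;

    /* Spaces inside the keyword parentheses; each comparison of a
     * compound condition gets its own parentheses. */
    if ( (first > last) || (target < first) || (target > last) )
        return -1;

    /* Postfix increment in loop headers, as this changeset enforces. */
    for ( i = first; i <= last; i++ )
        if ( i == target )
            return (int)i;

    return -1;
}

int main(void)
{
    printf("%d\n", find_in_range(0, 15, 7));
    return 0;
}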
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c  Wed Mar 19 16:16:24 2008 +0000
@@ -29,12 +29,12 @@ extern struct ivrs_mappings *ivrs_mappin
 extern struct ivrs_mappings *ivrs_mappings;
 
 static struct amd_iommu * __init find_iommu_from_bdf_cap(
-        u16 bdf, u8 cap_offset)
+    u16 bdf, u8 cap_offset)
 {
     struct amd_iommu *iommu;
 
-    for_each_amd_iommu( iommu )
-        if ( iommu->bdf == bdf && iommu->cap_offset == cap_offset )
+    for_each_amd_iommu ( iommu )
+        if ( (iommu->bdf == bdf) && (iommu->cap_offset == cap_offset) )
             return iommu;
 
     return NULL;
@@ -57,15 +57,17 @@ static void __init reserve_iommu_exclusi
     iommu->exclusion_limit = limit;
 }
 
-static void __init reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
-    unsigned long base, unsigned long limit)
+static void __init reserve_iommu_exclusion_range_all(
+    struct amd_iommu *iommu,
+    unsigned long base, unsigned long limit)
 {
     reserve_iommu_exclusion_range(iommu, base, limit);
     iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
 }
 
-static void __init reserve_unity_map_for_device(u16 bdf, unsigned long base,
-    unsigned long length, u8 iw, u8 ir)
+static void __init reserve_unity_map_for_device(
+    u16 bdf, unsigned long base,
+    unsigned long length, u8 iw, u8 ir)
 {
     unsigned long old_top, new_top;
@@ -80,7 +82,7 @@ static void __init reserve_unity_map_for
         if ( ivrs_mappings[bdf].addr_range_start < base )
             base = ivrs_mappings[bdf].addr_range_start;
         length = new_top - base;
-    } 
+    }
 
     /* extend r/w permissioms and keep aggregate */
     if ( iw )
@@ -93,7 +95,7 @@ static void __init reserve_unity_map_for
 }
 
 static int __init register_exclusion_range_for_all_devices(
-        unsigned long base, unsigned long limit, u8 iw, u8 ir)
+    unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     unsigned long range_top, iommu_top, length;
     struct amd_iommu *iommu;
@@ -105,7 +107,7 @@ static int __init register_exclusion_ran
     iommu_top = max_page * PAGE_SIZE;
     if ( base < iommu_top )
     {
-        if (range_top > iommu_top)
+        if ( range_top > iommu_top )
            range_top = iommu_top;
         length = range_top - base;
         /* reserve r/w unity-mapped page entries for devices */
@@ -116,7 +118,7 @@ static int __init register_exclusion_ran
         base = iommu_top;
     }
     /* register IOMMU exclusion range settings */
-    if (limit >= iommu_top)
+    if ( limit >= iommu_top )
     {
         for_each_amd_iommu( iommu )
             reserve_iommu_exclusion_range_all(iommu, base, limit);
@@ -125,8 +127,8 @@ static int __init register_exclusion_ran
     return 0;
 }
 
-static int __init register_exclusion_range_for_device(u16 bdf,
-    unsigned long base, unsigned long limit, u8 iw, u8 ir)
+static int __init register_exclusion_range_for_device(
+    u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     unsigned long range_top, iommu_top, length;
     struct amd_iommu *iommu;
@@ -147,7 +149,7 @@ static int __init register_exclusion_ran
     iommu_top = max_page * PAGE_SIZE;
     if ( base < iommu_top )
     {
-        if (range_top > iommu_top)
+        if ( range_top > iommu_top )
            range_top = iommu_top;
         length = range_top - base;
         /* reserve unity-mapped page entries for device */
@@ -159,8 +161,8 @@ static int __init register_exclusion_ran
         base = iommu_top;
     }
 
-    /* register IOMMU exclusion range settings for device */ 
-    if ( limit >= iommu_top ) 
+    /* register IOMMU exclusion range settings for device */
+    if ( limit >= iommu_top )
     {
         reserve_iommu_exclusion_range(iommu, base, limit);
         ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
@@ -171,8 +173,8 @@ static int __init register_exclusion_ran
 }
 
 static int __init register_exclusion_range_for_iommu_devices(
-        struct amd_iommu *iommu,
-        unsigned long base, unsigned long limit, u8 iw, u8 ir)
+    struct amd_iommu *iommu,
+    unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     unsigned long range_top, iommu_top, length;
     u16 bus, devfn, bdf, req;
@@ -183,7 +185,7 @@ static int __init register_exclusion_ran
     iommu_top = max_page * PAGE_SIZE;
     if ( base < iommu_top )
     {
-        if (range_top > iommu_top)
+        if ( range_top > iommu_top )
            range_top = iommu_top;
         length = range_top - base;
         /* reserve r/w unity-mapped page entries for devices */
@@ -205,19 +207,19 @@ static int __init register_exclusion_ran
     }
 
     /* register IOMMU exclusion range settings */
-    if (limit >= iommu_top)
+    if ( limit >= iommu_top )
         reserve_iommu_exclusion_range_all(iommu, base, limit);
     return 0;
 }
 
 static int __init parse_ivmd_device_select(
-        struct acpi_ivmd_block_header *ivmd_block,
-        unsigned long base, unsigned long limit, u8 iw, u8 ir)
+    struct acpi_ivmd_block_header *ivmd_block,
+    unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     u16 bdf;
 
     bdf = ivmd_block->header.dev_id;
-    if (bdf >= ivrs_bdf_entries)
+    if ( bdf >= ivrs_bdf_entries )
     {
         dprintk(XENLOG_ERR, "IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
         return -ENODEV;
@@ -227,44 +229,41 @@ static int __init parse_ivmd_device_sele
 }
 
 static int __init parse_ivmd_device_range(
-        struct acpi_ivmd_block_header *ivmd_block,
-        unsigned long base, unsigned long limit, u8 iw, u8 ir)
+    struct acpi_ivmd_block_header *ivmd_block,
+    unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     u16 first_bdf, last_bdf, bdf;
     int error;
 
     first_bdf = ivmd_block->header.dev_id;
-    if (first_bdf >= ivrs_bdf_entries)
-    {
-       dprintk(XENLOG_ERR, "IVMD Error: "
-            "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
-       return -ENODEV;
+    if ( first_bdf >= ivrs_bdf_entries )
+    {
+        dprintk(XENLOG_ERR, "IVMD Error: "
+                "Invalid Range_First Dev_Id 0x%x\n", first_bdf);
+        return -ENODEV;
     }
 
     last_bdf = ivmd_block->last_dev_id;
-    if (last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf)
+    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
         dprintk(XENLOG_ERR, "IVMD Error: "
-            "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
-        return -ENODEV;
-    }
-
-    dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
-        first_bdf, last_bdf);
-
-    for ( bdf = first_bdf, error = 0;
-       bdf <= last_bdf && !error; ++bdf )
-    {
-       error = register_exclusion_range_for_device(
-            bdf, base, limit, iw, ir);
-    }
-
-    return error;
+                "Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
+        return -ENODEV;
+    }
+
+    dprintk(XENLOG_ERR, " Dev_Id Range: 0x%x -> 0x%x\n",
+            first_bdf, last_bdf);
+
+    for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
+        error = register_exclusion_range_for_device(
+            bdf, base, limit, iw, ir);
+
+    return error;
 }
 
 static int __init parse_ivmd_device_iommu(
-        struct acpi_ivmd_block_header *ivmd_block,
-        unsigned long base, unsigned long limit, u8 iw, u8 ir)
+    struct acpi_ivmd_block_header *ivmd_block,
+    unsigned long base, unsigned long limit, u8 iw, u8 ir)
 {
     struct amd_iommu *iommu;
 
@@ -273,14 +272,14 @@ static int __init parse_ivmd_device_iomm
                                     ivmd_block->cap_offset);
     if ( !iommu )
     {
-       dprintk(XENLOG_ERR,
-            "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
-            ivmd_block->header.dev_id, ivmd_block->cap_offset);
-       return -ENODEV;
+        dprintk(XENLOG_ERR,
+                "IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
+                ivmd_block->header.dev_id, ivmd_block->cap_offset);
+        return -ENODEV;
     }
 
     return register_exclusion_range_for_iommu_devices(
-        iommu, base, limit, iw, ir);
+        iommu, base, limit, iw, ir);
 }
 
 static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
@@ -288,11 +287,11 @@ static int __init parse_ivmd_block(struc
     unsigned long start_addr, mem_length, base, limit;
     u8 iw, ir;
 
-    if (ivmd_block->header.length <
-        sizeof(struct acpi_ivmd_block_header))
-    {
-       dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
-       return -ENODEV;
+    if ( ivmd_block->header.length <
+         sizeof(struct acpi_ivmd_block_header) )
+    {
+        dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Length!\n");
+        return -ENODEV;
     }
 
     start_addr = (unsigned long)ivmd_block->start_addr;
@@ -301,7 +300,7 @@ static int __init parse_ivmd_block(struc
     limit = (start_addr + mem_length - 1) & PAGE_MASK;
 
     dprintk(XENLOG_INFO, "IVMD Block: Type 0x%x\n",
-        ivmd_block->header.type);
+            ivmd_block->header.type);
     dprintk(XENLOG_INFO, " Start_Addr_Phys 0x%lx\n", start_addr);
     dprintk(XENLOG_INFO, " Mem_Length 0x%lx\n", mem_length);
 
@@ -322,27 +321,27 @@ static int __init parse_ivmd_block(struc
     }
     else
     {
-       dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
-       return -ENODEV;
+        dprintk(KERN_ERR, "IVMD Error: Invalid Flag Field!\n");
+        return -ENODEV;
     }
 
     switch( ivmd_block->header.type )
     {
     case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
         return register_exclusion_range_for_all_devices(
-            base, limit, iw, ir);
+            base, limit, iw, ir);
 
     case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
         return parse_ivmd_device_select(ivmd_block,
-            base, limit, iw, ir);
+                                        base, limit, iw, ir);
 
     case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
         return parse_ivmd_device_range(ivmd_block,
-            base, limit, iw, ir);
+                                       base, limit, iw, ir);
 
     case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
         return parse_ivmd_device_iommu(ivmd_block,
-            base, limit, iw, ir);
+                                       base, limit, iw, ir);
 
     default:
         dprintk(XENLOG_ERR, "IVMD Error: Invalid Block Type!\n");
@@ -350,8 +349,8 @@ static int __init parse_ivmd_block(struc
     }
 }
 
-static u16 __init parse_ivhd_device_padding(u16 pad_length,
-    u16 header_length, u16 block_length)
+static u16 __init parse_ivhd_device_padding(
+    u16 pad_length, u16 header_length, u16 block_length)
 {
     if ( header_length < (block_length + pad_length) )
     {
@@ -363,7 +362,7 @@ static u16 __init parse_ivhd_device_padd
 }
 
 static u16 __init parse_ivhd_device_select(
-        union acpi_ivhd_device *ivhd_device)
+    union acpi_ivhd_device *ivhd_device)
 {
     u16 bdf;
 
@@ -385,8 +384,8 @@ static u16 __init parse_ivhd_device_sele
 }
 
 static u16 __init parse_ivhd_device_range(
-        union acpi_ivhd_device *ivhd_device,
-        u16 header_length, u16 block_length)
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length)
 {
     u16 dev_length, first_bdf, last_bdf, bdf;
     u8 sys_mgt;
@@ -399,7 +398,8 @@ static u16 __init parse_ivhd_device_rang
     }
 
     if ( ivhd_device->range.trailer.type !=
-        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END) {
+         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+    {
         dprintk(XENLOG_ERR, "IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
                 ivhd_device->range.trailer.type);
@@ -409,35 +409,35 @@ static u16 __init parse_ivhd_device_rang
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-       dprintk(XENLOG_ERR, "IVHD Error: "
-            "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
-       return 0;
+        dprintk(XENLOG_ERR, "IVHD Error: "
+                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+        return 0;
     }
 
     last_bdf = ivhd_device->range.trailer.dev_id;
-    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
-    {
-       dprintk(XENLOG_ERR, "IVHD Error: "
-            "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
-       return 0;
+    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
+    {
+        dprintk(XENLOG_ERR, "IVHD Error: "
+                "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
+        return 0;
     }
 
     dprintk(XENLOG_INFO, " Dev_Id Range: 0x%x -> 0x%x\n",
-        first_bdf, last_bdf);
+            first_bdf, last_bdf);
 
     /* override flags for range of devices */
     sys_mgt = get_field_from_byte(ivhd_device->header.flags,
-                                 AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                 AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
+                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
         ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
 
     return dev_length;
 }
 
 static u16 __init parse_ivhd_device_alias(
-        union acpi_ivhd_device *ivhd_device,
-        u16 header_length, u16 block_length)
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length)
 {
     u16 dev_length, alias_id, bdf;
 
@@ -445,7 +445,7 @@ static u16 __init parse_ivhd_device_alia
     if ( header_length < (block_length + dev_length) )
     {
         dprintk(XENLOG_ERR, "IVHD Error: "
-            "Invalid Device_Entry Length!\n");
+                "Invalid Device_Entry Length!\n");
         return 0;
     }
 
@@ -460,9 +460,9 @@ static u16 __init parse_ivhd_device_alia
     alias_id = ivhd_device->alias.dev_id;
     if ( alias_id >= ivrs_bdf_entries )
     {
-       dprintk(XENLOG_ERR, "IVHD Error: "
-            "Invalid Alias Dev_Id 0x%x\n", alias_id);
-       return 0;
+        dprintk(XENLOG_ERR, "IVHD Error: "
+                "Invalid Alias Dev_Id 0x%x\n", alias_id);
+        return 0;
     }
 
     dprintk(XENLOG_INFO, " Dev_Id Alias: 0x%x\n", alias_id);
@@ -470,18 +470,18 @@ static u16 __init parse_ivhd_device_alia
     /* override requestor_id and flags for device */
     ivrs_mappings[bdf].dte_requestor_id = alias_id;
     ivrs_mappings[bdf].dte_sys_mgt_enable =
-            get_field_from_byte(ivhd_device->header.flags,
-                                AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+        get_field_from_byte(ivhd_device->header.flags,
+                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
+                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
     ivrs_mappings[alias_id].dte_sys_mgt_enable =
-            ivrs_mappings[bdf].dte_sys_mgt_enable;
+        ivrs_mappings[bdf].dte_sys_mgt_enable;
 
     return dev_length;
 }
 
 static u16 __init parse_ivhd_device_alias_range(
-        union acpi_ivhd_device *ivhd_device,
-        u16 header_length, u16 block_length)
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length)
 {
     u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
 
@@ -496,7 +496,7 @@ static u16 __init parse_ivhd_device_alia
     }
 
     if ( ivhd_device->alias_range.trailer.type !=
-        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
     {
         dprintk(XENLOG_ERR, "IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
@@ -536,7 +536,7 @@ static u16 __init parse_ivhd_device_alia
     sys_mgt = get_field_from_byte(ivhd_device->header.flags,
                                   AMD_IOMMU_ACPI_SYS_MGT_MASK,
                                   AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
     {
         ivrs_mappings[bdf].dte_requestor_id = alias_id;
         ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
@@ -547,8 +547,8 @@ static u16 __init parse_ivhd_device_alia
 }
 
 static u16 __init parse_ivhd_device_extended(
-        union acpi_ivhd_device *ivhd_device,
-        u16 header_length, u16 block_length)
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length)
 {
     u16 dev_length, bdf;
 
@@ -578,8 +578,8 @@ static u16 __init parse_ivhd_device_exte
 }
 
 static u16 __init parse_ivhd_device_extended_range(
-        union acpi_ivhd_device *ivhd_device,
-        u16 header_length, u16 block_length)
+    union acpi_ivhd_device *ivhd_device,
+    u16 header_length, u16 block_length)
 {
     u16 dev_length, first_bdf, last_bdf, bdf;
     u8 sys_mgt;
@@ -593,7 +593,7 @@ static u16 __init parse_ivhd_device_exte
     }
 
     if ( ivhd_device->extended_range.trailer.type !=
-        AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
+         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
    {
         dprintk(XENLOG_ERR, "IVHD Error: "
                 "Invalid Range: End_Type 0x%x\n",
@@ -604,13 +604,13 @@ static u16 __init parse_ivhd_device_exte
     first_bdf = ivhd_device->header.dev_id;
     if ( first_bdf >= ivrs_bdf_entries )
     {
-       dprintk(XENLOG_ERR, "IVHD Error: "
-            "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
-       return 0;
+        dprintk(XENLOG_ERR, "IVHD Error: "
+                "Invalid Range: First Dev_Id 0x%x\n", first_bdf);
+        return 0;
     }
 
     last_bdf = ivhd_device->extended_range.trailer.dev_id;
-    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
+    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
     {
         dprintk(XENLOG_ERR, "IVHD Error: "
                 "Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
@@ -624,7 +624,7 @@ static u16 __init parse_ivhd_device_exte
     sys_mgt = get_field_from_byte(ivhd_device->header.flags,
                                   AMD_IOMMU_ACPI_SYS_MGT_MASK,
                                   AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    for ( bdf = first_bdf; bdf <= last_bdf; ++bdf )
+    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
         ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
 
     return dev_length;
@@ -637,20 +637,20 @@ static int __init parse_ivhd_block(struc
     struct amd_iommu *iommu;
 
     if ( ivhd_block->header.length <
-        sizeof(struct acpi_ivhd_block_header) )
+         sizeof(struct acpi_ivhd_block_header) )
     {
         dprintk(XENLOG_ERR, "IVHD Error: Invalid Block Length!\n");
         return -ENODEV;
     }
 
     iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
-            ivhd_block->cap_offset);
+                                    ivhd_block->cap_offset);
     if ( !iommu )
     {
         dprintk(XENLOG_ERR,
                 "IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
                 ivhd_block->header.dev_id, ivhd_block->cap_offset);
-       return -ENODEV;
+        return -ENODEV;
     }
 
     dprintk(XENLOG_INFO, "IVHD Block:\n");
@@ -668,29 +668,29 @@ static int __init parse_ivhd_block(struc
                                              AMD_IOMMU_ACPI_COHERENT_MASK,
                                              AMD_IOMMU_ACPI_COHERENT_SHIFT);
     iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
-                                            AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
-                                            AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
+                                               AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
+                                               AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
     iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
-                                            AMD_IOMMU_ACPI_ISOC_MASK,
-                                            AMD_IOMMU_ACPI_ISOC_SHIFT);
+                                             AMD_IOMMU_ACPI_ISOC_MASK,
+                                             AMD_IOMMU_ACPI_ISOC_SHIFT);
     iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
-                                            AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
-                                            AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
+                                             AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
+                                             AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
     iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
-                                        AMD_IOMMU_ACPI_PASS_PW_MASK,
-                                        AMD_IOMMU_ACPI_PASS_PW_SHIFT);
+                                         AMD_IOMMU_ACPI_PASS_PW_MASK,
+                                         AMD_IOMMU_ACPI_PASS_PW_SHIFT);
     iommu->ht_tunnel_enable = get_field_from_byte(
-            ivhd_block->header.flags,
-            AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
-            AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
+        ivhd_block->header.flags,
+        AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
+        AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
 
     /* parse Device Entries */
     block_length = sizeof(struct acpi_ivhd_block_header);
-    while( ivhd_block->header.length >=
-        (block_length + sizeof(struct acpi_ivhd_device_header)) )
+    while ( ivhd_block->header.length >=
+            (block_length + sizeof(struct acpi_ivhd_device_header)) )
     {
         ivhd_device = (union acpi_ivhd_device *)
-                ((u8 *)ivhd_block + block_length);
+            ((u8 *)ivhd_block + block_length);
 
         dprintk(XENLOG_INFO, "IVHD Device Entry:\n");
         dprintk(XENLOG_INFO, " Type 0x%x\n",
@@ -700,7 +700,7 @@ static int __init parse_ivhd_block(struc
         dprintk(XENLOG_INFO, " Flags 0x%x\n",
                 ivhd_device->header.flags);
 
-        switch( ivhd_device->header.type )
+        switch ( ivhd_device->header.type )
        {
         case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD:
             dev_length = parse_ivhd_device_padding(
@@ -716,7 +716,8 @@ static int __init parse_ivhd_block(struc
             dev_length = parse_ivhd_device_select(ivhd_device);
             break;
         case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START:
-            dev_length = parse_ivhd_device_range(ivhd_device,
+            dev_length = parse_ivhd_device_range(
+                ivhd_device,
                 ivhd_block->header.length, block_length);
             break;
         case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT:
@@ -741,7 +742,7 @@ static int __init parse_ivhd_block(struc
             break;
         default:
             dprintk(XENLOG_ERR, "IVHD Error: "
-                "Invalid Device Type!\n");
+                    "Invalid Device Type!\n");
             dev_length = 0;
             break;
         }
@@ -759,7 +760,7 @@ static int __init parse_ivrs_block(struc
     struct acpi_ivhd_block_header *ivhd_block;
     struct acpi_ivmd_block_header *ivmd_block;
 
-    switch(ivrs_block->type)
+    switch ( ivrs_block->type )
     {
     case AMD_IOMMU_ACPI_IVHD_TYPE:
         ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block;
@@ -786,7 +787,7 @@ void __init dump_acpi_table_header(struc
     printk(XENLOG_INFO "AMD IOMMU: ACPI Table:\n");
 
     printk(XENLOG_INFO " Signature ");
-    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->signature[i]);
     printk("\n");
 
@@ -795,28 +796,27 @@ void __init dump_acpi_table_header(struc
     printk(" CheckSum 0x%x\n", table->checksum);
 
     printk(" OEM_Id ");
-    for ( i = 0; i < ACPI_OEM_ID_SIZE; ++i )
+    for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ )
         printk("%c", table->oem_id[i]);
     printk("\n");
 
     printk(" OEM_Table_Id ");
-    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; ++i )
+    for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ )
         printk("%c", table->oem_table_id[i]);
     printk("\n");
 
     printk(" OEM_Revision 0x%x\n", table->oem_revision);
 
     printk(" Creator_Id ");
-    for ( i = 0; i < ACPI_NAME_SIZE; ++i )
+    for ( i = 0; i < ACPI_NAME_SIZE; i++ )
         printk("%c", table->asl_compiler_id[i]);
     printk("\n");
 
     printk(" Creator_Revision 0x%x\n",
-       table->asl_compiler_revision);
-}
-
-int __init parse_ivrs_table(unsigned long phys_addr,
-                            unsigned long size)
+           table->asl_compiler_revision);
+}
+
+int __init parse_ivrs_table(unsigned long phys_addr, unsigned long size)
 {
     struct acpi_ivrs_block_header *ivrs_block;
     unsigned long length, i;
@@ -834,7 +834,7 @@ int __init parse_ivrs_table(unsigned lon
     /* validate checksum: sum of entire table == 0 */
     checksum = 0;
     raw_table = (u8 *)table;
-    for ( i = 0; i < table->length; ++i )
+    for ( i = 0; i < table->length; i++ )
         checksum += raw_table[i];
     if ( checksum )
     {
@@ -845,11 +845,10 @@ int __init parse_ivrs_table(unsigned lon
 
     /* parse IVRS blocks */
     length = sizeof(struct acpi_ivrs_table_header);
-    while( error == 0 && table->length >
-        (length + sizeof(struct acpi_ivrs_block_header)) )
+    while ( (error == 0) && (table->length > (length + sizeof(*ivrs_block))) )
     {
         ivrs_block = (struct acpi_ivrs_block_header *)
-                ((u8 *)table + length);
+            ((u8 *)table + length);
 
         dprintk(XENLOG_INFO, "IVRS Block:\n");
         dprintk(XENLOG_INFO, " Type 0x%x\n", ivrs_block->type);
@@ -857,16 +856,16 @@ int __init parse_ivrs_table(unsigned lon
         dprintk(XENLOG_INFO, " Length 0x%x\n", ivrs_block->length);
         dprintk(XENLOG_INFO, " Dev_Id 0x%x\n", ivrs_block->dev_id);
 
-        if (table->length >= (length + ivrs_block->length))
-           error = parse_ivrs_block(ivrs_block);
-        else
+        if ( table->length < (length + ivrs_block->length) )
         {
-           dprintk(XENLOG_ERR, "IVRS Error: "
-               "Table Length Exceeded: 0x%x -> 0x%lx\n",
-               table->length,
-               (length + ivrs_block->length));
-           return -ENODEV;
+            dprintk(XENLOG_ERR, "IVRS Error: "
+                    "Table Length Exceeded: 0x%x -> 0x%lx\n",
+                    table->length,
+                    (length + ivrs_block->length));
+            return -ENODEV;
         }
+
+        error = parse_ivrs_block(ivrs_block);
         length += ivrs_block->length;
     }
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_detect.c  Wed Mar 19 16:16:24 2008 +0000
@@ -26,8 +26,8 @@
 #include "../pci-direct.h"
 #include "../pci_regs.h"
 
-static int __init valid_bridge_bus_config(int bus, int dev, int func,
-    int *sec_bus, int *sub_bus)
+static int __init valid_bridge_bus_config(
+    int bus, int dev, int func, int *sec_bus, int *sub_bus)
 {
     int pri_bus;
 
@@ -35,7 +35,7 @@ static int __init valid_bridge_bus_confi
     *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
     *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);
 
-    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
+    return ((pri_bus == bus) && (*sec_bus > bus) && (*sub_bus >= *sec_bus));
 }
 
 int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
@@ -49,9 +49,11 @@ int __init get_iommu_last_downstream_bus
     iommu->downstream_bus_present[bus] = 1;
     dev = PCI_SLOT(iommu->first_devfn);
     multi_func = PCI_FUNC(iommu->first_devfn) > 0;
-    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
+    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; devfn++ )
+    {
         /* skipping to next device#? */
-        if ( dev != PCI_SLOT(devfn) ) {
+        if ( dev != PCI_SLOT(devfn) )
+        {
             dev = PCI_SLOT(devfn);
             multi_func = 0;
         }
@@ -62,14 +64,15 @@ int __init get_iommu_last_downstream_bus
             continue;
 
         hdr_type = read_pci_config_byte(bus, dev, func,
-                                       PCI_HEADER_TYPE);
+                                        PCI_HEADER_TYPE);
         if ( func == 0 )
             multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);
 
         if ( (func == 0 || multi_func) &&
-            IS_PCI_TYPE1_HEADER(hdr_type) ) {
-            if (!valid_bridge_bus_config(bus, dev, func,
-                &sec_bus, &sub_bus))
+             IS_PCI_TYPE1_HEADER(hdr_type) )
+        {
+            if ( !valid_bridge_bus_config(bus, dev, func,
+                                          &sec_bus, &sub_bus) )
                 return -ENODEV;
 
             if ( sub_bus > iommu->last_downstream_bus )
@@ -84,18 +87,18 @@ int __init get_iommu_last_downstream_bus
 }
 
 int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
-    struct amd_iommu *iommu)
+                                  struct amd_iommu *iommu)
 {
     u32 cap_header, cap_range, misc_info;
     u64 mmio_bar;
 
-    mmio_bar = (u64)read_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
+    mmio_bar = (u64)read_pci_config(
+        bus, dev, func, cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
     mmio_bar |= read_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
+                                cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET);
     iommu->mmio_base_phys = mmio_bar & (u64)~0x3FFF;
 
-    if ( (mmio_bar & 0x1) == 0 || iommu->mmio_base_phys == 0 )
+    if ( ((mmio_bar & 0x1) == 0) || (iommu->mmio_base_phys == 0) )
     {
         dprintk(XENLOG_ERR ,
                 "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
@@ -106,42 +109,37 @@ int __init get_iommu_capabilities(u8 bus
     iommu->cap_offset = cap_ptr;
 
     cap_header = read_pci_config(bus, dev, func, cap_ptr);
-    iommu->revision = get_field_from_reg_u32(cap_header,
-        PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
-    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
-        PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
-    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
-        PCI_CAP_HT_TUNNEL_MASK,
-        PCI_CAP_HT_TUNNEL_SHIFT);
-    iommu->pte_not_present_cached = get_field_from_reg_u32(cap_header,
-        PCI_CAP_NP_CACHE_MASK,
-        PCI_CAP_NP_CACHE_SHIFT);
+    iommu->revision = get_field_from_reg_u32(
+        cap_header, PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
+    iommu->iotlb_support = get_field_from_reg_u32(
+        cap_header, PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
+    iommu->ht_tunnel_support = get_field_from_reg_u32(
+        cap_header, PCI_CAP_HT_TUNNEL_MASK, PCI_CAP_HT_TUNNEL_SHIFT);
+    iommu->pte_not_present_cached = get_field_from_reg_u32(
+        cap_header, PCI_CAP_NP_CACHE_MASK, PCI_CAP_NP_CACHE_SHIFT);
 
     cap_range = read_pci_config(bus, dev, func,
-        cap_ptr + PCI_CAP_RANGE_OFFSET);
-    iommu->unit_id = get_field_from_reg_u32(cap_range,
-        PCI_CAP_UNIT_ID_MASK,
-        PCI_CAP_UNIT_ID_SHIFT);
-    iommu->root_bus = get_field_from_reg_u32(cap_range,
-        PCI_CAP_BUS_NUMBER_MASK,
-        PCI_CAP_BUS_NUMBER_SHIFT);
-    iommu->first_devfn = get_field_from_reg_u32(cap_range,
-        PCI_CAP_FIRST_DEVICE_MASK,
-        PCI_CAP_FIRST_DEVICE_SHIFT);
-    iommu->last_devfn = get_field_from_reg_u32(cap_range,
-        PCI_CAP_LAST_DEVICE_MASK,
-        PCI_CAP_LAST_DEVICE_SHIFT);
+                                cap_ptr + PCI_CAP_RANGE_OFFSET);
+    iommu->unit_id = get_field_from_reg_u32(
+        cap_range, PCI_CAP_UNIT_ID_MASK, PCI_CAP_UNIT_ID_SHIFT);
+    iommu->root_bus = get_field_from_reg_u32(
+        cap_range, PCI_CAP_BUS_NUMBER_MASK, PCI_CAP_BUS_NUMBER_SHIFT);
+    iommu->first_devfn = get_field_from_reg_u32(
+        cap_range, PCI_CAP_FIRST_DEVICE_MASK, PCI_CAP_FIRST_DEVICE_SHIFT);
+    iommu->last_devfn = get_field_from_reg_u32(
+        cap_range, PCI_CAP_LAST_DEVICE_MASK, PCI_CAP_LAST_DEVICE_SHIFT);
 
     misc_info = read_pci_config(bus, dev, func,
-        cap_ptr + PCI_MISC_INFO_OFFSET);
-    iommu->msi_number = get_field_from_reg_u32(misc_info,
-        PCI_CAP_MSI_NUMBER_MASK,
-        PCI_CAP_MSI_NUMBER_SHIFT);
+                                cap_ptr + PCI_MISC_INFO_OFFSET);
+    iommu->msi_number = get_field_from_reg_u32(
+        misc_info, PCI_CAP_MSI_NUMBER_MASK, PCI_CAP_MSI_NUMBER_SHIFT);
+
     return 0;
 }
 
-static int __init scan_caps_for_iommu(int bus, int dev, int func,
-    iommu_detect_callback_ptr_t iommu_detect_callback)
+static int __init scan_caps_for_iommu(
+    int bus, int dev, int func,
+    iommu_detect_callback_ptr_t iommu_detect_callback)
 {
     int cap_ptr, cap_id, cap_type;
     u32 cap_header;
@@ -149,32 +147,35 @@ static int __init scan_caps_for_iommu(in
     count = 0;
 
     cap_ptr = read_pci_config_byte(bus, dev, func,
-        PCI_CAPABILITY_LIST);
-    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
-        count < PCI_MAX_CAP_BLOCKS && !error ) {
+                                   PCI_CAPABILITY_LIST);
+    while ( (cap_ptr >= PCI_MIN_CAP_OFFSET) &&
+            (count < PCI_MAX_CAP_BLOCKS) &&
+            !error )
+    {
         cap_ptr &= PCI_CAP_PTR_MASK;
         cap_header = read_pci_config(bus, dev, func, cap_ptr);
-        cap_id = get_field_from_reg_u32(cap_header,
-            PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
-
-        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
-            cap_type = get_field_from_reg_u32(cap_header,
-                PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
-            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
+        cap_id = get_field_from_reg_u32(
+            cap_header, PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
+
+        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE )
+        {
+            cap_type = get_field_from_reg_u32(
+                cap_header, PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
+            if ( cap_type == PCI_CAP_TYPE_IOMMU )
                 error = iommu_detect_callback(
-                    bus, dev, func, cap_ptr);
-            }
-        }
-
-        cap_ptr = get_field_from_reg_u32(cap_header,
-            PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
-        ++count;
    }
-
-    return error;
-}
-
-static int __init scan_functions_for_iommu(int bus, int dev,
-    iommu_detect_callback_ptr_t iommu_detect_callback)
+                    bus, dev, func, cap_ptr);
+        }
+
+        cap_ptr = get_field_from_reg_u32(
+            cap_header, PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
+        count++;
+    }
+
+    return error;
+}
+
+static int __init scan_functions_for_iommu(
+    int bus, int dev, iommu_detect_callback_ptr_t iommu_detect_callback)
 {
     int func, hdr_type;
     int count, error = 0;
@@ -182,19 +183,20 @@ static int __init scan_functions_for_iom
     func = 0;
     count = 1;
     while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
-        PCI_VENDOR_ID)) && !error && func < count ) {
+                                                   PCI_VENDOR_ID)) &&
+            !error && (func < count) )
+    {
         hdr_type = read_pci_config_byte(bus, dev, func,
-            PCI_HEADER_TYPE);
+                                        PCI_HEADER_TYPE);
         if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
             count = PCI_MAX_FUNC_COUNT;
 
         if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
-            IS_PCI_TYPE1_HEADER(hdr_type) ) {
-            error = scan_caps_for_iommu(bus, dev, func,
-                iommu_detect_callback);
-        }
-        ++func;
+             IS_PCI_TYPE1_HEADER(hdr_type) )
+            error = scan_caps_for_iommu(bus, dev, func,
+                                        iommu_detect_callback);
+        func++;
     }
 
     return error;
@@ -205,13 +207,11 @@ int __init scan_for_iommu(iommu_detect_c
 {
     int bus, dev, error = 0;
 
-    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus ) {
-        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev ) {
-            error = scan_functions_for_iommu(bus, dev,
-                iommu_detect_callback);
-        }
-    }
-
-    return error;
-}
-
+    for ( bus = 0; bus < PCI_MAX_BUS_COUNT && !error; ++bus )
+        for ( dev = 0; dev < PCI_MAX_DEV_COUNT && !error; ++dev )
+            error = scan_functions_for_iommu(bus, dev,
+                                             iommu_detect_callback);
+
+    return error;
+}
+
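scan_caps_for_iommu() above walks the standard PCI capability list: config-space byte 0x34 points at the first capability, each capability keeps its ID in byte 0 and the next pointer in byte 1, and a count bound guards against malformed loops. A self-contained user-space sketch of the same walk -- the config-space reader is stubbed with canned data, and everything except the standard 0x34/ID/next offsets is assumed for illustration:

#include <stdio.h>
#include <stdint.h>

#define PCI_CAPABILITY_LIST 0x34  /* standard offset of the first cap pointer */
#define PCI_MIN_CAP_OFFSET  0x40  /* caps live above the predefined header */
#define PCI_MAX_CAP_BLOCKS  48    /* bound the walk against malformed lists */

/* Stub: in real code this reads PCI config space for one device. */
static uint8_t read_config_byte(uint8_t offset)
{
    static const uint8_t cfg[256] = {
        [0x34] = 0x50,  /* first capability at offset 0x50 */
        [0x50] = 0x0f,  /* capability ID */
        [0x51] = 0x00,  /* next pointer: end of list */
    };
    return cfg[offset];
}

int main(void)
{
    int count = 0;
    uint8_t cap_ptr = read_config_byte(PCI_CAPABILITY_LIST);

    while ( (cap_ptr >= PCI_MIN_CAP_OFFSET) && (count < PCI_MAX_CAP_BLOCKS) )
    {
        cap_ptr &= ~3;                               /* dword aligned */
        uint8_t cap_id = read_config_byte(cap_ptr);  /* byte 0: cap ID */
        printf("capability 0x%02x at offset 0x%02x\n", cap_id, cap_ptr);
        cap_ptr = read_config_byte(cap_ptr + 1);     /* byte 1: next ptr */
        count++;
    }
    return 0;
}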
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Mar 19 16:16:24 2008 +0000
@@ -32,26 +32,28 @@ int __init map_iommu_mmio_region(struct
 {
     unsigned long mfn;
 
-    if ( nr_amd_iommus > MAX_AMD_IOMMUS ) {
+    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
+    {
         gdprintk(XENLOG_ERR,
-            "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
+                 "IOMMU: nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
         return -ENOMEM;
     }
 
-    iommu->mmio_base = (void *) fix_to_virt(FIX_IOMMU_MMIO_BASE_0 +
-        nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
-    mfn = (unsigned long)iommu->mmio_base_phys >> PAGE_SHIFT;
+    iommu->mmio_base = (void *)fix_to_virt(
+        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
+    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
     map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
-        MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
+                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);
 
-    memset((u8*)iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
+    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);
 
     return 0;
 }
 
 void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
 {
-    if ( iommu->mmio_base ) {
+    if ( iommu->mmio_base )
+    {
         iounmap(iommu->mmio_base);
         iommu->mmio_base = NULL;
     }
@@ -67,16 +69,16 @@ void __init register_iommu_dev_table_in_
     addr_hi = addr_64 >> 32;
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_DEV_TABLE_BASE_LOW_MASK,
-        IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
+                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
+                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
     set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
-        entry, IOMMU_DEV_TABLE_SIZE_MASK,
-        IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
+                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
+                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
     writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_DEV_TABLE_BASE_HIGH_MASK,
-        IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
+                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
+                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
     writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
 }
 
@@ -91,49 +93,49 @@ void __init register_iommu_cmd_buffer_in
     addr_hi = addr_64 >> 32;
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_CMD_BUFFER_BASE_LOW_MASK,
-        IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
+                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
+                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
     writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
 
     power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
         IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
-        IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
+                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
+                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
     set_field_in_reg_u32(power_of2_entries, entry,
-        IOMMU_CMD_BUFFER_LENGTH_MASK,
-        IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
+                         IOMMU_CMD_BUFFER_LENGTH_MASK,
+                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
 }
 
 static void __init set_iommu_translation_control(struct amd_iommu *iommu,
-    int enable)
+                                                 int enable)
 {
     u32 entry;
 
     entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
-        IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
+                         IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
+                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
-        IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
+                         IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
+                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
 static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
-    int enable)
+                                                    int enable)
 {
     u32 entry;
 
     entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-        IOMMU_CONTROL_ENABLED, entry,
-        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
-        IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
+                         IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
+                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
@@ -146,34 +148,34 @@ static void __init register_iommu_exclus
     addr_hi = iommu->exclusion_limit >> 32;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
-        IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
+                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
+                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_EXCLUSION_LIMIT_LOW_MASK,
-        IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
+                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
+                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);
 
     addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
     addr_hi = iommu->exclusion_base >> 32;
 
     set_field_in_reg_u32((u32)addr_hi, 0,
-        IOMMU_EXCLUSION_BASE_HIGH_MASK,
-        IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
+                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
+                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET);
 
     set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
-        IOMMU_EXCLUSION_BASE_LOW_MASK,
-        IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
+                         IOMMU_EXCLUSION_BASE_LOW_MASK,
+                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
     set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
-        IOMMU_EXCLUSION_ALLOW_ALL_MASK,
-        IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
+                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
+                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
     set_field_in_reg_u32(iommu->exclusion_enable, entry,
-        IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
-        IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
+                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
+                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
 }
 
@@ -184,5 +186,3 @@ void __init enable_iommu(struct amd_iomm
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
     printk("AMD IOMMU %d: Enabled\n", nr_amd_iommus);
 }
-
-
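The register writers above lean on get_field_from_reg_u32()/set_field_in_reg_u32(), which are plain mask-and-shift helpers. A sketch of plausible implementations inferred from the call sites (the real definitions live in Xen's AMD IOMMU headers and may differ in detail):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Extract a bit field from a 32-bit register value. */
static u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
{
    return (reg_value & mask) >> shift;
}

/* Merge a field into an existing register value; result lands in *reg. */
static u32 set_field_in_reg_u32(u32 field, u32 reg_value,
                                u32 mask, u32 shift, u32 *reg)
{
    reg_value &= ~mask;
    reg_value |= (field << shift) & mask;
    *reg = reg_value;
    return reg_value;
}

int main(void)
{
    u32 entry = 0;

    /* Pack a 2-bit field at bits 5:4, then read it back. */
    set_field_in_reg_u32(0x3, entry, 0x30, 4, &entry);
    printf("entry=0x%x field=%u\n", entry,
           get_field_from_reg_u32(entry, 0x30, 4));
    return 0;
}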
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c  Wed Mar 19 16:16:24 2008 +0000
@@ -132,7 +132,8 @@ void flush_command_buffer(struct amd_iom
     send_iommu_command(iommu, cmd);
 
     /* wait for 'ComWaitInt' to signal comp#endifletion? */
-    if ( amd_iommu_poll_comp_wait ) {
+    if ( amd_iommu_poll_comp_wait )
+    {
         loop_count = amd_iommu_poll_comp_wait;
         do {
             status = readl(iommu->mmio_base +
@@ -152,8 +153,10 @@ void flush_command_buffer(struct amd_iom
                    IOMMU_STATUS_MMIO_OFFSET);
         }
         else
+        {
             dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:"
                     " ComWaitInt bit did not assert!\n");
+        }
     }
 }
 
@@ -234,7 +237,7 @@ static void amd_iommu_set_page_directory
 }
 
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
-    u8 sys_mgt, u8 dev_ex, u8 paging_mode)
+                                   u8 sys_mgt, u8 dev_ex, u8 paging_mode)
 {
     u64 addr_hi, addr_lo;
     u32 entry;
@@ -397,7 +400,7 @@ int amd_iommu_map_page(struct domain *d,
     spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-    if ( pte == 0 )
+    if ( pte == NULL )
     {
         dprintk(XENLOG_ERR,
                 "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
@@ -428,7 +431,7 @@ int amd_iommu_unmap_page(struct domain *
     spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
-    if ( pte == 0 )
+    if ( pte == NULL )
     {
         dprintk(XENLOG_ERR,
                 "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
@@ -441,7 +444,7 @@ int amd_iommu_unmap_page(struct domain *
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
 
     /* send INVALIDATE_IOMMU_PAGES command */
-    for_each_amd_iommu(iommu)
+    for_each_amd_iommu ( iommu )
     {
         spin_lock_irqsave(&iommu->lock, flags);
         invalidate_iommu_page(iommu, io_addr, requestor_id);
@@ -453,9 +456,9 @@ int amd_iommu_unmap_page(struct domain *
 }
 
 int amd_iommu_reserve_domain_unity_map(
-        struct domain *domain,
-        unsigned long phys_addr,
-        unsigned long size, int iw, int ir)
+    struct domain *domain,
+    unsigned long phys_addr,
+    unsigned long size, int iw, int ir)
 {
     unsigned long flags, npages, i;
     void *pte;
@@ -466,17 +469,18 @@ int amd_iommu_reserve_domain_unity_map(
     spin_lock_irqsave(&hd->mapping_lock, flags);
     for ( i = 0; i < npages; ++i )
     {
-        pte = get_pte_from_page_tables(hd->root_table,
-            hd->paging_mode, phys_addr>>PAGE_SHIFT);
-        if ( pte == 0 )
+        pte = get_pte_from_page_tables(
+            hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
+        if ( pte == NULL )
        {
             dprintk(XENLOG_ERR,
-                "AMD IOMMU: Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
+                    "AMD IOMMU: Invalid IO pagetable entry "
+                    "phys_addr = %lx\n", phys_addr);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
         }
         set_page_table_entry_present((u32 *)pte,
-            phys_addr, iw, ir);
+                                     phys_addr, iw, ir);
         phys_addr += PAGE_SIZE;
     }
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
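flush_command_buffer() above sends a command and then polls the status register a bounded number of times for the completion bit, warning instead of hanging if it never asserts. A minimal sketch of that bounded-poll pattern against a faked register -- the loop bound and bit position are illustrative, not the hardware values:

#include <stdio.h>
#include <stdint.h>

#define COMP_WAIT_INT_MASK 0x4   /* illustrative bit, not the real layout */

static uint32_t fake_status;

/* Stand-in for readl() on the IOMMU status MMIO register. */
static uint32_t read_status(void)
{
    /* Pretend the hardware asserts the bit on the third poll. */
    static int polls;
    if ( ++polls == 3 )
        fake_status |= COMP_WAIT_INT_MASK;
    return fake_status;
}

int main(void)
{
    int loop_count = 1000;   /* bounded, like amd_iommu_poll_comp_wait */
    uint32_t status;

    do {
        status = read_status();
        loop_count--;
    } while ( loop_count && !(status & COMP_WAIT_INT_MASK) );

    if ( status & COMP_WAIT_INT_MASK )
        printf("completion signalled\n");
    else
        printf("warning: bit did not assert\n");
    return 0;
}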
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c  Wed Mar 19 16:16:24 2008 +0000
@@ -168,7 +168,7 @@ int iommu_detect_callback(u8 bus, u8 dev
     list_add_tail(&iommu->list, &amd_iommu_head);
 
     /* allocate resources for this IOMMU */
-    if (allocate_iommu_resources(iommu) != 0)
+    if ( allocate_iommu_resources(iommu) != 0 )
         goto error_out;
 
     return 0;
@@ -208,7 +208,7 @@ static int __init amd_iommu_init(void)
     }
 
     /* assign default values for device entries */
-    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
     {
         ivrs_mappings[bdf].dte_requestor_id = bdf;
         ivrs_mappings[bdf].dte_sys_mgt_enable =
@@ -288,7 +288,8 @@ void amd_iommu_setup_domain_device(
         sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
         dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
         amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
-            req_id, sys_mgt, dev_ex, hd->paging_mode);
+                                      req_id, sys_mgt, dev_ex,
+                                      hd->paging_mode);
 
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
@@ -317,8 +318,8 @@ void __init amd_iommu_setup_dom0_devices
             {
                 l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
                 /* some broken boards return 0 or ~0 if a slot is empty: */
-                if ( l == 0xffffffff || l == 0x00000000 ||
-                    l == 0x0000ffff || l == 0xffff0000 )
+                if ( (l == 0xffffffff) || (l == 0x00000000) ||
+                     (l == 0x0000ffff) || (l == 0xffff0000) )
                     continue;
 
                 pdev = xmalloc(struct pci_dev);
@@ -368,22 +369,22 @@ int amd_iommu_detect(void)
         /* allocate 'ivrs mappings' table */
        /* note: the table has entries to accomodate all IOMMUs */
         last_bus = 0;
-        for_each_amd_iommu (iommu)
-           if (iommu->last_downstream_bus > last_bus)
-               last_bus = iommu->last_downstream_bus;
+        for_each_amd_iommu ( iommu )
+            if ( iommu->last_downstream_bus > last_bus )
+                last_bus = iommu->last_downstream_bus;
 
         ivrs_bdf_entries = (last_bus + 1) *
-            IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
+                           IOMMU_DEV_TABLE_ENTRIES_PER_BUS;
         ivrs_mappings = xmalloc_array(
             struct ivrs_mappings, ivrs_bdf_entries);
 
         if ( !ivrs_mappings )
         {
             dprintk(XENLOG_ERR, "AMD IOMMU:"
-                " Error allocating IVRS DevMappings table\n");
+                    " Error allocating IVRS DevMappings table\n");
             goto error_out;
         }
         memset(ivrs_mappings, 0,
-            ivrs_bdf_entries * sizeof(struct ivrs_mappings));
+               ivrs_bdf_entries * sizeof(struct ivrs_mappings));
     }
 
     if ( amd_iommu_init() != 0 )
@@ -424,6 +425,7 @@ static int allocate_domain_resources(str
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
 
     return 0;
+
 error_out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return -ENOMEM;
@@ -433,7 +435,7 @@ static int get_paging_mode(unsigned long
 {
     int level = 1;
 
-    BUG_ON ( !max_page );
+    BUG_ON(!max_page);
 
     if ( entries > max_page )
         entries = max_page;
@@ -441,8 +443,7 @@ static int get_paging_mode(unsigned long
     while ( entries > PTE_PER_TABLE_SIZE )
     {
         entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT;
-        ++level;
-        if ( level > 6 )
+        if ( ++level > 6 )
             return -ENOMEM;
     }
 
@@ -509,7 +510,7 @@ static int reassign_device( struct domai
     int bdf;
     unsigned long flags;
 
-    for_each_pdev( source, pdev )
+    for_each_pdev ( source, pdev )
     {
         if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
             continue;
@@ -522,23 +523,7 @@ static int reassign_device( struct domai
         iommu = (bdf < ivrs_bdf_entries) ?
             find_iommu_for_device(bus, pdev->devfn) : NULL;
 
-        if ( iommu )
-        {
-            amd_iommu_disable_domain_device(source, iommu, bdf);
-            /* Move pci device from the source domain to target domain. */
-            spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
-            spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
-            list_move(&pdev->list, &target_hd->pdev_list);
-            spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
-            spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
-
-            amd_iommu_setup_domain_device(target, iommu, bdf);
-            gdprintk(XENLOG_INFO ,
-                     "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
-                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
-                     source->domain_id, target->domain_id);
-        }
-        else
+        if ( !iommu )
         {
             gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu."
                      " %x:%x.%x cannot be assigned to domain %d\n",
@@ -546,6 +531,20 @@ static int reassign_device( struct domai
             return -ENODEV;
         }
 
+        amd_iommu_disable_domain_device(source, iommu, bdf);
+        /* Move pci device from the source domain to target domain. */
+        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
+        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
+        list_move(&pdev->list, &target_hd->pdev_list);
+        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
+        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+
+        amd_iommu_setup_domain_device(target, iommu, bdf);
+        gdprintk(XENLOG_INFO ,
+                 "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n",
+                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+                 source->domain_id, target->domain_id);
+
         break;
     }
 
     return 0;
@@ -557,9 +556,10 @@ int amd_iommu_assign_device(struct domai
     int req_id;
     req_id = ivrs_mappings[bdf].dte_requestor_id;
 
-    if (ivrs_mappings[req_id].unity_map_enable)
-    {
-        amd_iommu_reserve_domain_unity_map(d,
+    if ( ivrs_mappings[req_id].unity_map_enable )
+    {
+        amd_iommu_reserve_domain_unity_map(
+            d,
             ivrs_mappings[req_id].addr_range_start,
             ivrs_mappings[req_id].addr_range_length,
             ivrs_mappings[req_id].write_permission,
@@ -606,7 +606,7 @@ static void deallocate_next_page_table(v
         {
             deallocate_next_page_table(next_table,
                                        next_index, next_level);
-            ++next_index;
+            next_index++;
         } while (next_index < PTE_PER_TABLE_SIZE);
     }
 
@@ -622,11 +622,12 @@ static void deallocate_iommu_page_tables
     if ( hd ->root_table )
     {
         index = 0;
+
         do
         {
             deallocate_next_page_table(hd->root_table,
                                        index, hd->paging_mode);
-            ++index;
+            index++;
         } while ( index < PTE_PER_TABLE_SIZE );
 
         free_xenheap_page(hd ->root_table);
@@ -644,7 +645,8 @@ void amd_iommu_domain_destroy(struct dom
     release_domain_devices(d);
 }
 
-void amd_iommu_return_device(struct domain *s, struct domain *t, u8 bus, u8 devfn)
+void amd_iommu_return_device(
+    struct domain *s, struct domain *t, u8 bus, u8 devfn)
 {
     pdev_flr(bus, devfn);
     reassign_device(s, t, bus, devfn);
diff -r 7d8892a90c90 -r c927f758fcba xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c  Wed Mar 19 14:13:17 2008 +0000
+++ b/xen/drivers/passthrough/iommu.c  Wed Mar 19 16:16:24 2008 +0000
@@ -50,7 +50,7 @@ int assign_device(struct domain *d, u8 b
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    if ( !iommu_enabled || !hd->platform_ops)
+    if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
     return hd->platform_ops->assign_device(d, bus, devfn);
@@ -65,7 +65,7 @@ void iommu_domain_destroy(struct domain
     struct g2m_ioport *ioport;
     struct dev_intx_gsi_link *digl;
 
-    if ( !iommu_enabled || !hd->platform_ops)
+    if ( !iommu_enabled || !hd->platform_ops )
         return;
 
     if ( hvm_irq_dpci != NULL )
@@ -109,7 +109,7 @@ int iommu_map_page(struct domain *d, uns
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    if ( !iommu_enabled || !hd->platform_ops)
+    if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
     return hd->platform_ops->map_page(d, gfn, mfn);
@@ -119,7 +119,7 @@ int iommu_unmap_page(struct domain *d, u
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    if ( !iommu_enabled || !hd->platform_ops)
+    if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
     return hd->platform_ops->unmap_page(d, gfn);
@@ -129,7 +129,7 @@ void deassign_device(struct domain *d, u
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    if ( !iommu_enabled || !hd->platform_ops)
+    if ( !iommu_enabled || !hd->platform_ops )
         return;
 
     return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
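The iommu.c wrappers in the final file all repeat the same guard before dispatching through hd->platform_ops, the vendor-neutral operations table. A stripped-down sketch of that indirection, with names shortened and the domain parameter dropped (the real Xen structures carry more fields):

#include <stdio.h>
#include <stddef.h>

/* Vendor-neutral operations table, as used by the wrappers above
 * (simplified; field names follow the calls in the patch). */
struct iommu_ops {
    int (*map_page)(unsigned long gfn, unsigned long mfn);
    int (*unmap_page)(unsigned long gfn);
};

static int amd_map_page(unsigned long gfn, unsigned long mfn)
{
    printf("map gfn 0x%lx -> mfn 0x%lx\n", gfn, mfn);
    return 0;
}

static const struct iommu_ops amd_ops = {
    .map_page   = amd_map_page,
    .unmap_page = NULL,
};

static const struct iommu_ops *platform_ops = &amd_ops;
static int iommu_enabled = 1;

static int iommu_map_page(unsigned long gfn, unsigned long mfn)
{
    /* Same guard pattern as the patched wrappers: silently succeed when
     * the IOMMU is disabled or no vendor backend is registered. */
    if ( !iommu_enabled || !platform_ops )
        return 0;

    return platform_ops->map_page(gfn, mfn);
}

int main(void)
{
    return iommu_map_page(0x1000, 0x2000);
}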
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog