Re: [Xen-devel] [PATCH v3 03/14] AMD/IOMMU: use bit field for control register
On Tue, Jul 16, 2019 at 04:36:06PM +0000, Jan Beulich wrote:
> Also introduce a field in struct amd_iommu caching the most recently
> written control register. All writes should now happen exclusively from
> that cached value, such that it is guaranteed to be up to date.
>
> Take the opportunity and add further fields. Also convert a few boolean
> function parameters to bool, such that use of !! can be avoided.
>
> Because there are now definitions beyond bit 31, writel() also gets
> replaced by writeq() when updating hardware.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Brian Woods <brian.woods@xxxxxxx>
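For anyone skimming the series: the net effect is a write-through cache of
the control register -- bits are flipped in a cached bit-field copy and the
full 64-bit value is then written back, which is also why writel() gives
way to writeq(). Below is a minimal sketch of that pattern; union ctrl_reg,
struct my_iommu, set_translation(), CTRL_MMIO_OFFSET and the local writeq()
stub are illustrative stand-ins, not the exact Xen code.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for Xen's MMIO write helper of the same name. */
    static inline void writeq(uint64_t val, void *addr)
    {
        *(volatile uint64_t *)addr = val;
    }

    /* Stand-in for IOMMU_CONTROL_MMIO_OFFSET (0x18 per the patch). */
    #define CTRL_MMIO_OFFSET 0x18

    union ctrl_reg {
        uint64_t raw;
        struct {
            bool iommu_en:1;
            bool ht_tun_en:1;
            /* ... further bits, including ones above bit 31 ... */
        };
    };

    struct my_iommu {
        void *mmio_base;         /* mapped MMIO registers */
        union ctrl_reg ctrl;     /* cached copy of the last value written */
    };

    static void set_translation(struct my_iommu *iommu, bool enable)
    {
        iommu->ctrl.iommu_en = enable;       /* update the cached copy ... */
        writeq(iommu->ctrl.raw,              /* ... then write back all 64 bits */
               (uint8_t *)iommu->mmio_base + CTRL_MMIO_OFFSET);
    }

Writing the whole cached value on every update drops the readl()-based
read-modify-write the old iommu_set_bit()/iommu_clear_bit() sequences
needed, and since the cached layout now defines bits above 31, the
write-back has to cover all 64 bits, hence writeq().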
> ---
> v3: Switch boolean bitfields to bool.
> v2: Add domain_id_pne field. Mention writel() -> writeq() change.
>
> --- a/xen/drivers/passthrough/amd/iommu_guest.c
> +++ b/xen/drivers/passthrough/amd/iommu_guest.c
> @@ -317,7 +317,7 @@ static int do_invalidate_iotlb_pages(str
>
> static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
> {
> - bool_t com_wait_int_en, com_wait_int, i, s;
> + bool com_wait_int, i, s;
> struct guest_iommu *iommu;
> unsigned long gfn;
> p2m_type_t p2mt;
> @@ -354,12 +354,10 @@ static int do_completion_wait(struct dom
> unmap_domain_page(vaddr);
> }
>
> - com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo,
> - IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
> com_wait_int = iommu_get_bit(iommu->reg_status.lo,
> IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
>
> - if ( com_wait_int_en && com_wait_int )
> + if ( iommu->reg_ctrl.com_wait_int_en && com_wait_int )
> guest_iommu_deliver_msi(d);
>
> return 0;
> @@ -521,40 +519,17 @@ static void guest_iommu_process_command(
> return;
> }
>
> -static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t newctrl)
> +static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t val)
> {
> - bool_t cmd_en, event_en, iommu_en, ppr_en, ppr_log_en;
> - bool_t cmd_en_old, event_en_old, iommu_en_old;
> - bool_t cmd_run;
> -
> - iommu_en = iommu_get_bit(newctrl,
> - IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
> - iommu_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
> - IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
> -
> - cmd_en = iommu_get_bit(newctrl,
> - IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
> - cmd_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
> - IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
> - cmd_run = iommu_get_bit(iommu->reg_status.lo,
> - IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT);
> - event_en = iommu_get_bit(newctrl,
> - IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
> - event_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
> - IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
> -
> - ppr_en = iommu_get_bit(newctrl,
> - IOMMU_CONTROL_PPR_ENABLE_SHIFT);
> - ppr_log_en = iommu_get_bit(newctrl,
> - IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
> + union amd_iommu_control newctrl = { .raw = val };
>
> - if ( iommu_en )
> + if ( newctrl.iommu_en )
> {
> guest_iommu_enable(iommu);
> guest_iommu_enable_dev_table(iommu);
> }
>
> - if ( iommu_en && cmd_en )
> + if ( newctrl.iommu_en && newctrl.cmd_buf_en )
> {
> guest_iommu_enable_ring_buffer(iommu, &iommu->cmd_buffer,
> sizeof(cmd_entry_t));
> @@ -562,7 +537,7 @@ static int guest_iommu_write_ctrl(struct
> tasklet_schedule(&iommu->cmd_buffer_tasklet);
> }
>
> - if ( iommu_en && event_en )
> + if ( newctrl.iommu_en && newctrl.event_log_en )
> {
> guest_iommu_enable_ring_buffer(iommu, &iommu->event_log,
> sizeof(event_entry_t));
> @@ -570,7 +545,7 @@ static int guest_iommu_write_ctrl(struct
> guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
> }
>
> - if ( iommu_en && ppr_en && ppr_log_en )
> + if ( newctrl.iommu_en && newctrl.ppr_en && newctrl.ppr_log_en )
> {
> guest_iommu_enable_ring_buffer(iommu, &iommu->ppr_log,
> sizeof(ppr_entry_t));
> @@ -578,19 +553,21 @@ static int guest_iommu_write_ctrl(struct
>          guest_iommu_clear_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
> }
>
> - if ( iommu_en && cmd_en_old && !cmd_en )
> + if ( newctrl.iommu_en && iommu->reg_ctrl.cmd_buf_en &&
> + !newctrl.cmd_buf_en )
> {
> /* Disable iommu command processing */
> tasklet_kill(&iommu->cmd_buffer_tasklet);
> }
>
> - if ( event_en_old && !event_en )
> + if ( iommu->reg_ctrl.event_log_en && !newctrl.event_log_en )
> guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
>
> - if ( iommu_en_old && !iommu_en )
> + if ( iommu->reg_ctrl.iommu_en && !newctrl.iommu_en )
> guest_iommu_disable(iommu);
>
> - u64_to_reg(&iommu->reg_ctrl, newctrl);
> + iommu->reg_ctrl = newctrl;
> +
> return 0;
> }
>
> @@ -632,7 +609,7 @@ static uint64_t iommu_mmio_read64(struct
> val = reg_to_u64(iommu->ppr_log.reg_tail);
> break;
> case IOMMU_CONTROL_MMIO_OFFSET:
> - val = reg_to_u64(iommu->reg_ctrl);
> + val = iommu->reg_ctrl.raw;
> break;
> case IOMMU_STATUS_MMIO_OFFSET:
> val = reg_to_u64(iommu->reg_status);
> --- a/xen/drivers/passthrough/amd/iommu_init.c
> +++ b/xen/drivers/passthrough/amd/iommu_init.c
> @@ -41,7 +41,7 @@ LIST_HEAD_READ_MOSTLY(amd_iommu_head);
> struct table_struct device_table;
> bool_t iommuv2_enabled;
>
> -static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
> +static bool iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
> {
> return iommu->ht_flags & mask;
> }
> @@ -69,31 +69,18 @@ static void __init unmap_iommu_mmio_regi
>
> static void set_iommu_ht_flags(struct amd_iommu *iommu)
> {
> - u32 entry;
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> -
> /* Setup HT flags */
> if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) )
> - iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) ?
> -            iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) :
> -            iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
> -
> - iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) ?
> - iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT):
> - iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT);
> -
> - iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) ?
> - iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT):
> - iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT);
> -
> - iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) ?
> - iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT):
> - iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT);
> +        iommu->ctrl.ht_tun_en = iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE);
> +
> + iommu->ctrl.pass_pw = iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW);
> +    iommu->ctrl.res_pass_pw = iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW);
> + iommu->ctrl.isoc = iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC);
>
> /* Force coherent */
> - iommu_set_bit(&entry, IOMMU_CONTROL_COHERENT_SHIFT);
> + iommu->ctrl.coherent = true;
>
> - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> }
>
> static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
> @@ -205,55 +192,37 @@ static void register_iommu_ppr_log_in_mm
>
>
> static void set_iommu_translation_control(struct amd_iommu *iommu,
> - int enable)
> + bool enable)
> {
> - u32 entry;
> + iommu->ctrl.iommu_en = enable;
>
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> -
> - enable ?
> - iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT) :
> - iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
> -
> - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> }
>
> static void set_iommu_guest_translation_control(struct amd_iommu *iommu,
> - int enable)
> + bool enable)
> {
> - u32 entry;
> -
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.gt_en = enable;
>
> - enable ?
> - iommu_set_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT) :
> - iommu_clear_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT);
> -
> - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
>
> if ( enable )
> AMD_IOMMU_DEBUG("Guest Translation Enabled.\n");
> }
>
> static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
> - int enable)
> + bool enable)
> {
> - u32 entry;
> -
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> -
> - /*reset head and tail pointer manually before enablement */
> + /* Reset head and tail pointer manually before enablement */
> if ( enable )
> {
> writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
> writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
> -
> - iommu_set_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
> }
> - else
> - iommu_clear_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
>
> - writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.cmd_buf_en = enable;
> +
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> }
>
> static void register_iommu_exclusion_range(struct amd_iommu *iommu)
> @@ -295,57 +264,38 @@ static void register_iommu_exclusion_ran
> }
>
> static void set_iommu_event_log_control(struct amd_iommu *iommu,
> - int enable)
> + bool enable)
> {
> - u32 entry;
> -
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> -
> - /*reset head and tail pointer manually before enablement */
> + /* Reset head and tail pointer manually before enablement */
> if ( enable )
> {
> writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
> writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
> -
> - iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
> - iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
> - }
> - else
> - {
> - iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
> - iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
> }
>
> - iommu_clear_bit(&entry, IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
> + iommu->ctrl.event_int_en = enable;
> + iommu->ctrl.event_log_en = enable;
> + iommu->ctrl.com_wait_int_en = false;
>
> - writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> }
>
> static void set_iommu_ppr_log_control(struct amd_iommu *iommu,
> - int enable)
> + bool enable)
> {
> - u32 entry;
> -
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> -
> - /*reset head and tail pointer manually before enablement */
> + /* Reset head and tail pointer manually before enablement */
> if ( enable )
> {
> writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_HEAD_OFFSET);
> writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_TAIL_OFFSET);
> -
> - iommu_set_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT);
> - iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
> - iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
> - }
> - else
> - {
> - iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT);
> - iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
> - iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
> }
>
> - writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.ppr_en = enable;
> + iommu->ctrl.ppr_int_en = enable;
> + iommu->ctrl.ppr_log_en = enable;
> +
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> +
> if ( enable )
> AMD_IOMMU_DEBUG("PPR Log Enabled.\n");
> }
> @@ -398,7 +348,7 @@ static int iommu_read_log(struct amd_iom
> /* reset event log or ppr log when overflow */
> static void iommu_reset_log(struct amd_iommu *iommu,
> struct ring_buffer *log,
> - void (*ctrl_func)(struct amd_iommu *iommu, int))
> + void (*ctrl_func)(struct amd_iommu *iommu, bool))
> {
> u32 entry;
> int log_run, run_bit;
> @@ -615,11 +565,11 @@ static void iommu_check_event_log(struct
> iommu_reset_log(iommu, &iommu->event_log,
> set_iommu_event_log_control);
> else
> {
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> - if ( !(entry & IOMMU_CONTROL_EVENT_LOG_INT_MASK) )
> + if ( !iommu->ctrl.event_int_en )
> {
> - entry |= IOMMU_CONTROL_EVENT_LOG_INT_MASK;
> - writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.event_int_en = true;
> + writeq(iommu->ctrl.raw,
> + iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> /*
> * Re-schedule the tasklet to handle eventual log entries added
> * between reading the log above and re-enabling the interrupt.
> @@ -704,11 +654,11 @@ static void iommu_check_ppr_log(struct a
> iommu_reset_log(iommu, &iommu->ppr_log, set_iommu_ppr_log_control);
> else
> {
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> - if ( !(entry & IOMMU_CONTROL_PPR_LOG_INT_MASK) )
> + if ( !iommu->ctrl.ppr_int_en )
> {
> - entry |= IOMMU_CONTROL_PPR_LOG_INT_MASK;
> - writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.ppr_int_en = true;
> + writeq(iommu->ctrl.raw,
> + iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> /*
> * Re-schedule the tasklet to handle eventual log entries added
> * between reading the log above and re-enabling the interrupt.
> @@ -754,7 +704,6 @@ static void do_amd_iommu_irq(unsigned lo
> static void iommu_interrupt_handler(int irq, void *dev_id,
> struct cpu_user_regs *regs)
> {
> - u32 entry;
> unsigned long flags;
> struct amd_iommu *iommu = dev_id;
>
> @@ -764,10 +713,9 @@ static void iommu_interrupt_handler(int
> * Silence interrupts from both event and PPR by clearing the
> * enable logging bits in the control register
> */
> - entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> - iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
> - iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
> - writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
> + iommu->ctrl.event_int_en = false;
> + iommu->ctrl.ppr_int_en = false;
> + writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
>
> spin_unlock_irqrestore(&iommu->lock, flags);
>
> --- a/xen/include/asm-x86/amd-iommu.h
> +++ b/xen/include/asm-x86/amd-iommu.h
> @@ -88,6 +88,8 @@ struct amd_iommu {
> void *mmio_base;
> unsigned long mmio_base_phys;
>
> + union amd_iommu_control ctrl;
> +
> struct table_struct dev_table;
> struct ring_buffer cmd_buffer;
> struct ring_buffer event_log;
> @@ -172,7 +174,7 @@ struct guest_iommu {
> uint64_t mmio_base; /* MMIO base address */
>
> /* MMIO regs */
> - struct mmio_reg reg_ctrl; /* MMIO offset 0018h */
> + union amd_iommu_control reg_ctrl; /* MMIO offset 0018h */
> struct mmio_reg reg_status; /* MMIO offset 2020h */
> union amd_iommu_ext_features reg_ext_feature; /* MMIO offset 0030h */
>
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> @@ -295,38 +295,56 @@ struct amd_iommu_dte {
>
> /* Control Register */
> #define IOMMU_CONTROL_MMIO_OFFSET 0x18
> -#define IOMMU_CONTROL_TRANSLATION_ENABLE_MASK 0x00000001
> -#define IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT 0
> -#define IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK 0x00000002
> -#define IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT 1
> -#define IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK 0x00000004
> -#define IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT 2
> -#define IOMMU_CONTROL_EVENT_LOG_INT_MASK 0x00000008
> -#define IOMMU_CONTROL_EVENT_LOG_INT_SHIFT 3
> -#define IOMMU_CONTROL_COMP_WAIT_INT_MASK 0x00000010
> -#define IOMMU_CONTROL_COMP_WAIT_INT_SHIFT 4
> -#define IOMMU_CONTROL_INVALIDATION_TIMEOUT_MASK 0x000000E0
> -#define IOMMU_CONTROL_INVALIDATION_TIMEOUT_SHIFT 5
> -#define IOMMU_CONTROL_PASS_POSTED_WRITE_MASK 0x00000100
> -#define IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT 8
> -#define IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK 0x00000200
> -#define IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT 9
> -#define IOMMU_CONTROL_COHERENT_MASK 0x00000400
> -#define IOMMU_CONTROL_COHERENT_SHIFT 10
> -#define IOMMU_CONTROL_ISOCHRONOUS_MASK 0x00000800
> -#define IOMMU_CONTROL_ISOCHRONOUS_SHIFT 11
> -#define IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK 0x00001000
> -#define IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT 12
> -#define IOMMU_CONTROL_PPR_LOG_ENABLE_MASK 0x00002000
> -#define IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT 13
> -#define IOMMU_CONTROL_PPR_LOG_INT_MASK 0x00004000
> -#define IOMMU_CONTROL_PPR_LOG_INT_SHIFT 14
> -#define IOMMU_CONTROL_PPR_ENABLE_MASK 0x00008000
> -#define IOMMU_CONTROL_PPR_ENABLE_SHIFT 15
> -#define IOMMU_CONTROL_GT_ENABLE_MASK 0x00010000
> -#define IOMMU_CONTROL_GT_ENABLE_SHIFT 16
> -#define IOMMU_CONTROL_RESTART_MASK 0x80000000
> -#define IOMMU_CONTROL_RESTART_SHIFT 31
> +
> +union amd_iommu_control {
> + uint64_t raw;
> + struct {
> + bool iommu_en:1;
> + bool ht_tun_en:1;
> + bool event_log_en:1;
> + bool event_int_en:1;
> + bool com_wait_int_en:1;
> + unsigned int inv_timeout:3;
> + bool pass_pw:1;
> + bool res_pass_pw:1;
> + bool coherent:1;
> + bool isoc:1;
> + bool cmd_buf_en:1;
> + bool ppr_log_en:1;
> + bool ppr_int_en:1;
> + bool ppr_en:1;
> + bool gt_en:1;
> + bool ga_en:1;
> + unsigned int crw:4;
> + bool smif_en:1;
> + bool slf_wb_dis:1;
> + bool smif_log_en:1;
> + unsigned int gam_en:3;
> + bool ga_log_en:1;
> + bool ga_int_en:1;
> + unsigned int dual_ppr_log_en:2;
> + unsigned int dual_event_log_en:2;
> + unsigned int dev_tbl_seg_en:3;
> + unsigned int priv_abrt_en:2;
> + bool ppr_auto_rsp_en:1;
> + bool marc_en:1;
> + bool blk_stop_mrk_en:1;
> + bool ppr_auto_rsp_aon:1;
> + bool domain_id_pne:1;
> + unsigned int :1;
> + bool eph_en:1;
> + unsigned int had_update:2;
> + bool gd_update_dis:1;
> + unsigned int :1;
> + bool xt_en:1;
> + bool int_cap_xt_en:1;
> + bool vcmd_en:1;
> + bool viommu_en:1;
> + bool ga_update_dis:1;
> + bool gappi_en:1;
> + unsigned int :8;
> + };
> +};
>
> /* Exclusion Register */
> #define IOMMU_EXCLUSION_BASE_LOW_OFFSET 0x20
>
--
Brian Woods
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel