|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] x86: adjust PV I/O emulation functions' types
admin_io_okay(), guest_io_read(), and guest_io_write() all don't need
their current "regs" parameter at all, and they don't use the vCPU
passed to them for other than obtaining its domain. Drop the former and
replace the latter by a struct domain pointer.
pci_cfg_ok() returns a boolean type, and its "write" parameter is of
boolean kind too.
All of them get called for the current vCPU (and hence current domain)
only, so name the domain parameters accordingly except in the
admin_io_okay() case, which a subsequent patch will use for simplifying
setup_io_bitmap().
Latch current->domain into a local variable in emulate_privileged_op().
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -599,13 +599,12 @@ static int hvm_access_cf8(
static int handle_pvh_io(
int dir, uint32_t port, uint32_t bytes, uint32_t *val)
{
- struct vcpu *curr = current;
- struct cpu_user_regs *regs = guest_cpu_user_regs();
+ struct domain *currd = current->domain;
if ( dir == IOREQ_WRITE )
- guest_io_write(port, bytes, *val, curr, regs);
+ guest_io_write(port, bytes, *val, currd);
else
- *val = guest_io_read(port, bytes, curr, regs);
+ *val = guest_io_read(port, bytes, currd);
return X86EMUL_OKAY;
}
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1754,9 +1754,8 @@ static int guest_io_okay(
}
/* Has the administrator granted sufficient permission for this I/O access? */
-static int admin_io_okay(
- unsigned int port, unsigned int bytes,
- struct vcpu *v, struct cpu_user_regs *regs)
+static bool_t admin_io_okay(unsigned int port, unsigned int bytes,
+ const struct domain *d)
{
/*
* Port 0xcf8 (CONFIG_ADDRESS) is only visible for DWORD accesses.
@@ -1769,17 +1768,18 @@ static int admin_io_okay(
if ( ((port & ~1) == RTC_PORT(0)) )
return 0;
- return ioports_access_permitted(v->domain, port, port + bytes - 1);
+ return ioports_access_permitted(d, port, port + bytes - 1);
}
-static int pci_cfg_ok(struct domain *d, int write, int size)
+static bool_t pci_cfg_ok(struct domain *currd, bool_t write, unsigned int size)
{
uint32_t machine_bdf;
- uint16_t start, end;
- if (!is_hardware_domain(d))
+ unsigned int start;
+
+ if ( !is_hardware_domain(currd) )
return 0;
- machine_bdf = (d->arch.pci_cf8 >> 8) & 0xFFFF;
+ machine_bdf = (currd->arch.pci_cf8 >> 8) & 0xFFFF;
if ( write )
{
const unsigned long *ro_map = pci_get_ro_map(0);
@@ -1787,9 +1787,9 @@ static int pci_cfg_ok(struct domain *d,
if ( ro_map && test_bit(machine_bdf, ro_map) )
return 0;
}
- start = d->arch.pci_cf8 & 0xFF;
+ start = currd->arch.pci_cf8 & 0xFF;
/* AMD extended configuration space access? */
- if ( (d->arch.pci_cf8 & 0x0F000000) &&
+ if ( (currd->arch.pci_cf8 & 0x0F000000) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
{
@@ -1798,22 +1798,20 @@ static int pci_cfg_ok(struct domain *d,
if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
return 0;
if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
- start |= (d->arch.pci_cf8 >> 16) & 0xF00;
+ start |= (currd->arch.pci_cf8 >> 16) & 0xF00;
}
- end = start + size - 1;
- if (xsm_pci_config_permission(XSM_HOOK, d, machine_bdf, start, end, write))
- return 0;
- return 1;
+
+ return !xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
+ start, start + size - 1, write);
}
-uint32_t guest_io_read(
- unsigned int port, unsigned int bytes,
- struct vcpu *v, struct cpu_user_regs *regs)
+uint32_t guest_io_read(unsigned int port, unsigned int bytes,
+ struct domain *currd)
{
uint32_t data = 0;
unsigned int shift = 0;
- if ( admin_io_okay(port, bytes, v, regs) )
+ if ( admin_io_okay(port, bytes, currd) )
{
switch ( bytes )
{
@@ -1834,31 +1832,30 @@ uint32_t guest_io_read(
}
else if ( (port == RTC_PORT(0)) )
{
- sub_data = v->domain->arch.cmos_idx;
+ sub_data = currd->arch.cmos_idx;
}
else if ( (port == RTC_PORT(1)) &&
- ioports_access_permitted(v->domain, RTC_PORT(0),
- RTC_PORT(1)) )
+ ioports_access_permitted(currd, RTC_PORT(0), RTC_PORT(1)) )
{
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
- outb(v->domain->arch.cmos_idx & 0x7f, RTC_PORT(0));
+ outb(currd->arch.cmos_idx & 0x7f, RTC_PORT(0));
sub_data = inb(RTC_PORT(1));
spin_unlock_irqrestore(&rtc_lock, flags);
}
else if ( (port == 0xcf8) && (bytes == 4) )
{
size = 4;
- sub_data = v->domain->arch.pci_cf8;
+ sub_data = currd->arch.pci_cf8;
}
else if ( (port & 0xfffc) == 0xcfc )
{
size = min(bytes, 4 - (port & 3));
if ( size == 3 )
size = 2;
- if ( pci_cfg_ok(v->domain, 0, size) )
- sub_data = pci_conf_read(v->domain->arch.pci_cf8, port & 3,
- size);
+ if ( pci_cfg_ok(currd, 0, size) )
+ sub_data = pci_conf_read(currd->arch.pci_cf8, port & 3, size);
}
if ( size == 4 )
@@ -1873,11 +1870,10 @@ uint32_t guest_io_read(
return data;
}
-void guest_io_write(
- unsigned int port, unsigned int bytes, uint32_t data,
- struct vcpu *v, struct cpu_user_regs *regs)
+void guest_io_write(unsigned int port, unsigned int bytes, uint32_t data,
+ struct domain *currd)
{
- if ( admin_io_okay(port, bytes, v, regs) )
+ if ( admin_io_okay(port, bytes, currd) )
{
switch ( bytes ) {
case 1:
@@ -1905,33 +1901,32 @@ void guest_io_write(
}
else if ( (port == RTC_PORT(0)) )
{
- v->domain->arch.cmos_idx = data;
+ currd->arch.cmos_idx = data;
}
else if ( (port == RTC_PORT(1)) &&
- ioports_access_permitted(v->domain, RTC_PORT(0),
- RTC_PORT(1)) )
+ ioports_access_permitted(currd, RTC_PORT(0), RTC_PORT(1)) )
{
unsigned long flags;
if ( pv_rtc_handler )
- pv_rtc_handler(v->domain->arch.cmos_idx & 0x7f, data);
+ pv_rtc_handler(currd->arch.cmos_idx & 0x7f, data);
spin_lock_irqsave(&rtc_lock, flags);
- outb(v->domain->arch.cmos_idx & 0x7f, RTC_PORT(0));
+ outb(currd->arch.cmos_idx & 0x7f, RTC_PORT(0));
outb(data, RTC_PORT(1));
spin_unlock_irqrestore(&rtc_lock, flags);
}
else if ( (port == 0xcf8) && (bytes == 4) )
{
size = 4;
- v->domain->arch.pci_cf8 = data;
+ currd->arch.pci_cf8 = data;
}
else if ( (port & 0xfffc) == 0xcfc )
{
size = min(bytes, 4 - (port & 3));
if ( size == 3 )
size = 2;
- if ( pci_cfg_ok(v->domain, 1, size) )
- pci_conf_write(v->domain->arch.pci_cf8, port & 3, size, data);
+ if ( pci_cfg_ok(currd, 1, size) )
+ pci_conf_write(currd->arch.pci_cf8, port & 3, size, data);
}
if ( size == 4 )
@@ -1989,6 +1984,7 @@ static int is_cpufreq_controller(struct
static int emulate_privileged_op(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
+ struct domain *currd = v->domain;
unsigned long *reg, eip = regs->eip;
u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0;
enum { lm_seg_none, lm_seg_fs, lm_seg_gs } lm_ovr = lm_seg_none;
@@ -2145,7 +2141,7 @@ static int emulate_privileged_op(struct
(rd_ad(edi) > (data_limit - (op_bytes - 1))) ||
!guest_io_okay(port, op_bytes, v, regs) )
goto fail;
- data = guest_io_read(port, op_bytes, v, regs);
+ data = guest_io_read(port, op_bytes, currd);
if ( (rc = copy_to_user((void *)data_base + rd_ad(edi),
&data, op_bytes)) != 0 )
{
@@ -2171,7 +2167,7 @@ static int emulate_privileged_op(struct
+ op_bytes - rc, 0);
return EXCRET_fault_fixed;
}
- guest_io_write(port, op_bytes, data, v, regs);
+ guest_io_write(port, op_bytes, data, currd);
wr_ad(esi, regs->esi + (int)((regs->eflags & X86_EFLAGS_DF)
? -op_bytes : op_bytes));
break;
@@ -2232,7 +2228,7 @@ static int emulate_privileged_op(struct
exec_in:
if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
- if ( admin_io_okay(port, op_bytes, v, regs) )
+ if ( admin_io_okay(port, op_bytes, currd) )
{
mark_regs_dirty(regs);
io_emul(regs);
@@ -2243,7 +2239,7 @@ static int emulate_privileged_op(struct
regs->eax = 0;
else
regs->eax &= ~((1 << (op_bytes * 8)) - 1);
- regs->eax |= guest_io_read(port, op_bytes, v, regs);
+ regs->eax |= guest_io_read(port, op_bytes, currd);
}
bpmatch = check_guest_io_breakpoint(v, port, op_bytes);
goto done;
@@ -2262,7 +2258,7 @@ static int emulate_privileged_op(struct
exec_out:
if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
- if ( admin_io_okay(port, op_bytes, v, regs) )
+ if ( admin_io_okay(port, op_bytes, currd) )
{
mark_regs_dirty(regs);
io_emul(regs);
@@ -2271,7 +2267,7 @@ static int emulate_privileged_op(struct
}
else
{
- guest_io_write(port, op_bytes, regs->eax, v, regs);
+ guest_io_write(port, op_bytes, regs->eax, currd);
}
bpmatch = check_guest_io_breakpoint(v, port, op_bytes);
goto done;
@@ -2353,7 +2349,7 @@ static int emulate_privileged_op(struct
case 0x09: /* WBINVD */
/* Ignore the instruction if unprivileged. */
- if ( !cache_flush_permitted(v->domain) )
+ if ( !cache_flush_permitted(currd) )
/* Non-physdev domain attempted WBINVD; ignore for now since
newer linux uses this in some start-of-day timing loops */
;
@@ -2386,8 +2382,7 @@ static int emulate_privileged_op(struct
if ( !is_pv_32on64_vcpu(v) )
{
mfn = pagetable_get_pfn(v->arch.guest_table);
- *reg = xen_pfn_to_cr3(mfn_to_gmfn(
- v->domain, mfn));
+ *reg = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
}
else
{
@@ -2396,8 +2391,7 @@ static int emulate_privileged_op(struct
mfn = l4e_get_pfn(*pl4e);
unmap_domain_page(pl4e);
- *reg = compat_pfn_to_cr3(mfn_to_gmfn(
- v->domain, mfn));
+ *reg = compat_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
}
/* PTs should not be shared */
BUG_ON(page_get_owner(mfn_to_page(mfn)) == dom_cow);
@@ -2457,7 +2451,7 @@ static int emulate_privileged_op(struct
gfn = !is_pv_32on64_vcpu(v)
? xen_cr3_to_pfn(*reg) : compat_cr3_to_pfn(*reg);
- page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( page )
{
rc = new_guest_cr3(page_to_mfn(page));
@@ -2541,7 +2535,7 @@ static int emulate_privileged_op(struct
case MSR_K8_HWCR:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
goto fail;
- if ( !is_cpufreq_controller(v->domain) )
+ if ( !is_cpufreq_controller(currd) )
break;
if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
goto fail;
@@ -2550,7 +2544,7 @@ static int emulate_privileged_op(struct
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
goto fail;
- if ( !is_hardware_domain(v->domain) || !is_pinned_vcpu(v) )
+ if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
break;
if ( (rdmsr_safe(MSR_AMD64_NB_CFG, val) != 0) ||
(eax != (uint32_t)val) ||
@@ -2563,7 +2557,7 @@ static int emulate_privileged_op(struct
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
goto fail;
- if ( !is_hardware_domain(v->domain) || !is_pinned_vcpu(v) )
+ if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
break;
if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) != 0) )
goto fail;
@@ -2583,7 +2577,7 @@ static int emulate_privileged_op(struct
case MSR_IA32_UCODE_REV:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
goto fail;
- if ( !is_hardware_domain(v->domain) || !is_pinned_vcpu(v) )
+ if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
break;
if ( rdmsr_safe(regs->ecx, val) )
goto fail;
@@ -2602,7 +2596,7 @@ static int emulate_privileged_op(struct
if (( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) &&
( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ) )
goto fail;
- if ( !is_cpufreq_controller(v->domain) )
+ if ( !is_cpufreq_controller(currd) )
break;
if ( wrmsr_safe(regs->ecx, msr_content ) != 0 )
goto fail;
@@ -2610,7 +2604,7 @@ static int emulate_privileged_op(struct
case MSR_IA32_PERF_CTL:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
goto fail;
- if ( !is_cpufreq_controller(v->domain) )
+ if ( !is_cpufreq_controller(currd) )
break;
if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
goto fail;
@@ -2619,7 +2613,7 @@ static int emulate_privileged_op(struct
case MSR_IA32_ENERGY_PERF_BIAS:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
goto fail;
- if ( !is_hardware_domain(v->domain) || !is_pinned_vcpu(v) )
+ if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
break;
if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
goto fail;
@@ -2665,7 +2659,7 @@ static int emulate_privileged_op(struct
if ( (v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_TSD) &&
!guest_kernel_mode(v, regs) )
goto fail;
- if ( v->domain->arch.vtsc )
+ if ( currd->arch.vtsc )
pv_soft_rdtsc(v, regs, 0);
else
{
@@ -2708,7 +2702,7 @@ static int emulate_privileged_op(struct
case MSR_K8_PSTATE7:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
goto fail;
- if ( !is_cpufreq_controller(v->domain) )
+ if ( !is_cpufreq_controller(currd) )
{
regs->eax = regs->edx = 0;
break;
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -48,8 +48,8 @@ extern int send_guest_trap(struct domain
unsigned int trap_nr);
uint32_t guest_io_read(unsigned int port, unsigned int bytes,
- struct vcpu *, struct cpu_user_regs *);
+ struct domain *);
void guest_io_write(unsigned int port, unsigned int bytes, uint32_t data,
- struct vcpu *, struct cpu_user_regs *);
+ struct domain *);
#endif /* ASM_TRAP_H */
Attachment:
x86-guest-io-params.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |