Re: [Xen-devel] [PATCH 10/17] PVH xen: introduce vmx_pvh.c and pvh.c
On Wed, 24 Apr 2013 09:47:55 +0100
"Jan Beulich" <JBeulich@xxxxxxxx> wrote:
> >>> On 23.04.13 at 23:25, Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> >>> wrote:
>
> > +int vmx_pvh_read_descriptor(unsigned int sel, const struct vcpu *v,
> > +                            const struct cpu_user_regs *regs,
> > +                            unsigned long *base, unsigned long *limit,
> > +                            unsigned int *ar)
> > +{
> > + unsigned int tmp_ar = 0;
> > + ASSERT(v == current);
> > + ASSERT(is_pvh_vcpu(v));
> > +
> > + if ( sel == (unsigned int)regs->cs )
> > + {
> > + *base = __vmread(GUEST_CS_BASE);
> > + *limit = __vmread(GUEST_CS_LIMIT);
> > + tmp_ar = __vmread(GUEST_CS_AR_BYTES);
> > + }
> > + else if ( sel == (unsigned int)regs->ds )
>
> This if/else-if sequence can't be right - a selector can be in more
> than one selector register (and one of them may have got reloaded
> after a GDT/LDT adjustment, while another may not), so you can't
> base the descriptor read upon the selector value. The caller will
> have to tell you which register it wants the descriptor for, not which
> selector.
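(Aside, not part of the patch: a minimal standalone sketch of the ambiguity
described above. The regs_snapshot struct and the selector values are made up
for illustration only — with a flat segment layout several selector registers
commonly hold the same value, so a lookup keyed on the selector value cannot
tell the registers apart.)

#include <stdio.h>

/* Hypothetical register snapshot: DS and ES both hold selector 0x2b. */
struct regs_snapshot { unsigned int cs, ds, es; };

int main(void)
{
    struct regs_snapshot regs = { .cs = 0x33, .ds = 0x2b, .es = 0x2b };
    unsigned int sel = regs.es;      /* the caller meant ES ...               */

    if ( sel == regs.ds )            /* ... but a value compare hits DS first */
        printf("selector %#x is ambiguous\n", sel);
    return 0;
}

Hence the caller has to name the register it wants, not just pass the value.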
Ok, I redid it. I created a new function read_descriptor_sel() and rewrote
vmx_pvh_read_descriptor(). Please let me know if this looks OK to you. Thanks a lot:
static int read_descriptor_sel(unsigned int sel,
                               enum sel_type which_sel,
                               const struct vcpu *v,
                               const struct cpu_user_regs *regs,
                               unsigned long *base,
                               unsigned long *limit,
                               unsigned int *ar,
                               unsigned int vm86attr)
{
    if ( is_pvh_vcpu(v) )
        return hvm_read_descriptor(which_sel, v, regs, base, limit, ar);

    return read_descriptor(sel, v, regs, base, limit, ar, vm86attr);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d003ae2..776522e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1862,6 +1875,7 @@ static int is_cpufreq_controller(struct domain *d)
int emulate_privileged_op(struct cpu_user_regs *regs)
{
+ enum sel_type which_sel;
struct vcpu *v = current;
unsigned long *reg, eip = regs->eip;
u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0;
@@ -1884,9 +1898,10 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
uint64_t val, msr_content;
- if ( !read_descriptor(regs->cs, v, regs,
- &code_base, &code_limit, &ar,
- _SEGMENT_CODE|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) )
+ if ( !read_descriptor_sel(regs->cs, SEL_CS, v, regs,
+ &code_base, &code_limit, &ar,
+ _SEGMENT_CODE|_SEGMENT_S|
+ _SEGMENT_DPL|_SEGMENT_P) )
goto fail;
op_default = op_bytes = (ar & (_SEGMENT_L|_SEGMENT_DB)) ? 4 : 2;
ad_default = ad_bytes = (ar & _SEGMENT_L) ? 8 : op_default;
@@ -1897,6 +1912,7 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
/* emulating only opcodes not allowing SS to be default */
data_sel = read_segment_register(v, regs, ds);
+ which_sel = SEL_DS;
/* Legacy prefixes. */
for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) )
@@ -1912,23 +1928,29 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
continue;
case 0x2e: /* CS override */
data_sel = regs->cs;
+ which_sel = SEL_CS;
continue;
case 0x3e: /* DS override */
data_sel = read_segment_register(v, regs, ds);
+ which_sel = SEL_DS;
continue;
case 0x26: /* ES override */
data_sel = read_segment_register(v, regs, es);
+ which_sel = SEL_ES;
continue;
case 0x64: /* FS override */
data_sel = read_segment_register(v, regs, fs);
+ which_sel = SEL_FS;
lm_ovr = lm_seg_fs;
continue;
case 0x65: /* GS override */
data_sel = read_segment_register(v, regs, gs);
+ which_sel = SEL_GS;
lm_ovr = lm_seg_gs;
continue;
case 0x36: /* SS override */
data_sel = regs->ss;
+ which_sel = SEL_SS;
continue;
case 0xf0: /* LOCK */
lock = 1;
@@ -1972,15 +1994,16 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
if ( !(opcode & 2) )
{
data_sel = read_segment_register(v, regs, es);
+ which_sel = SEL_ES;
lm_ovr = lm_seg_none;
}
if ( !(ar & _SEGMENT_L) )
{
- if ( !read_descriptor(data_sel, v, regs,
- &data_base, &data_limit, &ar,
- _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|
- _SEGMENT_P) )
+ if ( !read_descriptor_sel(data_sel, which_sel, v, regs,
+ &data_base, &data_limit, &ar,
+ _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|
+ _SEGMENT_P) )
goto fail;
if ( !(ar & _SEGMENT_S) ||
!(ar & _SEGMENT_P) ||
@@ -2010,9 +2033,9 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
}
}
else
- read_descriptor(data_sel, v, regs,
- &data_base, &data_limit, &ar,
- 0);
+ read_descriptor_sel(data_sel, which_sel, v, regs,
+ &data_base, &data_limit, &ar,
+ 0);
data_limit = ~0UL;
ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P;
}
diff --git a/xen/include/asm-x86/desc.h b/xen/include/asm-x86/desc.h
index 4dca0a3..deecef4 100644
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -199,6 +199,8 @@ DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);
extern void set_intr_gate(unsigned int irq, void * addr);
extern void load_TR(void);
+enum sel_type { SEL_NONE, SEL_CS, SEL_SS, SEL_DS, SEL_ES, SEL_GS, SEL_FS };
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARCH_DESC_H */
=============================================================================
New version of vmx_pvh_read_descriptor():
int vmx_pvh_read_descriptor(enum sel_type which_sel, const struct vcpu *v,
                            const struct cpu_user_regs *regs,
                            unsigned long *base, unsigned long *limit,
                            unsigned int *ar)
{
    unsigned int tmp_ar = 0;

    ASSERT(v == current);
    ASSERT(is_pvh_vcpu(v));

    switch ( which_sel )
    {
    case SEL_CS:
        tmp_ar = __vmread(GUEST_CS_AR_BYTES);
        if ( tmp_ar & X86_SEG_AR_CS_LM_ACTIVE )
        {
            *base = 0UL;
            *limit = ~0UL;
        }
        else
        {
            *base = __vmread(GUEST_CS_BASE);
            *limit = __vmread(GUEST_CS_LIMIT);
        }
        break;

    case SEL_DS:
        *base = __vmread(GUEST_DS_BASE);
        *limit = __vmread(GUEST_DS_LIMIT);
        tmp_ar = __vmread(GUEST_DS_AR_BYTES);
        break;

    case SEL_SS:
        *base = __vmread(GUEST_SS_BASE);
        *limit = __vmread(GUEST_SS_LIMIT);
        tmp_ar = __vmread(GUEST_SS_AR_BYTES);
        break;

    case SEL_GS:
        *base = __vmread(GUEST_GS_BASE);
        *limit = __vmread(GUEST_GS_LIMIT);
        tmp_ar = __vmread(GUEST_GS_AR_BYTES);
        break;

    case SEL_FS:
        *base = __vmread(GUEST_FS_BASE);
        *limit = __vmread(GUEST_FS_LIMIT);
        tmp_ar = __vmread(GUEST_FS_AR_BYTES);
        break;

    case SEL_ES:
        *base = __vmread(GUEST_ES_BASE);
        *limit = __vmread(GUEST_ES_LIMIT);
        tmp_ar = __vmread(GUEST_ES_AR_BYTES);
        break;

    default:
        gdprintk(XENLOG_WARNING, "Unmatched segment selector:%d\n", which_sel);
        return 0;
    }

    /* Fixup ar so that it looks the same as in native mode */
    *ar = (tmp_ar << 8);

    return 1;
}
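(Aside, not part of the patch: the trailing "<< 8" fixup works because the VMX
access-rights field keeps type/S/DPL/P in bits 0-7 and AVL/L/D-B/G in bits
12-15, while the native attribute word returned by read_descriptor() carries
the same fields at bits 8-15 and 20-23, which is where the _SEGMENT_* masks
look. A standalone illustration, using a hypothetical AR value for a present
64-bit code segment:)

#include <stdio.h>

/* Bit positions follow the x86 descriptor attribute layout and mirror the
 * _SEGMENT_* flags tested by emulate_privileged_op(). */
#define _SEGMENT_CODE (1u << 11)
#define _SEGMENT_S    (1u << 12)
#define _SEGMENT_DPL  (3u << 13)
#define _SEGMENT_P    (1u << 15)

int main(void)
{
    /* Hypothetical VMX AR bytes: type=0xb (code, read/accessed), S=1,
     * DPL=0, P=1, L=1. */
    unsigned int vmx_ar = 0xb | (1u << 4) | (1u << 7) | (1u << 13);
    unsigned int ar = vmx_ar << 8;   /* same fixup as above */

    printf("code=%d S=%d P=%d dpl=%u\n",
           !!(ar & _SEGMENT_CODE), !!(ar & _SEGMENT_S),
           !!(ar & _SEGMENT_P), (ar & _SEGMENT_DPL) >> 13);
    return 0;
}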