[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v4 04/16] xen: Add is_vmware_port_enabled



On 09/12/14 09:08, Boris Ostrovsky wrote:
> On 09/11/2014 02:36 PM, Don Slutz wrote:
>>   int __get_instruction_length_from_list(struct vcpu *v,
>> -        const enum instruction_index *list, unsigned int list_count)
>> +                                       const enum instruction_index 
>> *list,
>> +                                       unsigned int list_count,
>> +                                       bool_t err_rpt)
>>   {
>>       struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
>>       unsigned int i, j, inst_len = 0;
>> @@ -211,10 +222,13 @@ int __get_instruction_length_from_list(struct 
>> vcpu *v,
>>       mismatch: ;
>>       }
>>   -    gdprintk(XENLOG_WARNING,
>> -             "%s: Mismatch between expected and actual instruction 
>> bytes: "
>> -             "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
>> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>> +    if ( err_rpt )
>> +    {
>> +        gdprintk(XENLOG_WARNING,
>> +                 "%s: Mismatch between expected and actual 
>> instruction bytes: "
>> +                 "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
>> +        hvm_inject_hw_exception(TRAP_gp_fault, 0);
>> +    }
>>       return 0;
>>      done:
>> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
>> index b5188e6..9e14d2a 100644
>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -59,6 +59,7 @@
>>   #include <public/sched.h>
>>   #include <asm/hvm/vpt.h>
>>   #include <asm/hvm/trace.h>
>> +#include <asm/hvm/vmport.h>
>>   #include <asm/hap.h>
>>   #include <asm/apic.h>
>>   #include <asm/debugger.h>
>> @@ -2065,6 +2066,38 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
>>       return;
>>   }
>>   +static void svm_vmexit_gp_intercept(struct cpu_user_regs *regs,
>> +                                    struct vcpu *v)
>> +{
>> +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
>> +    unsigned long inst_len;
>> +    unsigned long inst_addr = svm_rip2pointer(v);
>> +    int rc;
>> +    static const enum instruction_index list[] = {
>> +        INSTR_INL_DX, INSTR_INB_DX, INSTR_OUTL_DX, INSTR_OUTB_DX
>> +    };
>> +
>> +    inst_len = __get_instruction_length_from_list(
>> +        v, list, ARRAY_SIZE(list), 0);
>
> I should have asked earlier but I don't think I understand why the 
> last argument here is 0 (and therefore why you have this last argument 
> at all).
>
> Because whether or not you are warning in 
> __get_instruction_length_from_list() it will still return 0. And that, 
> in turn, will cause vmport_gp_check() to return an error. And then you 
> will print another warning in VMPORT_LOG. So there is a warning anyway.
>

A key part that you appear to have missed is that
__get_instruction_length_from_list() uses gdprintk(XENLOG_WARNING,...
but VMPORT_DBG_LOG is only available in debug=y builds.  So the new
last argument is used to control this.  Since this change includes
enabling #GP vmexits, it is now possible for ring 3 users to generate a
large volume of these, which with gdprintk() can flood the console.


> Second, since this handler appears to be handling #GP only for VMware 
> guest we should make sure that it is not executed for any other guest. 
> You do now condition intercept got #GP for such guests only but I 
> still think having a check here is worth doing. Maybe a BUG() or 
> ASSERT()?
>
> The same comments are applicable to VMX code, I suspect.
>

I will change the check in vmport_gp_check on is_vmware_port_enabled
into an ASSERT() so both SVM and VMX will be covered.

>> +
>> +    rc = vmport_gp_check(regs, v, inst_len, inst_addr,
>> +                         vmcb->exitinfo1, vmcb->exitinfo2);
>> +    if ( !rc )
>> +        __update_guest_eip(regs, inst_len);
>> +    else
>> +    {
>> +        VMPORT_DBG_LOG(VMPORT_LOG_GP_UNKNOWN,
>> +                       "gp: rc=%d ei1=0x%lx ei2=0x%lx ip=%"PRIx64
>> +                       " (0x%lx,%ld) ax=%"PRIx64" bx=%"PRIx64" 
>> cx=%"PRIx64
>> +                       " dx=%"PRIx64" si=%"PRIx64" di=%"PRIx64, rc,
>> +                       (unsigned long)vmcb->exitinfo1,
>> +                       (unsigned long)vmcb->exitinfo2, regs->rip, 
>> inst_addr,
>> +                       inst_len, regs->rax, regs->rbx, regs->rcx, 
>> regs->rdx,
>> +                       regs->rsi, regs->rdi);
>> +        hvm_inject_hw_exception(TRAP_gp_fault, regs->error_code);
>> +    }
>> +}
>> +
>
> ...
>
>> +
>> +int vmport_gp_check(struct cpu_user_regs *regs, struct vcpu *v,
>> +                    unsigned long inst_len, unsigned long inst_addr,
>> +                    unsigned long ei1, unsigned long ei2)
>> +{
>> +    if ( !v->domain->arch.hvm_domain.is_vmware_port_enabled )
>> +        return 10;

I.E. ASSERT(v->domain->arch.hvm_domain.is_vmware_port_enabled);

>> +
>> +    if ( inst_len && inst_len <= 2 && get_low_bits(regs->rdx) == 
>> BDOOR_PORT &&
>> +         ei1 == 0 && ei2 == 0 && regs->error_code == 0 &&
>> +         (uint32_t)regs->rax == BDOOR_MAGIC )
>> +    {
>> +        int i = 0;
>> +        uint32_t val;
>> +        uint32_t byte_cnt = 4;
>> +        unsigned char bytes[2];
>> +        unsigned int fetch_len;
>> +        int frc;
>> +        int rc;
>> +
>> +        /*
>> +         * Fetch up to the next page break; we'll fetch from the
>> +         * next page later if we have to.
>> +         */
>> +        fetch_len = min_t(unsigned int, inst_len,
>> +                          PAGE_SIZE - (inst_addr  & ~PAGE_MASK));
>> +        frc = hvm_fetch_from_guest_virt_nofault(bytes, inst_addr, 
>> fetch_len,
>> + PFEC_page_present);
>> +        if ( frc != HVMCOPY_okay )
>> +        {
>> +            gdprintk(XENLOG_WARNING,
>> +                     "Bad instruction fetch at %#lx (frc=%d il=%lu 
>> fl=%u)\n",
>> +                     (unsigned long) inst_addr, frc, inst_len, 
>> fetch_len);
>> +            return 11;
>> +        }
>> +        if ( bytes[0] == 0x66 )     /* operand size prefix */
>> +        {
>> +            byte_cnt = 2;
>> +            i = 1;
>> +            if ( fetch_len != inst_len )
>> +            {
>> +                frc = hvm_fetch_from_guest_virt_nofault(&bytes[1],
>> + inst_addr + 1, 1,
>> + PFEC_page_present);
>> +                if ( frc != HVMCOPY_okay )
>> +                {
>> +                    gdprintk(XENLOG_WARNING,
>> +                             "Bad instruction fetch at %#lx + 1 
>> (frc=%d)\n",
>> +                             (unsigned long) inst_addr, frc);
>> +                    return 12;
>> +                }
>> +            }
>> +        }
>> +        if ( bytes[i] == 0xed )     /* in (%dx),%eax or in (%dx),%ax */
>> +        {
>> +            rc = vmport_ioport(IOREQ_READ, BDOOR_PORT, byte_cnt, &val);
>> +            VMPORT_DBG_LOG(VMPORT_LOG_GP_VMWARE_AFTER,
>> +                           "gp: VMwareIn  rc=%d ip=%"PRIx64" 
>> byte_cnt=%d ax=%"
>> +                           PRIx64" bx=%"PRIx64" cx=%"PRIx64" 
>> dx=%"PRIx64
>> +                           " si=%"PRIx64" di=%"PRIx64, rc,
>> +                           inst_addr, byte_cnt, regs->rax, regs->rbx,
>> +                           regs->rcx, regs->rdx, regs->rsi, regs->rdi);
>> +            return rc;
>> +        }
>> +        else if ( bytes[i] == 0xec )     /* in (%dx),%al */
>> +        {
>> +            rc = vmport_ioport(IOREQ_READ, BDOOR_PORT, 1, &val);
>> +            VMPORT_DBG_LOG(VMPORT_LOG_GP_VMWARE_AFTER,
>> +                           "gp: VMwareIn  rc=%d ip=%"PRIx64" 
>> byte_cnt=1 ax=%"
>> +                           PRIx64" bx=%"PRIx64" cx=%"PRIx64" 
>> dx=%"PRIx64
>> +                           " si=%"PRIx64" di=%"PRIx64, rc,
>> +                           inst_addr, regs->rax, regs->rbx, regs->rcx,
>> +                           regs->rdx, regs->rsi, regs->rdi);
>> +            return rc;
>> +        }
>> +        else if ( bytes[i] == 0xef )     /* out %eax,(%dx) or out 
>> %ax,(%dx) */
>> +        {
>> +            rc = vmport_ioport(IOREQ_WRITE, BDOOR_PORT, byte_cnt, 
>> &val);
>> +            VMPORT_DBG_LOG(VMPORT_LOG_GP_VMWARE_AFTER,
>> +                           "gp: VMwareOut rc=%d ip=%"PRIx64" 
>> byte_cnt=%d ax=%"
>> +                           PRIx64" bx=%"PRIx64" cx=%"PRIx64" 
>> dx=%"PRIx64
>> +                           " si=%"PRIx64" di=%"PRIx64, rc,
>> +                           inst_addr, byte_cnt, regs->rax, regs->rbx,
>> +                           regs->rcx, regs->rdx, regs->rsi, regs->rdi);
>> +            return rc;
>> +        }
>> +        else if ( bytes[i] == 0xee )     /* out %al,(%dx) */
>> +        {
>> +            rc = vmport_ioport(IOREQ_WRITE, BDOOR_PORT, 1, &val);
>> +            VMPORT_DBG_LOG(VMPORT_LOG_GP_VMWARE_AFTER,
>> +                           "gp: VMwareOut rc=%d ip=%"PRIx64" 
>> byte_cnt=1 ax=%"
>> +                           PRIx64" bx=%"PRIx64" cx=%"PRIx64" 
>> dx=%"PRIx64
>> +                           " si=%"PRIx64" di=%"PRIx64, rc,
>> +                           inst_addr, regs->rax, regs->rbx, regs->rcx,
>> +                           regs->rdx, regs->rsi, regs->rdi);
>> +            return rc;
>> +        }
>> +        else
>> +        {
>> +            VMPORT_DBG_LOG(VMPORT_LOG_GP_FAIL_RD_INST,
>> +                           "gp: VMware? lip=%"PRIx64"[%d]=>0x%x(%ld) 
>> ax=%"
>> +                           PRIx64" bx=%"PRIx64" cx=%"PRIx64" 
>> dx=%"PRIx64
>> +                           " si=%"PRIx64" di=%"PRIx64,
>> +                           inst_addr, i, bytes[i], inst_len, regs->rax,
>> +                           regs->rbx, regs->rcx, regs->rdx, regs->rsi,
>> +                           regs->rdi);
>> +            return 13;
>> +        }
>> +    }
>> +    return 14;
>
> The return values should be defined as macros --- otherwise they look 
> like some magic integers.
>

Ok, will add a set of #define for them.

    -Don Slutz


> -boris
>
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.