
[Xen-changelog] [xen master] x86/svm: Clean up intinfo_t variables



commit 644f61d363f064a67f34937480a536062412cf6b
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Nov 25 13:29:20 2019 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Dec 5 13:19:28 2019 +0000

    x86/svm: Clean up intinfo_t variables
    
    The type name eventinj_t is poor because the type is also used for the
    IDT vectoring field, not just for the event injection field.  Rename it
    to intinfo_t, which is how the APM refers to the data.
    
    Rearrange the union to drop the .fields infix, and rename bytes to the more
    common raw.  Also take the opportunity to rename the fields in the VMCB to
    increase legibility.
    
    While adjusting all call sites, fix up style issues and make use of
    structure assignments where applicable.
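    
    As a minimal self-contained sketch of the new access pattern (the union
    layout is taken from the vmcb.h hunk below; inject_nmi_sketch() is only a
    stand-in for the svm_inject_nmi() call site in intr.c, with the Xen
    constants it uses defined locally for illustration):
    
        #include <stdbool.h>
        #include <stdint.h>
    
        #define X86_EVENTTYPE_NMI 2     /* SVM event type encoding for NMI */
        #define TRAP_nmi          2     /* NMI vector */
    
        /* New-style union: anonymous inner struct, so no .fields infix. */
        typedef union {
            struct {
                uint8_t  vector;
                uint8_t  type:3;
                bool     ev:1;
                uint32_t resvd1:19;
                bool     v:1;
                uint32_t ec;
            };
            uint64_t raw;
        } intinfo_t;
    
        void inject_nmi_sketch(intinfo_t *event_inj)
        {
            intinfo_t event = { .raw = 0 };
    
            event.v      = true;              /* was: event.fields.v = 1      */
            event.type   = X86_EVENTTYPE_NMI; /* was: event.fields.type = ... */
            event.vector = TRAP_nmi;          /* was: event.fields.vector = 2 */
    
            *event_inj = event;               /* structure assignment, not a
                                               * .bytes copy                  */
        }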
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/svm/intr.c        | 32 ++++++++---------
 xen/arch/x86/hvm/svm/nestedsvm.c   | 36 +++++++++----------
 xen/arch/x86/hvm/svm/svm.c         | 74 +++++++++++++++++++-------------------
 xen/arch/x86/hvm/svm/svmdebug.c    | 16 ++++-----
 xen/arch/x86/hvm/svm/vmcb.c        |  2 +-
 xen/include/asm-x86/hvm/svm/vmcb.h | 22 ++++++------
 6 files changed, 88 insertions(+), 94 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index ff755165cd..38011bd4e2 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -43,15 +43,15 @@ static void svm_inject_nmi(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
-    eventinj_t event;
+    intinfo_t event;
 
-    event.bytes = 0;
-    event.fields.v = 1;
-    event.fields.type = X86_EVENTTYPE_NMI;
-    event.fields.vector = 2;
+    event.raw = 0;
+    event.v = true;
+    event.type = X86_EVENTTYPE_NMI;
+    event.vector = TRAP_nmi;
 
-    ASSERT(vmcb->eventinj.fields.v == 0);
-    vmcb->eventinj = event;
+    ASSERT(!vmcb->event_inj.v);
+    vmcb->event_inj = event;
 
     /*
      * SVM does not virtualise the NMI mask, so we emulate it by intercepting
@@ -64,15 +64,15 @@ static void svm_inject_nmi(struct vcpu *v)
 static void svm_inject_extint(struct vcpu *v, int vector)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
-    eventinj_t event;
+    intinfo_t event;
 
-    event.bytes = 0;
-    event.fields.v = 1;
-    event.fields.type = X86_EVENTTYPE_EXT_INTR;
-    event.fields.vector = vector;
+    event.raw = 0;
+    event.v = true;
+    event.type = X86_EVENTTYPE_EXT_INTR;
+    event.vector = vector;
 
-    ASSERT(vmcb->eventinj.fields.v == 0);
-    vmcb->eventinj = event;
+    ASSERT(!vmcb->event_inj.v);
+    vmcb->event_inj = event;
 }
 
 static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
@@ -99,7 +99,7 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     }
 
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
-                vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
+                vmcb->event_inj.v ? vmcb->event_inj.vector : -1);
 
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
@@ -197,7 +197,7 @@ void svm_intr_assist(void)
          *      have cleared the interrupt out of the IRR.
          * 2. The IRQ is masked.
          */
-        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
+        if ( unlikely(vmcb->event_inj.v) || intblk )
         {
             svm_enable_intr_window(v, intack);
             return;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index fef124fb11..3bd2a119d3 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -340,7 +340,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
     /* Clear exitintinfo to prevent a fault loop of re-injecting
      * exceptions forever.
      */
-    n1vmcb->exitintinfo.bytes = 0;
+    n1vmcb->exit_int_info.raw = 0;
 
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
@@ -514,10 +514,10 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     n2vmcb->exitcode = ns_vmcb->exitcode;
     n2vmcb->exitinfo1 = ns_vmcb->exitinfo1;
     n2vmcb->exitinfo2 = ns_vmcb->exitinfo2;
-    n2vmcb->exitintinfo = ns_vmcb->exitintinfo;
+    n2vmcb->exit_int_info = ns_vmcb->exit_int_info;
 
     /* Pending Interrupts */
-    n2vmcb->eventinj = ns_vmcb->eventinj;
+    n2vmcb->event_inj = ns_vmcb->event_inj;
 
     /* LBR and other virtualization */
     if (!vcleanbit_set(lbr)) {
@@ -806,13 +806,10 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
 
         switch (exitcode) {
         case VMEXIT_INTR:
-            if ( unlikely(ns_vmcb->eventinj.fields.v)
-                && nv->nv_vmentry_pending
-                && hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type,
-                    ns_vmcb->eventinj.fields.vector) )
-            {
-                ns_vmcb->exitintinfo.bytes = ns_vmcb->eventinj.bytes;
-            }
+            if ( unlikely(ns_vmcb->event_inj.v) && nv->nv_vmentry_pending &&
+                 hvm_event_needs_reinjection(ns_vmcb->event_inj.type,
+                                             ns_vmcb->event_inj.vector) )
+                ns_vmcb->exit_int_info = ns_vmcb->event_inj;
             break;
         case VMEXIT_EXCEPTION_PF:
             ns_vmcb->_cr2 = ns_vmcb->exitinfo2;
@@ -837,7 +834,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
     }
 
     ns_vmcb->exitcode = exitcode;
-    ns_vmcb->eventinj.bytes = 0;
+    ns_vmcb->event_inj.raw = 0;
     return 0;
 }
 
@@ -1067,7 +1064,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
     ns_vmcb->exitcode = n2vmcb->exitcode;
     ns_vmcb->exitinfo1 = n2vmcb->exitinfo1;
     ns_vmcb->exitinfo2 = n2vmcb->exitinfo2;
-    ns_vmcb->exitintinfo = n2vmcb->exitintinfo;
+    ns_vmcb->exit_int_info = n2vmcb->exit_int_info;
 
     /* Interrupts */
     /* If we emulate a VMRUN/#VMEXIT in the same host #VMEXIT cycle we have
@@ -1077,14 +1074,12 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
      * only happens on a VMRUN instruction intercept which has no valid
      * exitintinfo set.
      */
-    if ( unlikely(n2vmcb->eventinj.fields.v) &&
-         hvm_event_needs_reinjection(n2vmcb->eventinj.fields.type,
-                                     n2vmcb->eventinj.fields.vector) )
-    {
-        ns_vmcb->exitintinfo = n2vmcb->eventinj;
-    }
+    if ( unlikely(n2vmcb->event_inj.v) &&
+         hvm_event_needs_reinjection(n2vmcb->event_inj.type,
+                                     n2vmcb->event_inj.vector) )
+        ns_vmcb->exit_int_info = n2vmcb->event_inj;
 
-    ns_vmcb->eventinj.bytes = 0;
+    ns_vmcb->event_inj.raw = 0;
 
     /* Nested paging mode */
     if (nestedhvm_paging_mode_hap(v)) {
@@ -1249,7 +1244,8 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
         if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
-        if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
+        if ( !nv->nv_vmexit_pending && n2vmcb->exit_int_info.v )
+        {
             /* Give the l2 guest a chance to finish the delivery of
              * the last injected interrupt or exception before we
              * emulate a VMEXIT (e.g. VMEXIT(INTR) ).
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a0be4da6a1..48203c538c 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -259,12 +259,12 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
     c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
     c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
-    if ( vmcb->eventinj.fields.v &&
-         hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
-                                     vmcb->eventinj.fields.vector) )
+    if ( vmcb->event_inj.v &&
+         hvm_event_needs_reinjection(vmcb->event_inj.type,
+                                     vmcb->event_inj.vector) )
     {
-        c->pending_event = (uint32_t)vmcb->eventinj.bytes;
-        c->error_code = vmcb->eventinj.fields.errorcode;
+        c->pending_event = vmcb->event_inj.raw;
+        c->error_code = vmcb->event_inj.ec;
     }
 
     return 1;
@@ -339,11 +339,11 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     {
         gdprintk(XENLOG_INFO, "Re-injecting %#"PRIx32", %#"PRIx32"\n",
                  c->pending_event, c->error_code);
-        vmcb->eventinj.bytes = c->pending_event;
-        vmcb->eventinj.fields.errorcode = c->error_code;
+        vmcb->event_inj.raw = c->pending_event;
+        vmcb->event_inj.ec = c->error_code;
     }
     else
-        vmcb->eventinj.bytes = 0;
+        vmcb->event_inj.raw = 0;
 
     vmcb->cleanbits.bytes = 0;
     paging_update_paging_modes(v);
@@ -1301,7 +1301,7 @@ static void svm_inject_event(const struct x86_event *event)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
-    eventinj_t eventinj = vmcb->eventinj;
+    intinfo_t eventinj = vmcb->event_inj;
     struct x86_event _event = *event;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
@@ -1342,18 +1342,17 @@ static void svm_inject_event(const struct x86_event *event)
         break;
     }
 
-    if ( unlikely(eventinj.fields.v) &&
-         (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+    if ( eventinj.v && (eventinj.type == X86_EVENTTYPE_HW_EXCEPTION) )
     {
         _event.vector = hvm_combine_hw_exceptions(
-            eventinj.fields.vector, _event.vector);
+            eventinj.vector, _event.vector);
         if ( _event.vector == TRAP_double_fault )
             _event.error_code = 0;
     }
 
-    eventinj.bytes = 0;
-    eventinj.fields.v = 1;
-    eventinj.fields.vector = _event.vector;
+    eventinj.raw = 0;
+    eventinj.v = true;
+    eventinj.vector = _event.vector;
 
     /*
      * Refer to AMD Vol 2: System Programming, 15.20 Event Injection.
@@ -1373,7 +1372,7 @@ static void svm_inject_event(const struct x86_event *event)
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
-        eventinj.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+        eventinj.type = X86_EVENTTYPE_SW_INTERRUPT;
         break;
 
     case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
@@ -1385,7 +1384,7 @@ static void svm_inject_event(const struct x86_event *event)
         regs->rip += _event.insn_len;
         if ( cpu_has_svm_nrips )
             vmcb->nextrip = regs->rip;
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
         break;
 
     case X86_EVENTTYPE_SW_EXCEPTION: /* int3, into */
@@ -1397,13 +1396,13 @@ static void svm_inject_event(const struct x86_event *event)
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
         break;
 
     default:
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
-        eventinj.fields.ev = (_event.error_code != X86_EVENT_NO_EC);
-        eventinj.fields.errorcode = _event.error_code;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.ev = (_event.error_code != X86_EVENT_NO_EC);
+        eventinj.ec = _event.error_code;
         break;
     }
 
@@ -1417,9 +1416,8 @@ static void svm_inject_event(const struct x86_event *event)
         vmcb->nextrip = (uint32_t)vmcb->nextrip;
     }
 
-    ASSERT(!eventinj.fields.ev ||
-           eventinj.fields.errorcode == (uint16_t)eventinj.fields.errorcode);
-    vmcb->eventinj = eventinj;
+    ASSERT(!eventinj.ev || eventinj.ec == (uint16_t)eventinj.ec);
+    vmcb->event_inj = eventinj;
 
     if ( _event.vector == TRAP_page_fault &&
          _event.type == X86_EVENTTYPE_HW_EXCEPTION )
@@ -1431,7 +1429,7 @@ static void svm_inject_event(const struct x86_event *event)
 
 static bool svm_event_pending(const struct vcpu *v)
 {
-    return v->arch.hvm.svm.vmcb->eventinj.fields.v;
+    return v->arch.hvm.svm.vmcb->event_inj.v;
 }
 
 static void svm_cpu_dead(unsigned int cpu)
@@ -2410,12 +2408,12 @@ static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
 {
     const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( vmcb->eventinj.fields.v )
+    if ( vmcb->event_inj.v )
         return false;
 
-    info->vector = vmcb->eventinj.fields.vector;
-    info->type = vmcb->eventinj.fields.type;
-    info->error_code = vmcb->eventinj.fields.errorcode;
+    info->vector = vmcb->event_inj.vector;
+    info->type = vmcb->event_inj.type;
+    info->error_code = vmcb->event_inj.ec;
 
     return true;
 }
@@ -2602,10 +2600,10 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     vmcb->cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u;
 
     /* Event delivery caused this intercept? Queue for redelivery. */
-    if ( unlikely(vmcb->exitintinfo.fields.v) &&
-         hvm_event_needs_reinjection(vmcb->exitintinfo.fields.type,
-                                     vmcb->exitintinfo.fields.vector) )
-        vmcb->eventinj = vmcb->exitintinfo;
+    if ( unlikely(vmcb->exit_int_info.v) &&
+         hvm_event_needs_reinjection(vmcb->exit_int_info.type,
+                                     vmcb->exit_int_info.vector) )
+        vmcb->event_inj = vmcb->exit_int_info;
 
     switch ( exit_reason )
     {
@@ -2765,9 +2763,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
          * switches.
          */
         insn_len = -1;
-        if ( vmcb->exitintinfo.fields.v )
+        if ( vmcb->exit_int_info.v )
         {
-            switch ( vmcb->exitintinfo.fields.type )
+            switch ( vmcb->exit_int_info.type )
             {
                 /*
                  * #BP and #OF are from INT3/INTO respectively.  #DB from
@@ -2775,8 +2773,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                  * semantics.
                  */
             case X86_EVENTTYPE_HW_EXCEPTION:
-                if ( vmcb->exitintinfo.fields.vector == TRAP_int3 ||
-                     vmcb->exitintinfo.fields.vector == TRAP_overflow )
+                if ( vmcb->exit_int_info.vector == TRAP_int3 ||
+                     vmcb->exit_int_info.vector == TRAP_overflow )
                     break;
                 /* Fallthrough */
             case X86_EVENTTYPE_EXT_INTR:
@@ -2789,7 +2787,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
              * The common logic above will have forwarded the vectoring
              * information.  Undo this as we are going to emulate.
              */
-            vmcb->eventinj.bytes = 0;
+            vmcb->event_inj.raw = 0;
         }
 
         /*
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 4293d8dba5..366a003f21 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -54,12 +54,12 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
     printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = 
%#"PRIx64"\n",
            vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes,
            vmcb->interrupt_shadow);
-    printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
-           vmcb->eventinj.bytes, vmcb->eventinj.fields.v,
-           vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type,
-           vmcb->eventinj.fields.vector);
-    printk("exitcode = %#"PRIx64" exitintinfo = %#"PRIx64"\n",
-           vmcb->exitcode, vmcb->exitintinfo.bytes);
+    printk("event_inj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
+           vmcb->event_inj.raw, vmcb->event_inj.v,
+           vmcb->event_inj.ev, vmcb->event_inj.type,
+           vmcb->event_inj.vector);
+    printk("exitcode = %#"PRIx64" exit_int_info = %#"PRIx64"\n",
+           vmcb->exitcode, vmcb->exit_int_info.raw);
     printk("exitinfo1 = %#"PRIx64" exitinfo2 = %#"PRIx64"\n",
            vmcb->exitinfo1, vmcb->exitinfo2);
     printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
@@ -164,9 +164,9 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
         PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear 
(%#"PRIx32")\n",
                vmcb_get_general2_intercepts(vmcb));
 
-    if ( vmcb->eventinj.fields.resvd1 )
+    if ( vmcb->event_inj.resvd1 )
         PRINTF("eventinj: MBZ bits are set (%#"PRIx64")\n",
-               vmcb->eventinj.bytes);
+               vmcb->event_inj.raw);
 
 #undef PRINTF
     return ret;
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 3e460ca9cb..373d5d4af4 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -264,7 +264,7 @@ static void __init __maybe_unused build_assertions(void)
     BUILD_BUG_ON(sizeof(vmcb) != PAGE_SIZE);
     BUILD_BUG_ON(offsetof(typeof(vmcb), _pause_filter_thresh) != 0x03c);
     BUILD_BUG_ON(offsetof(typeof(vmcb), _vintr)               != 0x060);
-    BUILD_BUG_ON(offsetof(typeof(vmcb), eventinj)             != 0x0a8);
+    BUILD_BUG_ON(offsetof(typeof(vmcb), event_inj)            != 0x0a8);
     BUILD_BUG_ON(offsetof(typeof(vmcb), es)                   != 0x400);
     BUILD_BUG_ON(offsetof(typeof(vmcb), _cpl)                 != 0x4cb);
     BUILD_BUG_ON(offsetof(typeof(vmcb), _cr4)                 != 0x548);
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index e37220edf2..d2fc4d7281 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -306,17 +306,17 @@ enum VMEXIT_EXITCODE
 
 typedef union
 {
-    u64 bytes;
     struct
     {
-        u64 vector:    8;
-        u64 type:      3;
-        u64 ev:        1;
-        u64 resvd1:   19;
-        u64 v:         1;
-        u64 errorcode:32;
-    } fields;
-} eventinj_t;
+        uint8_t  vector;
+        uint8_t  type:3;
+        bool     ev:1;
+        uint32_t resvd1:19;
+        bool     v:1;
+        uint32_t ec;
+    };
+    uint64_t raw;
+} intinfo_t;
 
 typedef union
 {
@@ -420,10 +420,10 @@ struct vmcb_struct {
     u64 exitcode;               /* offset 0x70 */
     u64 exitinfo1;              /* offset 0x78 */
     u64 exitinfo2;              /* offset 0x80 */
-    eventinj_t  exitintinfo;    /* offset 0x88 */
+    intinfo_t exit_int_info;    /* offset 0x88 */
     u64 _np_enable;             /* offset 0x90 - cleanbit 4 */
     u64 res08[2];
-    eventinj_t  eventinj;       /* offset 0xA8 */
+    intinfo_t event_inj;        /* offset 0xA8 */
     u64 _h_cr3;                 /* offset 0xB0 - cleanbit 4 */
     virt_ext_t virt_ext;        /* offset 0xB8 */
     vmcbcleanbits_t cleanbits;  /* offset 0xC0 */
--
generated by git-patchbot for /home/xen/git/xen.git#master
