
[PATCH] x86: avoid Misra Rule 19.1 violations


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 17 Jul 2023 16:13:29 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Roberto Bagnara <roberto.bagnara@xxxxxxxxxxx>, Nicola Vetrini <nicola.vetrini@xxxxxxxxxxx>
  • Delivery-date: Mon, 17 Jul 2023 14:14:15 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Accesses to objects on the left and right hand sides of an assignment
which overlap, but not exactly, are generally UB, and hence disallowed
by Misra. While in the specific cases changed here no actual UB can
result as long as the compiler doesn't act actively "maliciously", let's
still switch to using casts combined with exactly overlapping accesses.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Oddly enough, in my (release) build using gcc12 I actually see emulator
code shrink by about 40 bytes. Diff-ing the disassembly, I can't really
attribute this to the particular changes; rather, it looks like certain
scheduling, inlining, and code folding decisions are made differently.
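
For illustration only (and not part of the patch), here's a minimal,
self-contained sketch of the pattern involved; the struct below is a
hypothetical, simplified stand-in for the rip/eip overlay in struct
cpu_user_regs, not the real layout:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for Xen's struct cpu_user_regs. */
struct regs {
    union {
        uint64_t rip;
        uint32_t eip;   /* aliases the low 32 bits of rip */
    };
};

int main(void)
{
    struct regs r = { .rip = 0x1122334455667788ULL };

    /*
     * The pattern the patch removes: reading r.eip while writing r.rip
     * touches partially overlapping storage, which Misra Rule 19.1 flags:
     *
     *     r.rip = r.eip;
     *
     * The pattern the patch switches to: read and write the very same
     * object, truncating to the low 32 bits via a cast, so the two
     * accesses overlap exactly while producing the same value.
     */
    r.rip = (uint32_t)r.rip;

    printf("%#" PRIx64 "\n", r.rip);   /* prints 0x55667788 */
    return 0;
}

Built with e.g. gcc -std=c11, the cast form yields the same truncated
value the removed assignment would, while keeping both accesses to
exactly the same object.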

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3860,7 +3860,7 @@ void hvm_ud_intercept(struct cpu_user_re
 
             /* Zero the upper 32 bits of %rip if not in 64bit mode. */
             if ( !(hvm_long_mode_active(cur) && cs->l) )
-                regs->rip = regs->eip;
+                regs->rip = (uint32_t)regs->rip;
 
             add_taint(TAINT_HVM_FEP);
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1420,7 +1420,7 @@ static void cf_check svm_inject_event(co
      */
     if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.l) )
     {
-        regs->rip = regs->eip;
+        regs->rip = (uint32_t)regs->rip;
         vmcb->nextrip = (uint32_t)vmcb->nextrip;
     }
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4760,7 +4760,7 @@ out:
                 regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
                             (64 - VADDR_BITS);
             else
-                regs->rip = regs->eip;
+                regs->rip = (uint32_t)regs->rip;
         }
         else
             domain_crash(v->domain);
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -503,8 +503,8 @@ static inline void put_loop_count(
         if ( mode_64bit() && ad_bytes == 4 )                            \
         {                                                               \
             _regs.r(cx) = 0;                                            \
-            if ( using_si ) _regs.r(si) = _regs.esi;                    \
-            if ( using_di ) _regs.r(di) = _regs.edi;                    \
+            if ( using_si ) _regs.r(si) = (uint32_t)_regs.r(si);        \
+            if ( using_di ) _regs.r(di) = (uint32_t)_regs.r(di);        \
         }                                                               \
         goto complete_insn;                                             \
     }                                                                   \
@@ -1984,9 +1984,9 @@ x86_emulate(
     case 0x98: /* cbw/cwde/cdqe */
         switch ( op_bytes )
         {
-        case 2: _regs.ax = (int8_t)_regs.al; break; /* cbw */
+        case 2: _regs.ax = (int8_t)_regs.ax; break; /* cbw */
         case 4: _regs.r(ax) = (uint32_t)(int16_t)_regs.ax; break; /* cwde */
-        case 8: _regs.r(ax) = (int32_t)_regs.eax; break; /* cdqe */
+        case 8: _regs.r(ax) = (int32_t)_regs.r(ax); break; /* cdqe */
         }
         break;
 
@@ -8377,7 +8377,7 @@ x86_emulate(
 
     /* Zero the upper 32 bits of %rip if not in 64-bit mode. */
     if ( !mode_64bit() )
-        _regs.r(ip) = _regs.eip;
+        _regs.r(ip) = (uint32_t)_regs.r(ip);
 
     /* Should a singlestep #DB be raised? */
     if ( rc == X86EMUL_OKAY && singlestep && !ctxt->retire.mov_ss )
