
[Xen-devel] [PATCH v4 03/13] x86/IRQ: desc->affinity should strictly represent the requested value


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Tue, 16 Jul 2019 07:38:31 +0000
  • Accept-language: en-US
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 16 Jul 2019 07:46:03 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever
fiddle with desc->affinity itself, except to store caller-requested
values. Note that assign_irq_vector() now takes a NULL incoming CPU mask
to mean "all CPUs", rather than just "all currently online CPUs".
This way no further affinity adjustment is needed after onlining further
CPUs.

This renders both set_native_irq_info() uses (which weren't using proper
locking anyway) redundant - drop the function altogether.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
v4: Use %*pbl.
---
TBD: To limit the impact on the (so far tabular) output of the 'i' debug
      key, it may be worthwhile to shift the two affinity values further
      to the right.
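
For illustration only, and not part of the patch: a self-contained toy
sketch of the intended semantics, assuming a plain unsigned long as a
stand-in for cpumask_t, a zero mask standing in for the NULL "all CPUs"
argument, and a simple intersection with the online mask standing in for
the real vector assignment done by __assign_irq_vector().

#include <stdio.h>

#define NR_CPUS  8
#define ALL_CPUS ((1ul << NR_CPUS) - 1)

struct toy_desc {
    unsigned long affinity;  /* strictly the caller-requested value      */
    unsigned long cpu_mask;  /* the set of CPUs the vector actually uses */
};

/* mask == 0 plays the role of a NULL mask: "all CPUs", not merely the
 * CPUs online at the time, so no fixup is needed after onlining more. */
static void toy_assign_vector(struct toy_desc *d, unsigned long mask,
                              unsigned long online)
{
    unsigned long requested = mask ? mask : ALL_CPUS;

    d->affinity = requested;           /* requested value only           */
    d->cpu_mask = requested & online;  /* simplified "actual target" set */
}

int main(void)
{
    struct toy_desc d;

    /* Request "all CPUs" while only CPUs 0-3 are online. */
    toy_assign_vector(&d, 0, 0x0f);
    printf("affinity=%#lx cpu_mask=%#lx\n", d.affinity, d.cpu_mask);
    /* affinity stays 0xff; only cpu_mask reflects what is online now,
     * so the requested value survives later CPU onlining untouched. */
    return 0;
}

The point of the sketch is merely that desc->affinity keeps the requested
value unchanged, while only the actual target set varies with the set of
online CPUs.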

--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1039,7 +1039,6 @@ static void __init setup_IO_APIC_irqs(vo
              SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
              spin_lock_irqsave(&ioapic_lock, flags);
              __ioapic_write_entry(apic, pin, 0, entry);
-            set_native_irq_info(irq, TARGET_CPUS);
              spin_unlock_irqrestore(&ioapic_lock, flags);
          }
      }
@@ -2248,7 +2247,6 @@ int io_apic_set_pci_routing (int ioapic,
  
      spin_lock_irqsave(&ioapic_lock, flags);
      __ioapic_write_entry(ioapic, pin, 0, entry);
-    set_native_irq_info(irq, TARGET_CPUS);
      spin_unlock(&ioapic_lock);
  
      spin_lock(&desc->lock);
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -589,11 +589,16 @@ int assign_irq_vector(int irq, const cpu
  
      spin_lock_irqsave(&vector_lock, flags);
      ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS);
-    if (!ret) {
+    if ( !ret )
+    {
          ret = desc->arch.vector;
-        cpumask_copy(desc->affinity, desc->arch.cpu_mask);
+        if ( mask )
+            cpumask_copy(desc->affinity, mask);
+        else
+            cpumask_setall(desc->affinity);
      }
      spin_unlock_irqrestore(&vector_lock, flags);
+
      return ret;
  }
  
@@ -2345,9 +2350,10 @@ static void dump_irqs(unsigned char key)
  
          spin_lock_irqsave(&desc->lock, flags);
  
-        printk("   IRQ:%4d aff:%*pb vec:%02x %-15s status=%03x ",
-               irq, nr_cpu_ids, cpumask_bits(desc->affinity), desc->arch.vector,
-               desc->handler->typename, desc->status);
+        printk("   IRQ:%4d aff:{%*pbl}/{%*pbl} vec:%02x %-15s status=%03x ",
+               irq, nr_cpu_ids, cpumask_bits(desc->affinity),
+               nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+               desc->arch.vector, desc->handler->typename, desc->status);
  
          if ( ssid )
              printk("Z=%-25s ", ssid);
@@ -2435,8 +2441,7 @@ void fixup_irqs(const cpumask_t *mask, b
                  release_old_vec(desc);
          }
  
-        cpumask_copy(&affinity, desc->affinity);
-        if ( !desc->action || cpumask_subset(&affinity, mask) )
+        if ( !desc->action || cpumask_subset(desc->affinity, mask) )
          {
              spin_unlock(&desc->lock);
              continue;
@@ -2469,12 +2474,13 @@ void fixup_irqs(const cpumask_t *mask, b
              desc->arch.move_in_progress = 0;
          }
  
-        cpumask_and(&affinity, &affinity, mask);
-        if ( cpumask_empty(&affinity) )
+        if ( !cpumask_intersects(mask, desc->affinity) )
          {
              break_affinity = true;
-            cpumask_copy(&affinity, mask);
+            cpumask_setall(&affinity);
          }
+        else
+            cpumask_copy(&affinity, desc->affinity);
  
          if ( desc->handler->disable )
              desc->handler->disable(desc);
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -162,11 +162,6 @@ extern irq_desc_t *domain_spin_lock_irq_
  extern irq_desc_t *pirq_spin_lock_irq_desc(
      const struct pirq *, unsigned long *pflags);
  
-static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
-{
-    cpumask_copy(irq_to_desc(irq)->affinity, mask);
-}
-
  unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);
  
  #ifndef arch_hwdom_irqs

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

