
Re: [Xen-devel] [PATCH 07/13] xen: generalized event channel operations



On Thu, Jan 31, 2013 at 02:47:01PM +0000, Wei Liu wrote:
> Use global pointers in common operations to allow for better code sharing
> between N-level event channels.
> 
> Functions which are not suitable for sharing are also taken care of.
> 
> Also update drivers/xen/evtchn.c to use exported variable instead of macro.
> 
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
>  drivers/xen/events.c |  180 ++++++++++++++++++++++++++++++++------------------
>  drivers/xen/evtchn.c |   12 ++--
>  include/xen/events.h |    2 +
>  3 files changed, 123 insertions(+), 71 deletions(-)
> 
> diff --git a/drivers/xen/events.c b/drivers/xen/events.c
> index 0679d27..4820a52 100644
> --- a/drivers/xen/events.c
> +++ b/drivers/xen/events.c
> @@ -51,6 +51,16 @@
>  #include <xen/interface/hvm/hvm_op.h>
>  #include <xen/interface/hvm/params.h>
>  
> +/* N-level event channel, starting from 2 */
> +unsigned int evtchn_level = 2;

What if the hypervisor does not support that? Shouldn't it default
to 1?
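
Something along these lines perhaps? Only a sketch - the
xen_evtchn_negotiate_level() helper and the XEN_EVTCHN_DEFAULT_LEVEL /
XEN_EVTCHN_MAX_LEVEL constants are made-up names standing in for
whatever capability query the final ABI ends up providing:

/*
 * Sketch only, not the real interface: start at whatever level we
 * decide is the safe default, and only move to a deeper layout once
 * the hypervisor has acknowledged it.
 */
static void __init xen_evtchn_pick_level(void)
{
	unsigned int level;

	evtchn_level = XEN_EVTCHN_DEFAULT_LEVEL;

	for (level = XEN_EVTCHN_MAX_LEVEL;
	     level > XEN_EVTCHN_DEFAULT_LEVEL; level--) {
		if (xen_evtchn_negotiate_level(level) == 0) {
			evtchn_level = level;
			break;
		}
	}
}

That way an old hypervisor that only knows the current layout keeps
working with the default.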

> +EXPORT_SYMBOL_GPL(evtchn_level);

Please prefix it with 'xen'.

> +unsigned int nr_event_channels;
> +EXPORT_SYMBOL_GPL(nr_event_channels);

Ditto here.
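
I.e. something like this (names purely illustrative):

	unsigned int xen_evtchn_level = 2;
	EXPORT_SYMBOL_GPL(xen_evtchn_level);

	unsigned int xen_nr_event_channels;
	EXPORT_SYMBOL_GPL(xen_nr_event_channels);

with the users in drivers/xen/evtchn.c and include/xen/events.h
switched over to the new names.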

> +
> +/* The following pointers point to pending bitmap and mask bitmap. */
> +static unsigned long *evtchn_pending;
> +static unsigned long *evtchn_mask;
> +
>  /*
>   * This lock protects updates to the following mapping and reference-count
>   * arrays. The lock does not need to be acquired to read the mapping tables.
> @@ -113,7 +123,7 @@ static int *evtchn_to_irq;
>  static unsigned long *pirq_eoi_map;
>  static bool (*pirq_needs_eoi)(unsigned irq);
>  
> -static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
> +static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS_L2/BITS_PER_LONG],
>                     cpu_evtchn_mask);
>  
>  /* Xen will never allocate port zero for any purpose. */
> @@ -286,12 +296,11 @@ static bool pirq_needs_eoi_flag(unsigned irq)
>  }
>  
>  static inline unsigned long active_evtchns(unsigned int cpu,
> -                                        struct shared_info *sh,
>                                          unsigned int idx)
>  {
> -     return sh->evtchn_pending[idx] &
> +     return evtchn_pending[idx] &
>               per_cpu(cpu_evtchn_mask, cpu)[idx] &
> -             ~sh->evtchn_mask[idx];
> +             ~evtchn_mask[idx];
>  }
>  
>  static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
> @@ -329,26 +338,22 @@ static void init_evtchn_cpu_bindings(void)
>  
>  static inline void clear_evtchn(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
> -     sync_clear_bit(port, &s->evtchn_pending[0]);
> +     sync_clear_bit(port, &evtchn_pending[0]);
>  }
>  
>  static inline void set_evtchn(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
> -     sync_set_bit(port, &s->evtchn_pending[0]);
> +     sync_set_bit(port, &evtchn_pending[0]);
>  }
>  
>  static inline int test_evtchn(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
> -     return sync_test_bit(port, &s->evtchn_pending[0]);
> +     return sync_test_bit(port, &evtchn_pending[0]);
>  }
>  
>  static inline int test_and_set_mask(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
> -     return sync_test_and_set_bit(port, &s->evtchn_mask[0]);
> +     return sync_test_and_set_bit(port, &evtchn_mask[0]);
>  }
>  
>  
> @@ -371,13 +376,28 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
>  
>  static void mask_evtchn(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
> -     sync_set_bit(port, &s->evtchn_mask[0]);
> +     sync_set_bit(port, &evtchn_mask[0]);
> +}
> +
> +static inline void __unmask_local_port_l2(int port)
> +{
> +     struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
> +
> +     sync_clear_bit(port, &evtchn_mask[0]);
> +
> +     /*
> +      * The following is basically the equivalent of
> +      * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
> +      * the interrupt edge' if the channel is masked.
> +      */
> +     if (sync_test_bit(port, &evtchn_pending[0]) &&
> +         !sync_test_and_set_bit(port / BITS_PER_LONG,
> +                                &vcpu_info->evtchn_pending_sel))
> +             vcpu_info->evtchn_upcall_pending = 1;
>  }
>  
>  static void unmask_evtchn(int port)
>  {
> -     struct shared_info *s = HYPERVISOR_shared_info;
>       unsigned int cpu = get_cpu();
>  
>       BUG_ON(!irqs_disabled());
> @@ -387,19 +407,13 @@ static void unmask_evtchn(int port)
>               struct evtchn_unmask unmask = { .port = port };
>               (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
>       } else {
> -             struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
> -
> -             sync_clear_bit(port, &s->evtchn_mask[0]);
> -
> -             /*
> -              * The following is basically the equivalent of
> -              * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
> -              * the interrupt edge' if the channel is masked.
> -              */
> -             if (sync_test_bit(port, &s->evtchn_pending[0]) &&
> -                 !sync_test_and_set_bit(port / BITS_PER_LONG,
> -                                        &vcpu_info->evtchn_pending_sel))
> -                     vcpu_info->evtchn_upcall_pending = 1;
> +             switch (evtchn_level) {
> +             case 2:
> +                     __unmask_local_port_l2(port);
> +                     break;
> +             default:
> +                     BUG();
> +             }
>       }
>  
>       put_cpu();
> @@ -902,7 +916,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
>       int port, rc = -ENOENT;
>  
>       memset(&status, 0, sizeof(status));
> -     for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
> +     for (port = 0; port <= nr_event_channels; port++) {
>               status.dom = DOMID_SELF;
>               status.port = port;
>               rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
> @@ -1127,7 +1141,7 @@ int evtchn_get(unsigned int evtchn)
>       struct irq_info *info;
>       int err = -ENOENT;
>  
> -     if (evtchn >= NR_EVENT_CHANNELS)
> +     if (evtchn >= nr_event_channels)
>               return -EINVAL;
>  
>       mutex_lock(&irq_mapping_update_lock);
> @@ -1170,15 +1184,16 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
>       notify_remote_via_irq(irq);
>  }
>  
> +static irqreturn_t xen_debug_interrupt_l2(int irq, void *dev_id);
> +
>  irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
>  {
> -     struct shared_info *sh = HYPERVISOR_shared_info;
> -     int cpu = smp_processor_id();
> -     unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
> -     int i;
> -     unsigned long flags;
> +     irqreturn_t rc;
>       static DEFINE_SPINLOCK(debug_lock);
> +     unsigned long flags;
> +     int cpu = smp_processor_id();
>       struct vcpu_info *v;
> +     int i;
>  
>       spin_lock_irqsave(&debug_lock, flags);
>  
> @@ -1195,24 +1210,45 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
>                      (int)(sizeof(v->evtchn_pending_sel)*2),
>                      v->evtchn_pending_sel);
>       }
> +
> +     switch (evtchn_level) {
> +     case 2:
> +             rc = xen_debug_interrupt_l2(irq, dev_id);
> +             break;
> +     default:
> +             BUG();
> +     }
> +
> +     spin_unlock_irqrestore(&debug_lock, flags);
> +     return rc;
> +}
> +
> +static irqreturn_t xen_debug_interrupt_l2(int irq, void *dev_id)
> +{
> +     int cpu = smp_processor_id();
> +     unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
> +     int i;
> +     unsigned long nr_elems = NR_EVENT_CHANNELS_L2 / BITS_PER_LONG;
> +     struct vcpu_info *v;
> +
>       v = per_cpu(xen_vcpu, cpu);
>  
>       printk(KERN_DEBUG "\npending:\n   ");
> -     for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
> -             printk(KERN_DEBUG "%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
> -                    sh->evtchn_pending[i],
> +     for (i = nr_elems; i >= 0; i--)
> +             printk(KERN_DEBUG "%0*lx%s", (int)sizeof(evtchn_pending[0])*2,
> +                    evtchn_pending[i],
>                      i % 8 == 0 ? "\n   " : " ");
>       printk(KERN_DEBUG "\nglobal mask:\n   ");
> -     for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
> +     for (i = nr_elems-1; i >= 0; i--)
>               printk(KERN_DEBUG "%0*lx%s",
> -                    (int)(sizeof(sh->evtchn_mask[0])*2),
> -                    sh->evtchn_mask[i],
> +                    (int)(sizeof(evtchn_mask[0])*2),
> +                    evtchn_mask[i],
>                      i % 8 == 0 ? "\n   " : " ");
>  
>       printk(KERN_DEBUG "\nglobally unmasked:\n   ");
> -     for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
> -             printk(KERN_DEBUG "%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
> -                    sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
> +     for (i = nr_elems-1; i >= 0; i--)
> +             printk(KERN_DEBUG "%0*lx%s", (int)(sizeof(evtchn_mask[0])*2),
> +                    evtchn_pending[i] & ~evtchn_mask[i],
>                      i % 8 == 0 ? "\n   " : " ");
>  
>       printk(KERN_DEBUG "\nlocal cpu%d mask:\n   ", cpu);
> @@ -1222,32 +1258,30 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
>                      i % 8 == 0 ? "\n   " : " ");
>  
>       printk(KERN_DEBUG "\nlocally unmasked:\n   ");
> -     for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
> -             unsigned long pending = sh->evtchn_pending[i]
> -                     & ~sh->evtchn_mask[i]
> +     for (i = nr_elems-1; i >= 0; i--) {
> +             unsigned long pending = evtchn_pending[i]
> +                     & ~evtchn_mask[i]
>                       & cpu_evtchn[i];
> -             printk(KERN_DEBUG "%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
> +             printk(KERN_DEBUG "%0*lx%s", (int)(sizeof(evtchn_mask[0])*2),
>                      pending, i % 8 == 0 ? "\n   " : " ");
>       }
>  
>       printk(KERN_DEBUG "\npending list:\n");
> -     for (i = 0; i < NR_EVENT_CHANNELS; i++) {
> -             if (sync_test_bit(i, sh->evtchn_pending)) {
> +     for (i = 0; i < NR_EVENT_CHANNELS_L2; i++) {
> +             if (sync_test_bit(i, evtchn_pending)) {
>                       int word_idx = i / BITS_PER_LONG;
>                       printk(KERN_DEBUG "  %d: event %d -> irq %d%s%s%s\n",
>                              cpu_from_evtchn(i), i,
>                              evtchn_to_irq[i],
>                              !sync_test_bit(word_idx, &v->evtchn_pending_sel)
>                                            ? "" : " l1-clear",
> -                            sync_test_bit(i, sh->evtchn_mask)
> +                            sync_test_bit(i, evtchn_mask)
>                                            ? "" : " globally-masked",
>                              sync_test_bit(i, cpu_evtchn)
>                                            ? "" : " locally-masked");
>               }
>       }
>  
> -     spin_unlock_irqrestore(&debug_lock, flags);
> -
>       return IRQ_HANDLED;
>  }
>  
> @@ -1269,13 +1303,12 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx);
>   * a bitset of words which contain pending event bits.  The second
>   * level is a bitset of pending events themselves.
>   */
> -static void __xen_evtchn_do_upcall(void)
> +static void __xen_evtchn_do_upcall_l2(void)
>  {
>       int start_word_idx, start_bit_idx;
>       int word_idx, bit_idx;
>       int i;
>       int cpu = get_cpu();
> -     struct shared_info *s = HYPERVISOR_shared_info;
>       struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
>       unsigned count;
>  
> @@ -1314,7 +1347,7 @@ static void __xen_evtchn_do_upcall(void)
>                       }
>                       word_idx = __ffs(words);
>  
> -                     pending_bits = active_evtchns(cpu, s, word_idx);
> +                     pending_bits = active_evtchns(cpu, word_idx);
>                       bit_idx = 0; /* usually scan entire word from start */
>                       if (word_idx == start_word_idx) {
>                               /* We scan the starting word in two parts */
> @@ -1383,7 +1416,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
>       exit_idle();
>       irq_enter();
>  
> -     __xen_evtchn_do_upcall();
> +     switch (evtchn_level) {
> +     case 2:
> +             __xen_evtchn_do_upcall_l2();
> +             break;
> +     default:
> +             BUG();
> +     }
>  
>       irq_exit();
>       set_irq_regs(old_regs);
> @@ -1391,7 +1430,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
>  
>  void xen_hvm_evtchn_do_upcall(void)
>  {
> -     __xen_evtchn_do_upcall();
> +     switch (evtchn_level) {
> +     case 2:
> +             __xen_evtchn_do_upcall_l2();
> +             break;
> +     default:
> +             BUG();
> +     }
>  }
>  EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
>  
> @@ -1465,7 +1510,6 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
>  int resend_irq_on_evtchn(unsigned int irq)
>  {
>       int masked, evtchn = evtchn_from_irq(irq);
> -     struct shared_info *s = HYPERVISOR_shared_info;
>  
>       if (!VALID_EVTCHN(evtchn))
>               return 1;
> @@ -1513,7 +1557,6 @@ static void mask_ack_dynirq(struct irq_data *data)
>  static int retrigger_dynirq(struct irq_data *data)
>  {
>       int evtchn = evtchn_from_irq(data->irq);
> -     struct shared_info *sh = HYPERVISOR_shared_info;
>       int ret = 0;
>  
>       if (VALID_EVTCHN(evtchn)) {
> @@ -1689,14 +1732,14 @@ void xen_irq_resume(void)
>       init_evtchn_cpu_bindings();
>  
>       /* New event-channel space is not 'live' yet. */
> -     for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
> +     for (evtchn = 0; evtchn < nr_event_channels; evtchn++)
>               mask_evtchn(evtchn);
>  
>       /* No IRQ <-> event-channel mappings. */
>       list_for_each_entry(info, &xen_irq_list_head, list)
>               info->evtchn = 0; /* zap event-channel binding */
>  
> -     for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
> +     for (evtchn = 0; evtchn < nr_event_channels; evtchn++)
>               evtchn_to_irq[evtchn] = -1;
>  
>       for_each_possible_cpu(cpu) {
> @@ -1792,17 +1835,24 @@ void xen_callback_vector(void) {}
>  void __init xen_init_IRQ(void)
>  {
>       int i, rc;
> +     struct shared_info *s = HYPERVISOR_shared_info;
> +
> +     evtchn_pending = s->evtchn_pending;
> +     evtchn_mask = s->evtchn_mask;
> +
> +     evtchn_level = 2;
> +     nr_event_channels = NR_EVENT_CHANNELS_L2;
>  
> -     evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
> +     evtchn_to_irq = kcalloc(nr_event_channels, sizeof(*evtchn_to_irq),
>                                   GFP_KERNEL);
>       BUG_ON(!evtchn_to_irq);
> -     for (i = 0; i < NR_EVENT_CHANNELS; i++)
> +     for (i = 0; i < nr_event_channels; i++)
>               evtchn_to_irq[i] = -1;
>  
>       init_evtchn_cpu_bindings();
>  
>       /* No event channels are 'live' right now. */
> -     for (i = 0; i < NR_EVENT_CHANNELS; i++)
> +     for (i = 0; i < nr_event_channels; i++)
>               mask_evtchn(i);
>  
>       pirq_needs_eoi = pirq_needs_eoi_flag;
> diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
> index d2bbea1..7515ecc 100644
> --- a/drivers/xen/evtchn.c
> +++ b/drivers/xen/evtchn.c
> @@ -232,7 +232,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
>       for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
>               unsigned port = kbuf[i];
>  
> -             if (port < NR_EVENT_CHANNELS &&
> +             if (port < nr_event_channels &&
>                   get_port_user(port) == u &&
>                   !get_port_enabled(port)) {
>                       set_port_enabled(port, true);
> @@ -374,7 +374,7 @@ static long evtchn_ioctl(struct file *file,
>                       break;
>  
>               rc = -EINVAL;
> -             if (unbind.port >= NR_EVENT_CHANNELS)
> +             if (unbind.port >= nr_event_channels)
>                       break;
>  
>               spin_lock_irq(&port_user_lock);
> @@ -402,7 +402,7 @@ static long evtchn_ioctl(struct file *file,
>               if (copy_from_user(&notify, uarg, sizeof(notify)))
>                       break;
>  
> -             if (notify.port >= NR_EVENT_CHANNELS) {
> +             if (notify.port >= nr_event_channels) {
>                       rc = -EINVAL;
>               } else if (get_port_user(notify.port) != u) {
>                       rc = -ENOTCONN;
> @@ -492,7 +492,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
>  
>       free_page((unsigned long)u->ring);
>  
> -     for (i = 0; i < NR_EVENT_CHANNELS; i++) {
> +     for (i = 0; i < nr_event_channels; i++) {
>               if (get_port_user(i) != u)
>                       continue;
>  
> @@ -501,7 +501,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
>  
>       spin_unlock_irq(&port_user_lock);
>  
> -     for (i = 0; i < NR_EVENT_CHANNELS; i++) {
> +     for (i = 0; i < nr_event_channels; i++) {
>               if (get_port_user(i) != u)
>                       continue;
>  
> @@ -538,7 +538,7 @@ static int __init evtchn_init(void)
>       if (!xen_domain())
>               return -ENODEV;
>  
> -     port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
> +     port_user = kcalloc(nr_event_channels, sizeof(*port_user), GFP_KERNEL);
>       if (port_user == NULL)
>               return -ENOMEM;
>  
> diff --git a/include/xen/events.h b/include/xen/events.h
> index 04399b2..6b117ac 100644
> --- a/include/xen/events.h
> +++ b/include/xen/events.h
> @@ -109,4 +109,6 @@ int xen_irq_from_gsi(unsigned gsi);
>  /* Determine whether to ignore this IRQ if it is passed to a guest. */
>  int xen_test_irq_shared(int irq);
>  
> +extern unsigned int nr_event_channels;
> +
>  #endif       /* _XEN_EVENTS_H */
> -- 
> 1.7.10.4
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel