|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC PATCH V5 08/14] xen: dynamically allocate cpu_evtchn_mask
The size of cpu_evtchn_mask can change, so use dynamic allocation to cope with
this. To save space, cpu_evtchn_mask is not allocated for offline CPUs. It
will get allocated as soon as a CPU goes online.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
drivers/xen/events.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 217efb2..ee35ff9 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
+#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/desc.h>
@@ -156,8 +157,7 @@ static bool (*pirq_needs_eoi)(unsigned irq);
/* Find the first set bit in a evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
-static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS_L2/BITS_PER_EVTCHN_WORD],
- cpu_evtchn_mask);
+static DEFINE_PER_CPU(xen_ulong_t *, cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -356,6 +356,9 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned
int cpu)
static void init_evtchn_cpu_bindings(void)
{
int i;
+ unsigned int nr = xen_nr_event_channels / BITS_PER_EVTCHN_WORD;
+ unsigned int nr_bytes = nr * sizeof(xen_ulong_t);
+
#ifdef CONFIG_SMP
struct irq_info *info;
@@ -366,9 +369,9 @@ static void init_evtchn_cpu_bindings(void)
}
#endif
- for_each_possible_cpu(i)
+ for_each_online_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, nr_bytes);
}
static inline void clear_evtchn(int port)
@@ -1867,6 +1870,41 @@ const struct evtchn_ops evtchn_l2_ops = {
.do_upcall = __xen_evtchn_do_upcall_l2
};
+static int __cpuinit xen_events_notifier_cb(struct notifier_block *self,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (long)hcpu;
+ int rc = NOTIFY_OK;
+ void *p;
+ unsigned int nr = xen_nr_event_channels / BITS_PER_EVTCHN_WORD;
+ unsigned int nr_bytes = nr * sizeof(xen_ulong_t);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!per_cpu(cpu_evtchn_mask, cpu)) {
+ p = kzalloc_node(sizeof(xen_ulong_t) * nr,
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!p)
+ rc = NOTIFY_BAD;
+ else {
+ per_cpu(cpu_evtchn_mask, cpu) = p;
+ memset(per_cpu(cpu_evtchn_mask, cpu),
+ (cpu == 0) ? ~0 : 0, nr_bytes);
+ rc = NOTIFY_OK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+static struct notifier_block xen_events_notifier __cpuinitdata = {
+ .notifier_call = xen_events_notifier_cb,
+};
+
void __init xen_init_IRQ(void)
{
int i;
@@ -1890,6 +1928,17 @@ void __init xen_init_IRQ(void)
for (i = 0; i < xen_nr_event_channels; i++)
evtchn_to_irq[i] = -1;
+ for_each_online_cpu(cpu) {
+ void *p;
+ unsigned int nr = xen_nr_event_channels / BITS_PER_EVTCHN_WORD;
+
+ p = kzalloc_node(sizeof(xen_ulong_t) * nr,
+ GFP_KERNEL, cpu_to_node(cpu));
+ BUG_ON(!p);
+ per_cpu(cpu_evtchn_mask, cpu) = p;
+ }
+ register_cpu_notifier(&xen_events_notifier);
+
init_evtchn_cpu_bindings();
/* No event channels are 'live' right now. */
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |