
[Xen-changelog] [xen stable-4.10] xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU



commit bbad376ab1c1c57ba31059bd2269aa9f213579d6
Author:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
AuthorDate: Thu Jan 11 11:41:19 2018 +0000
Commit:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
CommitDate: Fri Jan 12 15:47:32 2018 +0000

    xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU
    
    Note that the unmask and the virq operations are handled by the shim
    itself, and that FIFO event channels are not exposed to the guest.
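
    As a quick orientation before the patch, here is a minimal,
    standalone C sketch (illustration only, not Xen code; the EVTCHNOP_*
    values are local stand-ins) of the dispatch policy that
    pv_shim_event_channel_op() implements: unmask and VIRQ binding stay
    in the shim, the other 2-level operations are forwarded to L0 Xen,
    and FIFO/PIRQ operations are refused with -EOPNOTSUPP.

        #include <stdio.h>

        enum evtchn_op {              /* local stand-ins, not the Xen ABI codes */
            EVTCHNOP_bind_virq,
            EVTCHNOP_unmask,
            EVTCHNOP_alloc_unbound,
            EVTCHNOP_send,
            EVTCHNOP_init_control,    /* FIFO ABI - not exposed to the L2 DomU */
            EVTCHNOP_bind_pirq,       /* PIRQ - not exposed either */
        };

        static const char *shim_policy(enum evtchn_op cmd)
        {
            switch ( cmd )
            {
            case EVTCHNOP_bind_virq:
            case EVTCHNOP_unmask:
                return "handled by the shim itself";

            case EVTCHNOP_alloc_unbound:
            case EVTCHNOP_send:
                return "forwarded to L0 Xen";

            default:
                return "rejected with -EOPNOTSUPP (no FIFO/PIRQ support)";
            }
        }

        int main(void)
        {
            printf("bind_virq:    %s\n", shim_policy(EVTCHNOP_bind_virq));
            printf("unmask:       %s\n", shim_policy(EVTCHNOP_unmask));
            printf("send:         %s\n", shim_policy(EVTCHNOP_send));
            printf("init_control: %s\n", shim_policy(EVTCHNOP_init_control));
            return 0;
        }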
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Signed-off-by: Anthony Liguori <aliguori@xxxxxxxxxx>
    Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
    ---
    Changes since v1:
     - Use find_first_set_bit instead of ffsl.
     - Indent macro one more level.
     - Have a single evtchn_close struct in pv_shim_event_channel_op.
     - Add blank lines between switch cases.
     - Use -EOPNOTSUPP in order to signal lack of FIFO or PIRQ support.
     - Switch evtchn_bind_virq parameter to evtchn_port_t and use 0 to
       signal that allocation is needed (see the sketch after this list).
     - Switch evtchn helpers return type to int instead of long.
     - Re-write event channel hypercall table handler instead of adding
       hooks.
     - Remove the pv_domain variable and instead use a static variable in
       shim code.
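
    To illustrate the evtchn_bind_virq change above, here is a minimal,
    illustration-only C sketch (the struct, the helper and the port
    numbers are simplified stand-ins, not the Xen implementation) of the
    new calling convention: port 0 keeps the old "pick a free port"
    behaviour used by do_event_channel_op(), while a non-zero port
    forces the VIRQ onto a port the shim has already allocated from L0.

        #include <stdio.h>
        #include <stdint.h>

        typedef uint32_t evtchn_port_t;

        struct evtchn_bind_virq {     /* simplified stand-in for the public struct */
            uint32_t virq;
            uint32_t vcpu;
            evtchn_port_t port;       /* filled in on success */
        };

        /* Stand-in for get_free_port(): pretend port 5 is the first free one. */
        static int get_free_port(void)
        {
            return 5;
        }

        /* Simplified stand-in for the reworked evtchn_bind_virq(). */
        static int bind_virq(struct evtchn_bind_virq *bind, evtchn_port_t port)
        {
            if ( port == 0 )
            {
                int p = get_free_port();  /* old behaviour: allocate a port */

                if ( p < 0 )
                    return p;
                port = p;
            }

            bind->port = port;            /* bind to the (possibly forced) port */
            return 0;
        }

        int main(void)
        {
            struct evtchn_bind_virq bind = { .virq = 1, .vcpu = 0 };

            bind_virq(&bind, 0);      /* regular guest path: port chosen locally */
            printf("auto-allocated port %u\n", (unsigned int)bind.port);

            bind_virq(&bind, 42);     /* shim path: reuse the L0-allocated port */
            printf("forced port %u\n", (unsigned int)bind.port);

            return 0;
        }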
---
 xen/arch/x86/compat.c             |   4 +-
 xen/arch/x86/guest/xen.c          |  25 +++-
 xen/arch/x86/pv/hypercall.c       |  17 +++
 xen/arch/x86/pv/shim.c            | 263 ++++++++++++++++++++++++++++++++++++++
 xen/common/event_channel.c        |  99 ++++++++------
 xen/drivers/char/xen_pv_console.c |  11 +-
 xen/include/asm-x86/hypercall.h   |   3 +
 xen/include/asm-x86/pv/shim.h     |   5 +
 xen/include/xen/event.h           |  15 +++
 xen/include/xen/pv_console.h      |   6 +
 10 files changed, 402 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/compat.c b/xen/arch/x86/compat.c
index f417cd5034..9d376a4589 100644
--- a/xen/arch/x86/compat.c
+++ b/xen/arch/x86/compat.c
@@ -69,8 +69,8 @@ long do_event_channel_op_compat(XEN_GUEST_HANDLE_PARAM(evtchn_op_t) uop)
     case EVTCHNOP_bind_ipi:
     case EVTCHNOP_bind_vcpu:
     case EVTCHNOP_unmask:
-        return do_event_channel_op(op.cmd,
-                                   guest_handle_from_ptr(&uop.p->u, void));
+        return pv_get_hypercall_handler(__HYPERVISOR_event_channel_op, false)
+               (op.cmd, (unsigned long)&uop.p->u, 0, 0, 0, 0);
 
     default:
         return -ENOSYS;
diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index aff16a0e35..57b297ad47 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -18,6 +18,7 @@
  *
  * Copyright (c) 2017 Citrix Systems Ltd.
  */
+#include <xen/event.h>
 #include <xen/init.h>
 #include <xen/mm.h>
 #include <xen/pfn.h>
@@ -193,11 +194,31 @@ static void __init init_memmap(void)
 static void xen_evtchn_upcall(struct cpu_user_regs *regs)
 {
     struct vcpu_info *vcpu_info = this_cpu(vcpu_info);
+    unsigned long pending;
 
     vcpu_info->evtchn_upcall_pending = 0;
-    write_atomic(&vcpu_info->evtchn_pending_sel, 0);
+    pending = xchg(&vcpu_info->evtchn_pending_sel, 0);
 
-    pv_console_rx(regs);
+    while ( pending )
+    {
+        unsigned int l1 = find_first_set_bit(pending);
+        unsigned long evtchn = xchg(&XEN_shared_info->evtchn_pending[l1], 0);
+
+        __clear_bit(l1, &pending);
+        evtchn &= ~XEN_shared_info->evtchn_mask[l1];
+        while ( evtchn )
+        {
+            unsigned int port = find_first_set_bit(evtchn);
+
+            __clear_bit(port, &evtchn);
+            port += l1 * BITS_PER_LONG;
+
+            if ( pv_console && port == pv_console_evtchn() )
+                pv_console_rx(regs);
+            else if ( pv_shim )
+                pv_shim_inject_evtchn(port);
+        }
+    }
 
     ack_APIC_irq();
 }
diff --git a/xen/arch/x86/pv/hypercall.c b/xen/arch/x86/pv/hypercall.c
index f79f7eef62..3b72d6a44d 100644
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -320,6 +320,23 @@ void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
     *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
 }
 
+void __init pv_hypercall_table_replace(unsigned int hypercall,
+                                       hypercall_fn_t * native,
+                                       hypercall_fn_t *compat)
+{
+#define HANDLER_POINTER(f) \
+    ((unsigned long *)__va(__pa(&pv_hypercall_table[hypercall].f)))
+    write_atomic(HANDLER_POINTER(native), (unsigned long)native);
+    write_atomic(HANDLER_POINTER(compat), (unsigned long)compat);
+#undef HANDLER_POINTER
+}
+
+hypercall_fn_t *pv_get_hypercall_handler(unsigned int hypercall, bool compat)
+{
+    return compat ? pv_hypercall_table[hypercall].compat
+                  : pv_hypercall_table[hypercall].native;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 78351c9ee0..36f3a366d3 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -18,6 +18,8 @@
  *
  * Copyright (c) 2017 Citrix Systems Ltd.
  */
+#include <xen/event.h>
+#include <xen/guest_access.h>
 #include <xen/hypercall.h>
 #include <xen/init.h>
 #include <xen/shutdown.h>
@@ -35,6 +37,10 @@ bool pv_shim;
 boolean_param("pv-shim", pv_shim);
 #endif
 
+static struct domain *guest;
+
+static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
                  _PAGE_GUEST_KERNEL)
 #define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
@@ -63,6 +69,27 @@ static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
                                                       : COMPAT_L1_PROT));
 }
 
+static void evtchn_reserve(struct domain *d, unsigned int port)
+{
+    ASSERT(port_is_valid(d, port));
+    evtchn_from_port(d, port)->state = ECS_RESERVED;
+    BUG_ON(xen_hypercall_evtchn_unmask(port));
+}
+
+static bool evtchn_handled(struct domain *d, unsigned int port)
+{
+    ASSERT(port_is_valid(d, port));
+    /* The shim manages VIRQs, the rest is forwarded to L0. */
+    return evtchn_from_port(d, port)->state == ECS_VIRQ;
+}
+
+static void evtchn_assign_vcpu(struct domain *d, unsigned int port,
+                               unsigned int vcpu)
+{
+    ASSERT(port_is_valid(d, port));
+    evtchn_from_port(d, port)->notify_vcpu_id = vcpu;
+}
+
 void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
                               unsigned long va_start, unsigned long store_va,
                               unsigned long console_va, unsigned long vphysmap,
@@ -82,6 +109,11 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
         replace_va_mapping(d, l4start, va, param);                             \
         dom0_update_physmap(d, PFN_DOWN((va) - va_start), param, vphysmap);    \
     }                                                                          \
+    else                                                                       \
+    {                                                                          \
+        BUG_ON(evtchn_allocate_port(d, param));                                \
+        evtchn_reserve(d, param);                                              \
+    }                                                                          \
 })
     SET_AND_MAP_PARAM(HVM_PARAM_STORE_PFN, si->store_mfn, store_va);
     SET_AND_MAP_PARAM(HVM_PARAM_STORE_EVTCHN, si->store_evtchn, 0);
@@ -92,6 +124,10 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
         SET_AND_MAP_PARAM(HVM_PARAM_CONSOLE_EVTCHN, si->console.domU.evtchn, 0);
     }
 #undef SET_AND_MAP_PARAM
+    pv_hypercall_table_replace(__HYPERVISOR_event_channel_op,
+                               (hypercall_fn_t *)pv_shim_event_channel_op,
+                               (hypercall_fn_t *)pv_shim_event_channel_op);
+    guest = d;
 }
 
 void pv_shim_shutdown(uint8_t reason)
@@ -100,6 +136,233 @@ void pv_shim_shutdown(uint8_t reason)
     xen_hypercall_shutdown(reason);
 }
 
+static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    struct domain *d = current->domain;
+    struct evtchn_close close;
+    long rc;
+
+    switch ( cmd )
+    {
+#define EVTCHN_FORWARD(cmd, port_field)                                     \
+    case EVTCHNOP_##cmd: {                                                  \
+        struct evtchn_##cmd op;                                             \
+                                                                            \
+        if ( copy_from_guest(&op, arg, 1) != 0 )                            \
+            return -EFAULT;                                                 \
+                                                                            \
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_##cmd, &op);           \
+        if ( rc )                                                           \
+            break;                                                          \
+                                                                            \
+        spin_lock(&d->event_lock);                                          \
+        rc = evtchn_allocate_port(d, op.port_field);                        \
+        if ( rc )                                                           \
+        {                                                                   \
+            close.port = op.port_field;                                     \
+            BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close)); \
+        }                                                                   \
+        else                                                                \
+            evtchn_reserve(d, op.port_field);                               \
+        spin_unlock(&d->event_lock);                                        \
+                                                                            \
+        if ( !rc && __copy_to_guest(arg, &op, 1) )                          \
+            rc = -EFAULT;                                                   \
+                                                                            \
+        break;                                                              \
+        }
+
+    EVTCHN_FORWARD(alloc_unbound, port)
+    EVTCHN_FORWARD(bind_interdomain, local_port)
+#undef EVTCHN_FORWARD
+
+    case EVTCHNOP_bind_virq: {
+        struct evtchn_bind_virq virq;
+        struct evtchn_alloc_unbound alloc = {
+            .dom = DOMID_SELF,
+            .remote_dom = DOMID_SELF,
+        };
+
+        if ( copy_from_guest(&virq, arg, 1) != 0 )
+            return -EFAULT;
+        /*
+         * The event channel space is actually controlled by L0 Xen, so
+         * allocate a port from L0 and then force the VIRQ to be bound to that
+         * specific port.
+         *
+         * This is only required for VIRQ because the rest of the event channel
+         * operations are handled directly by L0.
+         */
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
+        if ( rc )
+           break;
+
+        /* Force L1 to use the event channel port allocated on L0. */
+        rc = evtchn_bind_virq(&virq, alloc.port);
+        if ( rc )
+        {
+            close.port = alloc.port;
+            BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+        }
+
+        if ( !rc && __copy_to_guest(arg, &virq, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+
+    case EVTCHNOP_status: {
+        struct evtchn_status status;
+
+        if ( copy_from_guest(&status, arg, 1) != 0 )
+            return -EFAULT;
+
+        /*
+         * NB: if the event channel is not handled by the shim, just forward
+         * the status request to L0, even if the port is not valid.
+         */
+        if ( port_is_valid(d, status.port) && evtchn_handled(d, status.port) )
+            rc = evtchn_status(&status);
+        else
+            rc = xen_hypercall_event_channel_op(EVTCHNOP_status, &status);
+
+        break;
+    }
+
+    case EVTCHNOP_bind_vcpu: {
+        struct evtchn_bind_vcpu vcpu;
+
+        if ( copy_from_guest(&vcpu, arg, 1) != 0 )
+            return -EFAULT;
+
+        if ( !port_is_valid(d, vcpu.port) )
+            return -EINVAL;
+
+        if ( evtchn_handled(d, vcpu.port) )
+            rc = evtchn_bind_vcpu(vcpu.port, vcpu.vcpu);
+        else
+        {
+            rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_vcpu, &vcpu);
+            if ( !rc )
+                 evtchn_assign_vcpu(d, vcpu.port, vcpu.vcpu);
+        }
+
+        break;
+    }
+
+    case EVTCHNOP_close: {
+        if ( copy_from_guest(&close, arg, 1) != 0 )
+            return -EFAULT;
+
+        if ( !port_is_valid(d, close.port) )
+            return -EINVAL;
+
+        set_bit(close.port, XEN_shared_info->evtchn_mask);
+
+        if ( evtchn_handled(d, close.port) )
+        {
+            rc = evtchn_close(d, close.port, true);
+            if ( rc )
+                break;
+        }
+        else
+            evtchn_free(d, evtchn_from_port(d, close.port));
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_close, &close);
+        if ( rc )
+            /*
+             * If the port cannot be closed on L0, mark it as reserved
+             * in the shim to avoid re-using it.
+             */
+            evtchn_reserve(d, close.port);
+
+        break;
+    }
+
+    case EVTCHNOP_bind_ipi: {
+        struct evtchn_bind_ipi ipi;
+
+        if ( copy_from_guest(&ipi, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_ipi, &ipi);
+        if ( rc )
+            break;
+
+        spin_lock(&d->event_lock);
+        rc = evtchn_allocate_port(d, ipi.port);
+        if ( rc )
+        {
+            spin_unlock(&d->event_lock);
+
+            close.port = ipi.port;
+            BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+            break;
+        }
+
+        evtchn_assign_vcpu(d, ipi.port, ipi.vcpu);
+        evtchn_reserve(d, ipi.port);
+        spin_unlock(&d->event_lock);
+
+        if ( __copy_to_guest(arg, &ipi, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+
+    case EVTCHNOP_unmask: {
+        struct evtchn_unmask unmask;
+
+        if ( copy_from_guest(&unmask, arg, 1) != 0 )
+            return -EFAULT;
+
+        /* Unmask is handled in L1 */
+        rc = evtchn_unmask(unmask.port);
+
+        break;
+    }
+
+    case EVTCHNOP_send: {
+        struct evtchn_send send;
+
+        if ( copy_from_guest(&send, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_send, &send);
+
+        break;
+    }
+
+    case EVTCHNOP_reset: {
+        struct evtchn_reset reset;
+
+        if ( copy_from_guest(&reset, arg, 1) != 0 )
+            return -EFAULT;
+
+        rc = xen_hypercall_event_channel_op(EVTCHNOP_reset, &reset);
+
+        break;
+    }
+
+    default:
+        /* No FIFO or PIRQ support for now */
+        rc = -EOPNOTSUPP;
+        break;
+    }
+
+    return rc;
+}
+
+void pv_shim_inject_evtchn(unsigned int port)
+{
+    if ( port_is_valid(guest, port) )
+    {
+         struct evtchn *chn = evtchn_from_port(guest, port);
+
+         evtchn_port_set_pending(guest, chn->notify_vcpu_id, chn);
+    }
+}
+
 domid_t get_initial_domain_id(void)
 {
     uint32_t eax, ebx, ecx, edx;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c69f9db6db..be834c5c78 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -156,46 +156,62 @@ static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
     xfree(bucket);
 }
 
+int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
+{
+    if ( port > d->max_evtchn_port || port >= d->max_evtchns )
+        return -ENOSPC;
+
+    if ( port_is_valid(d, port) )
+    {
+        if ( evtchn_from_port(d, port)->state != ECS_FREE ||
+             evtchn_port_is_busy(d, port) )
+            return -EBUSY;
+    }
+    else
+    {
+        struct evtchn *chn;
+        struct evtchn **grp;
+
+        if ( !group_from_port(d, port) )
+        {
+            grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+            if ( !grp )
+                return -ENOMEM;
+            group_from_port(d, port) = grp;
+        }
+
+        chn = alloc_evtchn_bucket(d, port);
+        if ( !chn )
+            return -ENOMEM;
+        bucket_from_port(d, port) = chn;
+
+        write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+    }
+
+    return 0;
+}
+
 static int get_free_port(struct domain *d)
 {
-    struct evtchn *chn;
-    struct evtchn **grp;
     int            port;
 
     if ( d->is_dying )
         return -EINVAL;
 
-    for ( port = 0; port_is_valid(d, port); port++ )
+    for ( port = 0; port <= d->max_evtchn_port; port++ )
     {
-        if ( port > d->max_evtchn_port )
-            return -ENOSPC;
-        if ( evtchn_from_port(d, port)->state == ECS_FREE
-             && !evtchn_port_is_busy(d, port) )
-            return port;
-    }
+        int rc = evtchn_allocate_port(d, port);
 
-    if ( port == d->max_evtchns || port > d->max_evtchn_port )
-        return -ENOSPC;
+        if ( rc == -EBUSY )
+            continue;
 
-    if ( !group_from_port(d, port) )
-    {
-        grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
-        if ( !grp )
-            return -ENOMEM;
-        group_from_port(d, port) = grp;
+        return port;
     }
 
-    chn = alloc_evtchn_bucket(d, port);
-    if ( !chn )
-        return -ENOMEM;
-    bucket_from_port(d, port) = chn;
-
-    write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
-
-    return port;
+    return -ENOSPC;
 }
 
-static void free_evtchn(struct domain *d, struct evtchn *chn)
+void evtchn_free(struct domain *d, struct evtchn *chn)
 {
     /* Clear pending event to avoid unexpected behavior on re-bind. */
     evtchn_port_clear_pending(d, chn);
@@ -345,13 +361,13 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 }
 
 
-static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
 {
     struct evtchn *chn;
     struct vcpu   *v;
     struct domain *d = current->domain;
-    int            port, virq = bind->virq, vcpu = bind->vcpu;
-    long           rc = 0;
+    int            virq = bind->virq, vcpu = bind->vcpu;
+    int            rc = 0;
 
     if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
         return -EINVAL;
@@ -368,8 +384,19 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
     if ( v->virq_to_evtchn[virq] != 0 )
         ERROR_EXIT(-EEXIST);
 
-    if ( (port = get_free_port(d)) < 0 )
-        ERROR_EXIT(port);
+    if ( port != 0 )
+    {
+        if ( (rc = evtchn_allocate_port(d, port)) != 0 )
+            ERROR_EXIT(rc);
+    }
+    else
+    {
+        int alloc_port = get_free_port(d);
+
+        if ( alloc_port < 0 )
+            ERROR_EXIT(alloc_port);
+        port = alloc_port;
+    }
 
     chn = evtchn_from_port(d, port);
 
@@ -511,7 +538,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
 }
 
 
-static long evtchn_close(struct domain *d1, int port1, bool_t guest)
+int evtchn_close(struct domain *d1, int port1, bool guest)
 {
     struct domain *d2 = NULL;
     struct vcpu   *v;
@@ -619,7 +646,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
 
         double_evtchn_lock(chn1, chn2);
 
-        free_evtchn(d1, chn1);
+        evtchn_free(d1, chn1);
 
         chn2->state = ECS_UNBOUND;
         chn2->u.unbound.remote_domid = d1->domain_id;
@@ -633,7 +660,7 @@ static long evtchn_close(struct domain *d1, int port1, bool_t guest)
     }
 
     spin_lock(&chn1->lock);
-    free_evtchn(d1, chn1);
+    evtchn_free(d1, chn1);
     spin_unlock(&chn1->lock);
 
  out:
@@ -839,7 +866,7 @@ static void clear_global_virq_handlers(struct domain *d)
     }
 }
 
-static long evtchn_status(evtchn_status_t *status)
+int evtchn_status(evtchn_status_t *status)
 {
     struct domain   *d;
     domid_t          dom = status->dom;
@@ -1056,7 +1083,7 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct evtchn_bind_virq bind_virq;
         if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
             return -EFAULT;
-        rc = evtchn_bind_virq(&bind_virq);
+        rc = evtchn_bind_virq(&bind_virq, 0);
         if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
             rc = -EFAULT; /* Cleaning up here would be a mess! */
         break;
diff --git a/xen/drivers/char/xen_pv_console.c b/xen/drivers/char/xen_pv_console.c
index d4f0532101..948343303e 100644
--- a/xen/drivers/char/xen_pv_console.c
+++ b/xen/drivers/char/xen_pv_console.c
@@ -88,6 +88,11 @@ static void notify_daemon(void)
     xen_hypercall_evtchn_send(cons_evtchn);
 }
 
+evtchn_port_t pv_console_evtchn(void)
+{
+    return cons_evtchn;
+}
+
 size_t pv_console_rx(struct cpu_user_regs *regs)
 {
     char c;
@@ -97,10 +102,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
     if ( !cons_ring )
         return 0;
 
-    /* TODO: move this somewhere */
-    if ( !test_bit(cons_evtchn, XEN_shared_info->evtchn_pending) )
-        return 0;
-
     prod = ACCESS_ONCE(cons_ring->in_prod);
     cons = cons_ring->in_cons;
 
@@ -125,8 +126,6 @@ size_t pv_console_rx(struct cpu_user_regs *regs)
     ACCESS_ONCE(cons_ring->in_cons) = cons;
     notify_daemon();
 
-    clear_bit(cons_evtchn, XEN_shared_info->evtchn_pending);
-
     return recv;
 }
 
diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h
index 3eb4a8db89..b9f3ecf9a3 100644
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -28,6 +28,9 @@ extern const hypercall_args_t hypercall_args_table[NR_hypercalls];
 void pv_hypercall(struct cpu_user_regs *regs);
 void hypercall_page_initialise_ring3_kernel(void *hypercall_page);
 void hypercall_page_initialise_ring1_kernel(void *hypercall_page);
+void pv_hypercall_table_replace(unsigned int hypercall, hypercall_fn_t * native,
+                                hypercall_fn_t *compat);
+hypercall_fn_t *pv_get_hypercall_handler(unsigned int hypercall, bool compat);
 
 /*
  * Both do_mmuext_op() and do_mmu_update():
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index ff7c050dc6..ab656fd854 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -36,6 +36,7 @@ void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
                        unsigned long console_va, unsigned long vphysmap,
                        start_info_t *si);
 void pv_shim_shutdown(uint8_t reason);
+void pv_shim_inject_evtchn(unsigned int port);
 domid_t get_initial_domain_id(void);
 
 #else
@@ -53,6 +54,10 @@ static inline void pv_shim_shutdown(uint8_t reason)
 {
     ASSERT_UNREACHABLE();
 }
+static inline void pv_shim_inject_evtchn(unsigned int port)
+{
+    ASSERT_UNREACHABLE();
+}
 static inline domid_t get_initial_domain_id(void)
 {
     return 0;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 87915ead69..ebb879e88d 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -48,6 +48,21 @@ int evtchn_send(struct domain *d, unsigned int lport);
 /* Bind a local event-channel port to the specified VCPU. */
 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
 
+/* Bind a VIRQ. */
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port);
+
+/* Get the status of an event channel port. */
+int evtchn_status(evtchn_status_t *status);
+
+/* Close an event channel. */
+int evtchn_close(struct domain *d1, int port1, bool guest);
+
+/* Free an event channel. */
+void evtchn_free(struct domain *d, struct evtchn *chn);
+
+/* Allocate a specific event channel port. */
+int evtchn_allocate_port(struct domain *d, unsigned int port);
+
 /* Unmask a local event-channel port. */
 int evtchn_unmask(unsigned int port);
 
diff --git a/xen/include/xen/pv_console.h b/xen/include/xen/pv_console.h
index e578b56620..cb92539666 100644
--- a/xen/include/xen/pv_console.h
+++ b/xen/include/xen/pv_console.h
@@ -10,6 +10,7 @@ void pv_console_set_rx_handler(serial_rx_fn fn);
 void pv_console_init_postirq(void);
 void pv_console_puts(const char *buf);
 size_t pv_console_rx(struct cpu_user_regs *regs);
+evtchn_port_t pv_console_evtchn(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void pv_console_set_rx_handler(serial_rx_fn fn) { }
 static inline void pv_console_init_postirq(void) { }
 static inline void pv_console_puts(const char *buf) { }
 static inline size_t pv_console_rx(struct cpu_user_regs *regs) { return 0; }
+static inline evtchn_port_t pv_console_evtchn(void)
+{
+    ASSERT_UNREACHABLE();
+    return 0;
+}
 
 #endif /* !CONFIG_XEN_GUEST */
 #endif /* __XEN_PV_CONSOLE_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.10
