
Re: [Xen-devel] [RFC PATCH 06/10] Add vmport structs



On 12/12/13 18:10, Andrew Cooper wrote:
On 12/12/2013 19:15, Don Slutz wrote:
From: Don Slutz <dslutz@xxxxxxxxxxx>

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
  xen/arch/x86/hvm/hvm.c           | 59 +++++++++++++++++++++++++++++-
  xen/include/asm-x86/hvm/domain.h |  4 +++
  xen/include/asm-x86/hvm/vmport.h | 77 ++++++++++++++++++++++++++++++++++++++++
  3 files changed, 139 insertions(+), 1 deletion(-)
  create mode 100644 xen/include/asm-x86/hvm/vmport.h

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 38641c4..fa5d382 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -57,6 +57,7 @@
  #include <asm/hvm/cacheattr.h>
  #include <asm/hvm/trace.h>
  #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/vmport.h>
  #include <asm/mtrr.h>
  #include <asm/apic.h>
  #include <public/sched.h>
@@ -536,6 +537,7 @@ static int handle_pvh_io(
  int hvm_domain_initialise(struct domain *d)
  {
      int rc;
+    long vmport_mem = 0;
if ( !hvm_enabled )
      {
@@ -562,6 +564,7 @@ int hvm_domain_initialise(struct domain *d)
spin_lock_init(&d->arch.hvm_domain.irq_lock);
      spin_lock_init(&d->arch.hvm_domain.uc_lock);
+    spin_lock_init(&d->arch.hvm_domain.vmport_lock);
INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
      spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
@@ -574,11 +577,47 @@ int hvm_domain_initialise(struct domain *d)
d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
      d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
+    d->arch.hvm_domain.vmport_data = xzalloc(struct vmport_state);
      rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler ||
+         !d->arch.hvm_domain.vmport_data )
          goto fail1;
      d->arch.hvm_domain.io_handler->num_slot = 0;
+ vmport_mem += sizeof(struct vmport_state);
+    d->arch.hvm_domain.vmport_data->open_cookie = ('C' << 8) + 'S';
This is one place where multicharacter constants would really help.
However, I suspect a change to add -Wno-multichar to the build might not
go down too well.

Maybe. This particular field is just a starting value; it gets changed on every VMware RPC channel open (hvm/vmport/vmport.c, vmport_new_chan(), line 224):

    c->cookie = vs->open_cookie++;

so using 0, 42, etc. would be just as good. These characters made it a little easier to see that the right things were happening during unit testing.
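
For reference, a minimal illustration of the two spellings under discussion (not part of the patch; the macro names are hypothetical, and the multicharacter form is implementation-defined, though GCC happens to give the same 0x4353 here):

    /* Spelling used in the patch: explicit shift-and-add. */
    #define OPEN_COOKIE_SHIFTED   (('C' << 8) + 'S')   /* 0x4353 */

    /* Multicharacter-constant spelling; GCC warns via -Wmultichar unless
     * -Wno-multichar is added to the build, which is the concern above. */
    #define OPEN_COOKIE_MULTICHAR 'CS'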



+    d->arch.hvm_domain.vmport_data->used_guestinfo = 10;
+
+    for (rc = 0; rc < d->arch.hvm_domain.vmport_data->used_guestinfo; rc++) {
Xen style - space inside brackets and braces lined up.

Well, I need to fix up a lot to match Xen style. Not sure if I can get emacs to do it all. Do you have an astyle options file that matches CODING_STYLE?

If not, it looks like I will work on one.
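
In case it is a useful starting point, something along these lines roughly approximates CODING_STYLE (an untested guess at the astyle options, not an agreed-upon file; the spaces inside keyword parentheses would still need manual attention, since I am not aware of a keyword-only inner-padding option):

    # sketch of an astyle options file for Xen CODING_STYLE -- assumptions, not official
    --style=allman          # opening braces on their own line
    --indent=spaces=4       # 4-space indent, no hard tabs
    --convert-tabs
    --pad-header            # space between if/for/while and the parenthesis
    --max-code-length=79    # keep lines under 80 columns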



+        d->arch.hvm_domain.vmport_data->guestinfo[rc] = xzalloc(vmport_guestinfo_t);
xzalloc_array() of used_guestinfo entries, and needs to be checked for
an allocation failure.


Yes. will add.
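
A rough sketch of what that loop might look like with the check added (a sketch only, using a separate loop index rather than reusing rc, and reusing the existing -ENOMEM/fail1 path; switching to a single xzalloc_array() would additionally mean making guestinfo point at one contiguous block rather than an array of pointers):

    for ( i = 0; i < d->arch.hvm_domain.vmport_data->used_guestinfo; i++ )
    {
        d->arch.hvm_domain.vmport_data->guestinfo[i] =
            xzalloc(vmport_guestinfo_t);
        if ( !d->arch.hvm_domain.vmport_data->guestinfo[i] )
            goto fail1;    /* fail1 frees any entries already allocated */
        vmport_mem += sizeof(vmport_guestinfo_t);
    }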

+        vmport_mem += sizeof(vmport_guestinfo_t);
+    }
+    d->arch.hvm_domain.vmport_data->guestinfo[0]->key_len = 2;
+    memcpy(d->arch.hvm_domain.vmport_data->guestinfo[0]->key_data, "ip", 2);
+
+    gdprintk(XENLOG_DEBUG, "vmport_mem=%ld bytes (%ld KiB, %ld MiB)\n",
+             vmport_mem,
+             (vmport_mem + (1 << 10) - 1) >> 10,
+             (vmport_mem + (1 << 20) - 1) >> 20);
+    vmport_mem += sizeof(uint64_t) * HVM_NR_PARAMS;
+    vmport_mem += sizeof(struct hvm_io_handler);
+    gdprintk(XENLOG_DEBUG, "hvm overhead=%ld bytes (%ld KiB, %ld MiB)\n",
+             vmport_mem,
+             (vmport_mem + (1 << 10) - 1) >> 10,
+             (vmport_mem + (1 << 20) - 1) >> 20);
+    gdprintk(XENLOG_DEBUG, "tot_pages=%d bytes (%d KiB, %d MiB)\n",
+             d->tot_pages,
+             (d->tot_pages + (1 << 10) - 1) >> 10,
+             (d->tot_pages + (1 << 20) - 1) >> 20);
+    gdprintk(XENLOG_DEBUG, "max_pages=%d bytes (%d KiB, %d MiB)\n",
+             d->max_pages,
+             (d->max_pages + (1 << 10) - 1) >> 10,
+             (d->max_pages + (1 << 20) - 1) >> 20);
These two gdprintk()s do not appear to be related to the content of this
patch.

You are right. They were some temporary debug information to see if the calculation of the Xen overhead on its heap per domain was giving good answers. Will drop them.

+
+#if 0
+    vmport_flush(&d->arch.hvm_domain);
+#endif
Is this stray debugging?

Nope, stray patch splitting. Will correct in the next round.


+
      if ( is_pvh_domain(d) )
      {
          register_portio_handler(d, 0, 0x10003, handle_pvh_io);
@@ -617,6 +656,15 @@ int hvm_domain_initialise(struct domain *d)
      stdvga_deinit(d);
      vioapic_deinit(d);
   fail1:
+    if (d->arch.hvm_domain.vmport_data) {
+        struct vmport_state *vs = d->arch.hvm_domain.vmport_data;
+        int idx;
+
+        for (idx = 0; idx < vs->used_guestinfo; idx++) {
+            xfree(vs->guestinfo[idx]);
+        }
+    }
+    xfree(d->arch.hvm_domain.vmport_data);
      xfree(d->arch.hvm_domain.io_handler);
      xfree(d->arch.hvm_domain.params);
   fail0:
@@ -626,6 +674,15 @@ int hvm_domain_initialise(struct domain *d)
void hvm_domain_relinquish_resources(struct domain *d)
  {
+    if (d->arch.hvm_domain.vmport_data) {
+        struct vmport_state *vs = d->arch.hvm_domain.vmport_data;
+        int idx;
+
+        for (idx = 0; idx < vs->used_guestinfo; idx++) {
+            xfree(vs->guestinfo[idx]);
+        }
+        xfree(d->arch.hvm_domain.vmport_data);
+    }
      xfree(d->arch.hvm_domain.io_handler);
      xfree(d->arch.hvm_domain.params);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index b1e3187..0ca2778 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -62,6 +62,10 @@ struct hvm_domain {
      /* emulated irq to pirq */
      struct radix_tree_root emuirq_pirq;
+ /* VMware special port */
+    spinlock_t             vmport_lock;
+    struct  vmport_state  *vmport_data;
+
Stray space between struct and vmport.

Will fix.



      uint64_t              *params;
/* Memory ranges with pinned cache attributes. */
diff --git a/xen/include/asm-x86/hvm/vmport.h b/xen/include/asm-x86/hvm/vmport.h
new file mode 100644
index 0000000..180ddac
--- /dev/null
+++ b/xen/include/asm-x86/hvm/vmport.h
@@ -0,0 +1,77 @@
+/*
+ * vmport.h: HVM VMPORT emulation
+ *
+ *
+ * Copyright (C) 2012 Verizon Corporation
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License Version 2 (GPLv2)
+ * as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details. <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_X86_HVM_VMPORT_H__
+#define __ASM_X86_HVM_VMPORT_H__
+
+/* Note: VMPORT_PORT and VMPORT_MAGIC are also defined as BDOOR_PORT
+ * and BDOOR_MAGIC in backdoor_def.h.  Defined here so that other
+ * parts of Xen can use them.
+ */
+
+#define VMPORT_PORT 0x5658
+#define VMPORT_MAGIC 0x564D5868
You should include backdoor_def.h rather than having the same constant
defined twice in the codebase.


Will look into getting it done. Including backdoor_def.h unchanged is not as simple; I did not know whether it was better to leave it untouched or just keep what is needed.
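
One possible shape, if backdoor_def.h ends up imported somewhere under the Xen include tree (the path, and whether the header is imported at all, are assumptions rather than anything settled in this thread):

    #include <asm/hvm/backdoor_def.h>   /* assumed location of the imported header */

    #define VMPORT_PORT  BDOOR_PORT
    #define VMPORT_MAGIC BDOOR_MAGIC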


+
+#define VMPORT_MAX_KEY_LEN 30
+#define VMPORT_MAX_VAL_LEN 128
+#define VMPORT_MAX_NUM_KEY 128
+
+#define VMPORT_MAX_SEND_BUF ((22 + VMPORT_MAX_KEY_LEN + VMPORT_MAX_VAL_LEN + 3)/4)
+#define VMPORT_MAX_RECV_BUF ((2 + VMPORT_MAX_VAL_LEN + 3)/4)
+#define VMPORT_MAX_CHANS    4
+#define VMPORT_MAX_BKTS     8
+
+
+typedef struct vmport_guestinfo_ {
Why the trailing underscores in the non-typedef'd name?

It does help to know this is a struct name. It came from one of the many coding styles I have used in the past. Happy to change it to whatever matches "Xen" style.

+    uint8_t key_len;
+    uint8_t val_len;
+    char key_data[VMPORT_MAX_KEY_LEN];
+    char val_data[VMPORT_MAX_VAL_LEN];
+} vmport_guestinfo_t;
+
+typedef struct vmport_bucket_ {
+    uint16_t recv_len;
+    uint16_t recv_idx;
+    uint32_t recv_buf[VMPORT_MAX_RECV_BUF + 1];
+    uint8_t  recv_slot;
+} vmport_bucket_t;
+
+typedef struct vmport_channel_ {
+    unsigned long active_time;
+    uint32_t chan_id;
+    uint32_t cookie;
+    uint32_t proto_num;
+    uint16_t send_len;
+    uint16_t send_idx;
+    uint32_t send_buf[VMPORT_MAX_SEND_BUF + 1];
+    vmport_bucket_t recv_bkt[VMPORT_MAX_BKTS];
+    uint8_t recv_read;
+    uint8_t recv_write;
+} vmport_channel_t;
+
+struct vmport_state {
+    unsigned long ping_time;
+    uint32_t open_cookie;
+    uint32_t used_guestinfo;
+    vmport_channel_t chans[VMPORT_MAX_CHANS];
+    vmport_guestinfo_t *guestinfo[VMPORT_MAX_NUM_KEY];
+};
+
+int vmport_ioport(int dir, int size, unsigned long data, struct cpu_user_regs *regs);
+void vmport_ctrl_send(struct hvm_domain *hd, char *msg, int slot);
+void vmport_flush(struct hvm_domain *hd);
These declarations need to be in the same patch as the one that defines them.

Will do.

   -Don Slutz
~Andrew

+
+#endif /* __ASM_X86_HVM_VMPORT_H__ */




 

