[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 06/28] ARM: GICv3 ITS: introduce ITS command handling



Hi Andre,

On 30/01/17 18:31, Andre Przywara wrote:
To be able to easily send commands to the ITS, create the respective
wrapper functions, which take care of the ring buffer.
The first two commands we implement provide methods to map a collection
to a redistributor (aka host core) and to flush the command queue (SYNC).
Start using these commands for mapping one collection to each host CPU.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/gic-v3-its.c         | 142 +++++++++++++++++++++++++++++++++++++-
 xen/arch/arm/gic-v3-lpi.c         |  20 ++++++
 xen/arch/arm/gic-v3.c             |  18 ++++-
 xen/include/asm-arm/gic_v3_defs.h |   2 +
 xen/include/asm-arm/gic_v3_its.h  |  36 ++++++++++
 5 files changed, 215 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index ad7cd2a..6578e8a 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -19,6 +19,7 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 #include <xen/device_tree.h>
+#include <xen/delay.h>
 #include <xen/libfdt/libfdt.h>
 #include <xen/mm.h>
 #include <xen/sizes.h>
@@ -29,6 +30,98 @@

 #define ITS_CMD_QUEUE_SZ                SZ_64K

+#define BUFPTR_MASK                     GENMASK(19, 5)
+static int its_send_command(struct host_its *hw_its, const void *its_cmd)
+{
+    uint64_t readp, writep;
+
+    spin_lock(&hw_its->cmd_lock);

Do you never expect a command to be sent in an interrupt path? I can see at least one case: we may decide to throttle the number of LPIs received by a guest, which would involve disabling the interrupt from interrupt context.

+
+    readp = readq_relaxed(hw_its->its_base + GITS_CREADR) & BUFPTR_MASK;
+    writep = readq_relaxed(hw_its->its_base + GITS_CWRITER) & BUFPTR_MASK;
+
+    if ( ((writep + ITS_CMD_SIZE) % ITS_CMD_QUEUE_SZ) == readp )
+    {

I looked at the whole series applied and there is no error message at all when the queue is full. This will make it difficult to see what's going on.

Furthermore, this limit could easily be reached — for instance, if you decide to map a device with thousands of interrupts. The function gicv3_map_its_map_host_events will issue 2 commands per event (MAPTI and INV).

So how do you plan to address this?

+        spin_unlock(&hw_its->cmd_lock);
+        return -EBUSY;
+    }
+
+    memcpy(hw_its->cmd_buf + writep, its_cmd, ITS_CMD_SIZE);
+    if ( hw_its->flags & HOST_ITS_FLUSH_CMD_QUEUE )
+        __flush_dcache_area(hw_its->cmd_buf + writep, ITS_CMD_SIZE);

Please use dcache_.... helpers.

+    else
+        dsb(ishst);
+
+    writep = (writep + ITS_CMD_SIZE) % ITS_CMD_QUEUE_SZ;
+    writeq_relaxed(writep & BUFPTR_MASK, hw_its->its_base + GITS_CWRITER);
+
+    spin_unlock(&hw_its->cmd_lock);
+
+    return 0;
+}
+
+static uint64_t encode_rdbase(struct host_its *hw_its, int cpu, uint64_t reg)

s/int cpu/unsigned int cpu/

+{
+    reg &= ~GENMASK(51, 16);
+
+    reg |= gicv3_get_redist_address(cpu, hw_its->flags & HOST_ITS_USES_PTA);
+
+    return reg;
+}
+
+static int its_send_cmd_sync(struct host_its *its, int cpu)

s/int cpu/unsigned int cpu/

+{
+    uint64_t cmd[4];
+
+    cmd[0] = GITS_CMD_SYNC;
+    cmd[1] = 0x00;
+    cmd[2] = encode_rdbase(its, cpu, 0x0);
+    cmd[3] = 0x00;
+
+    return its_send_command(its, cmd);
+}
+
+static int its_send_cmd_mapc(struct host_its *its, int collection_id, int cpu)

s/int/unsigned int/ for both collection_id and cpu.

+{
+    uint64_t cmd[4];
+
+    cmd[0] = GITS_CMD_MAPC;
+    cmd[1] = 0x00;
+    cmd[2] = encode_rdbase(its, cpu, (collection_id & GENMASK(15, 0)));

Please drop the mask here.

+    cmd[2] |= GITS_VALID_BIT;
+    cmd[3] = 0x00;
+
+    return its_send_command(its, cmd);
+}
+
+/* Set up the (1:1) collection mapping for the given host CPU. */
+int gicv3_its_setup_collection(int cpu)

So you are calling this function from gicv3_rdist_init_lpis, which makes little sense to me. This should probably be called from gicv3_cpu_init.

+{
+    struct host_its *its;
+    int ret;
+
+    list_for_each_entry(its, &host_its_list, entry)
+    {
+        /*
+         * This function is called on CPU0 before any ITSes have been
+         * properly initialized. Skip the collection setup in this case,
+         * it will be done explicitly for CPU0 upon initializing the ITS.
+         */

Looking at the code, I don't understand why you need to do that. AFAIU there is no restriction on initializing the ITS (e.g. calling gicv3_its_init) before gicv3_cpu_init.

+        if ( !its->cmd_buf )
+            continue;
+
+        ret = its_send_cmd_mapc(its, cpu, cpu);
+        if ( ret )
+            return ret;
+
+        ret = its_send_cmd_sync(its, cpu);
+        if ( ret )
+            return ret;
+    }
+
+    return 0;
+}
+
 #define BASER_ATTR_MASK                                           \
         ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT)               | \
          (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT)         | \
@@ -156,18 +249,51 @@ static int its_map_baser(void __iomem *basereg, uint64_t 
regc, int nr_items)
     return -EINVAL;
 }

+/* Wait for an ITS to become quiescient (all ITS operations completed). */

s/quiescient/quiescent/

+static int gicv3_its_wait_quiescient(struct host_its *hw_its)

s/quiescient/quiescent/

+{
+    uint32_t reg;
+    s_time_t deadline = NOW() + MILLISECS(1000);

So that sounds fine for handling a couple of commands, but what about thousands at the same time?

+
+    reg = readl_relaxed(hw_its->its_base + GITS_CTLR);
+    if ( (reg & (GITS_CTLR_QUIESCENT | GITS_CTLR_ENABLE)) == 
GITS_CTLR_QUIESCENT )
+        return 0;
+
+    writel_relaxed(reg & ~GITS_CTLR_ENABLE, hw_its->its_base + GITS_CTLR);
+
+    do {
+        reg = readl_relaxed(hw_its->its_base + GITS_CTLR);
+        if ( reg & GITS_CTLR_QUIESCENT )
+            return 0;
+
+        cpu_relax();
+        udelay(1);
+    } while ( NOW() <= deadline );
+
+    dprintk(XENLOG_ERR, "ITS not quiescient\n");

s/quiescient/quiescent/ + newline.

+    return -ETIMEDOUT;
+}
+

Cheers,

--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.