[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v5 12/17] xen/arm: Data abort exception (R/W) mem_events.





On Thu, Sep 11, 2014 at 11:19 PM, Julien Grall <julien.grall@xxxxxxxxxx> wrote:
Hello Tamas,

You forgot to add the permission to the radix tree when a table is shattered.

On 10/09/14 06:28, Tamas K Lengyel wrote:
  #define P2M_ONE_DESCEND        0
  #define P2M_ONE_PROGRESS_NOP   0x1
  #define P2M_ONE_PROGRESS       0x10
@@ -504,6 +570,10 @@ static int apply_one_level(struct domain *d,
              page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
              if ( page )
              {
+                rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);

It's possible to allocate a 2M/1G mapping here. In the case of memaccess you only want 4K mapping for granularity.

Right, I should only set it if level==3.
 

+                if ( rc < 0 )

You should free the page via free_domheap_pages if Xen fails to add the access type to the radix tree.
 

Ack.
 
+                    return rc;
+
                  pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                  if ( level < 3 )
                      pte.p2m.table = 0;
@@ -538,6 +608,10 @@ static int apply_one_level(struct domain *d,
             /* We do not handle replacing an existing table with a superpage */
               (level == 3 || !p2m_table(orig_pte)) )
          {
+            rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+            if ( rc < 0 )
+                return rc;
+

Same remark here about the mapping.


Ack.
 
              /* New mapping is superpage aligned, make it */
              pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
              if ( level < 3 )
@@ -663,6 +737,7 @@ static int apply_one_level(struct domain *d,

          memset(&pte, 0x00, sizeof(pte));
          p2m_write_pte(entry, pte, flush_cache);
+        radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));

          *addr += level_size;
          *maddr += level_size;
@@ -707,6 +782,53 @@ static int apply_one_level(struct domain *d,
              *addr += PAGE_SIZE;
              return P2M_ONE_PROGRESS_NOP;
          }
+
+    case MEMACCESS:
+        if ( level < 3 )
+        {
+            if ( !p2m_valid(orig_pte) )
+            {
+                (*addr)++;

Why increment by 1? If the PTE doesn't contain a valid mapping, you want to skip the whole level range, i.e.:

*addr += level_size;


It doesn't make a difference, apply_p2m_changes is called with start=paddr, end=paddr+1 from a separate loop. So just incrementing it by one or a whole level achieves the same effect, that is, the apply_p2m_changes loop breaks.
 
+                return P2M_ONE_PROGRESS_NOP;
+            }
+
+            /* Shatter large pages as we descend */
+            if ( p2m_mapping(orig_pte) )
+            {
+                rc = p2m_create_table(d, entry,
+                                      level_shift - PAGE_SHIFT, flush_cache);
+                if ( rc < 0 )
+                    return rc;
+
+                p2m->stats.shattered[level]++;
+                p2m->stats.mappings[level]--;
+                p2m->stats.mappings[level+1] += LPAE_ENTRIES;
+            } /* else: an existing table mapping -> descend */
+

This piece of code is exactly the same in INSERT, REMOVE and now MEMACCESS. I would create a helper to shatter the page and update the stats.

Ack.
 

+            return P2M_ONE_DESCEND;
+        }
+        else
+        {
+            pte = orig_pte;
+
+            if ( !p2m_table(pte) )
+                pte.bits = 0;
+
+            if ( p2m_valid(pte) )
+            {
+                ASSERT(pte.p2m.type != p2m_invalid);
+
+                rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+                if ( rc < 0 )
+                    return rc;
+
+                p2m_set_permission(&pte, pte.p2m.type, a);
+                p2m_write_pte(entry, pte, flush_cache);
+            }
+
+            (*addr)++;

*addr += PAGE_SIZE;

[..]


+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_access_t a;
+    long rc = 0;
+    paddr_t paddr;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+#undef ACCESS
+    };
+
+    switch ( access )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        a = memaccess[access];
+        break;
+    case XENMEM_access_default:
+        a = p2m->default_access;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    /* If request to set default access */
+    if ( pfn == ~0ul )
+    {
+        p2m->default_access = a;
+        return 0;
+    }
+
+    for ( pfn += start; nr > start; ++pfn )
+    {
+        paddr = pfn_to_paddr(pfn);
+        rc = apply_p2m_changes(d, MEMACCESS, paddr, paddr+1, 0, MATTR_MEM, 0, a);

Hmmm... why didn't you directly call apply_p2m_changes with the whole range?


Because of the hypercall continuation. Setting mem_access permissions needs to be preemptible, and it has its own separate routine to do that here. See http://xenbits.xen.org/xsa/advisory-89.html for more info.
 
[..]


+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+                       xenmem_access_t *access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    void *i;
+    unsigned int index;
+
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+            ACCESS(n),
+            ACCESS(r),
+            ACCESS(w),
+            ACCESS(rw),
+            ACCESS(x),
+            ACCESS(rx),
+            ACCESS(wx),
+            ACCESS(rwx),
+#undef ACCESS
+    };
+
+    /* If request to get default access */
+    if ( gpfn == ~0ull )
+    {
+        *access = memaccess[p2m->default_access];
+        return 0;
+    }
+
+    spin_lock(&p2m->lock);
+
+    i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+
+    spin_unlock(&p2m->lock);
+
+    if ( !i )
+        return -ESRCH;

If the gpfn is not in the radix tree, it means that either the mapping doesn't exist or the access type is p2m_access_rwx.

You handle the former case but not the latter.

Ack.
 

Regards,

--
Julien Grall


Thanks!
Tamas
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.