[Xen-devel] [PATCH RFC v2 09/12] xen/arm: Data abort exception (R/W) mem_events.
This patch adds support for storing, setting, checking and delivering LPAE R/W mem_events.
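To make the decision logic easier to review: on a permission fault, the
handler looks up the stored access type for the gfn and flags a violation
according to the matrix in p2m_mem_access_check() below. A standalone sketch
of that matrix (is_violation() and the access_t type are illustrative only,
not part of this patch):

    #include <stdbool.h>

    /* Mirrors the xenmem_access_t violation matrix used in
     * p2m_mem_access_check() below; all names here are illustrative. */
    typedef enum {
        ACCESS_N, ACCESS_R, ACCESS_W, ACCESS_X,
        ACCESS_RX, ACCESS_WX, ACCESS_RW, ACCESS_RWX,
    } access_t;

    static bool is_violation(access_t xma, bool r, bool w, bool x)
    {
        switch ( xma )
        {
        case ACCESS_R:   return w || x;       /* only reads allowed */
        case ACCESS_W:   return r || x;
        case ACCESS_X:   return r || w;
        case ACCESS_RX:  return w;
        case ACCESS_WX:  return r;
        case ACCESS_RW:  return x;
        case ACCESS_RWX: return false;        /* everything allowed */
        case ACCESS_N:                        /* nothing allowed */
        default:         return r || w || x;
        }
    }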
Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v2: - Patch has been split to ease the review process.
- Add definitions of data abort fault status codes (enum dabt_dfsc)
and only call p2m_mem_access_check for traps caused by permission
violations.
- Only call p2m_write_pte in p2m_lookup if the PTE permission actually
changed.
- Properly save settings in the Radix tree and pause the VCPU with
mem_event_vcpu_pause.
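- Note on the Radix tree usage: Xen's radix-tree helpers store one pointer
per unsigned long key, and radix_tree_int_to_ptr()/radix_tree_ptr_to_int()
pack the access setting into that pointer. A minimal sketch of the
save/lookup pattern used by p2m_set_mem_access()/p2m_get_mem_access() below
(save_setting() and load_setting() are illustrative names, error handling
trimmed):

    #include <xen/radix-tree.h>
    #include <xen/errno.h>

    static int save_setting(struct radix_tree_root *tree,
                            unsigned long pfn, int access)
    {
        int rc = radix_tree_insert(tree, pfn, radix_tree_int_to_ptr(access));

        if ( rc == -EEXIST )
        {
            /* A setting already exists for this pfn; replace it in place. */
            radix_tree_replace_slot(radix_tree_lookup_slot(tree, pfn),
                                    radix_tree_int_to_ptr(access));
            rc = 0;
        }

        return rc;
    }

    static int load_setting(struct radix_tree_root *tree, unsigned long pfn)
    {
        void *p = radix_tree_lookup(tree, pfn);

        return p ? radix_tree_ptr_to_int(p) : -ESRCH;
    }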
---
xen/arch/arm/p2m.c | 406 ++++++++++++++++++++++++++++++++++------
xen/arch/arm/traps.c | 37 +++-
xen/include/asm-arm/p2m.h | 29 ++-
xen/include/asm-arm/processor.h | 30 +++
4 files changed, 439 insertions(+), 63 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index a6dea5b..c18e2ef 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,7 @@
#include <asm/event.h>
#include <asm/hardirq.h>
#include <asm/page.h>
+#include <xen/radix-tree.h>
#include <xen/mem_event.h>
#include <public/mem_event.h>
#include <xen/mem_access.h>
@@ -148,16 +149,99 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
return __map_domain_page(page);
}
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+ /* First apply type permissions */
+ switch (t)
+ {
+ case p2m_ram_rw:
+ e->p2m.xn = 0;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_ram_ro:
+ e->p2m.xn = 0;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_iommu_map_rw:
+ case p2m_map_foreign:
+ case p2m_grant_map_rw:
+ case p2m_mmio_direct:
+ e->p2m.xn = 1;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_iommu_map_ro:
+ case p2m_grant_map_ro:
+ case p2m_invalid:
+ e->p2m.xn = 1;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_max_real_type:
+ BUG();
+ break;
+ }
+
+ /* Then restrict with access permissions */
+ switch ( a )
+ {
+ case p2m_access_n:
+ e->p2m.read = e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_r:
+ e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_x:
+ e->p2m.write = 0;
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rx:
+ e->p2m.write = 0;
+ break;
+ case p2m_access_w:
+ e->p2m.read = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_rw:
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_wx:
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rwx:
+ break;
+ }
+}
+
+static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
+{
+ write_pte(p, pte);
+ if ( flush_cache )
+ clean_xen_dcache(*p);
+}
+
/*
* Lookup the MFN corresponding to a domain's PFN.
*
* There are no processor functions to do a stage 2 only lookup therefore we
* do a software walk.
+ *
+ * [IN]: d Domain
+ * [IN]: paddr IPA
+ * [IN]: a (Optional) Update PTE access permission
+ * [OUT]: t (Optional) Return PTE type
*/
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
+paddr_t p2m_lookup(struct domain *d,
+ paddr_t paddr,
+ p2m_access_t *a,
+ p2m_type_t *t)
{
struct p2m_domain *p2m = &d->arch.p2m;
- lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
+ lpae_t pte, *pte_loc, *first = NULL, *second = NULL, *third = NULL;
paddr_t maddr = INVALID_PADDR;
paddr_t mask;
p2m_type_t _t;
@@ -167,20 +251,20 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
*t = p2m_invalid;
- spin_lock(&p2m->lock);
-
first = p2m_map_first(p2m, paddr);
if ( !first )
goto err;
mask = FIRST_MASK;
- pte = first[first_table_offset(paddr)];
+ pte_loc = &first[first_table_offset(paddr)];
+ pte = *pte_loc;
if ( !p2m_table(pte) )
goto done;
mask = SECOND_MASK;
second = map_domain_page(pte.p2m.base);
- pte = second[second_table_offset(paddr)];
+ pte_loc = &second[second_table_offset(paddr)];
+ pte = *pte_loc;
if ( !p2m_table(pte) )
goto done;
@@ -189,7 +273,8 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
BUILD_BUG_ON(THIRD_MASK != PAGE_MASK);
third = map_domain_page(pte.p2m.base);
- pte = third[third_table_offset(paddr)];
+ pte_loc = &third[third_table_offset(paddr)];
+ pte = *pte_loc;
/* This bit must be one in the level 3 entry */
if ( !p2m_table(pte) )
@@ -200,6 +285,21 @@ done:
{
ASSERT(pte.p2m.type != p2m_invalid);
maddr = (pte.bits & PADDR_MASK & mask) | (paddr & ~mask);
+ ASSERT(mfn_valid(maddr >> PAGE_SHIFT));
+
+ if ( a )
+ {
+ p2m_set_permission(&pte, pte.p2m.type, *a);
+
+ /* Only write the PTE if the access permissions changed */
+ if ( pte.p2m.read != pte_loc->p2m.read ||
+ pte.p2m.write != pte_loc->p2m.write ||
+ pte.p2m.xn != pte_loc->p2m.xn )
+ {
+ p2m_write_pte(pte_loc, pte, 1);
+ }
+ }
+
*t = pte.p2m.type;
}
@@ -208,8 +308,6 @@ done:
if (first) unmap_domain_page(first);
err:
- spin_unlock(&p2m->lock);
-
return maddr;
}
@@ -228,7 +326,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
}
static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
- p2m_type_t t)
+ p2m_type_t t, p2m_access_t a)
{
paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
/* sh, xn and write bit will be defined in the following switches
@@ -258,37 +356,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
break;
}
- switch (t)
- {
- case p2m_ram_rw:
- e.p2m.xn = 0;
- e.p2m.write = 1;
- break;
-
- case p2m_ram_ro:
- e.p2m.xn = 0;
- e.p2m.write = 0;
- break;
-
- case p2m_iommu_map_rw:
- case p2m_map_foreign:
- case p2m_grant_map_rw:
- case p2m_mmio_direct:
- e.p2m.xn = 1;
- e.p2m.write = 1;
- break;
-
- case p2m_iommu_map_ro:
- case p2m_grant_map_ro:
- case p2m_invalid:
- e.p2m.xn = 1;
- e.p2m.write = 0;
- break;
-
- case p2m_max_real_type:
- BUG();
- break;
- }
+ p2m_set_permission(&e, t, a);
ASSERT(!(pa & ~PAGE_MASK));
ASSERT(!(pa & ~PADDR_MASK));
@@ -298,13 +366,6 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
return e;
}
-static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
-{
- write_pte(p, pte);
- if ( flush_cache )
- clean_xen_dcache(*p);
-}
-
/*
* Allocate a new page table page and hook it in via the given entry.
* apply_one_level relies on this returning 0 on success
@@ -346,7 +407,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
for ( i=0 ; i < LPAE_ENTRIES; i++ )
{
pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
- MATTR_MEM, t);
+ MATTR_MEM, t, p2m->default_access);
/*
* First and second level super pages set p2m.table = 0, but
@@ -366,7 +427,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
unmap_domain_page(p);
- pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+ pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+ p2m->default_access);
p2m_write_pte(entry, pte, flush_cache);
@@ -498,7 +559,7 @@ static int apply_one_level(struct domain *d,
page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
if ( page )
{
- pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+ pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0;
p2m_write_pte(entry, pte, flush_cache);
@@ -533,7 +594,7 @@ static int apply_one_level(struct domain *d,
(level == 3 || !p2m_table(orig_pte)) )
{
/* New mapping is superpage aligned, make it */
- pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+ pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0; /* Superpage entry */
@@ -640,6 +701,7 @@ static int apply_one_level(struct domain *d,
memset(&pte, 0x00, sizeof(pte));
p2m_write_pte(entry, pte, flush_cache);
+ radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));
*addr += level_size;
@@ -1048,7 +1110,10 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
- paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
+ paddr_t p;
+ spin_lock(&d->arch.p2m.lock);
+ p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL, NULL);
+ spin_unlock(&d->arch.p2m.lock);
return p >> PAGE_SHIFT;
}
@@ -1080,6 +1145,241 @@ err:
return page;
}
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
+ bool_t access_r, bool_t access_w, bool_t access_x,
+ bool_t ptw)
+{
+ struct vcpu *v = current;
+ mem_event_request_t *req = NULL;
+ xenmem_access_t xma;
+ bool_t violation;
+ int rc;
+
+ /* If we have no listener, nothing to do */
+ if ( !mem_event_check_ring(&v->domain->mem_event->access) )
+ {
+ return 1;
+ }
+
+ rc = p2m_get_mem_access(v->domain, paddr_to_pfn(gpa), &xma);
+ if ( rc )
+ return rc;
+
+ switch ( xma )
+ {
+ default:
+ case XENMEM_access_n:
+ violation = access_r || access_w || access_x;
+ break;
+ case XENMEM_access_r:
+ violation = access_w || access_x;
+ break;
+ case XENMEM_access_w:
+ violation = access_r || access_x;
+ break;
+ case XENMEM_access_x:
+ violation = access_r || access_w;
+ break;
+ case XENMEM_access_rx:
+ violation = access_w;
+ break;
+ case XENMEM_access_wx:
+ violation = access_r;
+ break;
+ case XENMEM_access_rw:
+ violation = access_x;
+ break;
+ case XENMEM_access_rwx:
+ violation = 0;
+ break;
+ }
+
+ if ( !violation )
+ return 1;
+
+ req = xzalloc(mem_event_request_t);
+ if ( req )
+ {
+ req->reason = MEM_EVENT_REASON_VIOLATION;
+ req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ req->gfn = gpa >> PAGE_SHIFT;
+ req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
+ req->gla = gla;
+ req->gla_valid = 1;
+ req->access_r = access_r;
+ req->access_w = access_w;
+ req->access_x = access_x;
+ req->vcpu_id = v->vcpu_id;
+
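+ /* Pause the faulting VCPU before putting the request on the ring;
+ * the listener unpauses it later via p2m_mem_access_resume(). */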
+ mem_event_vcpu_pause(v);
+ mem_access_send_req(v->domain, req);
+
+ xfree(req);
+
+ return 0;
+ }
+
+ return 1;
+}
+
+void p2m_mem_access_resume(struct domain *d)
+{
+ mem_event_response_t rsp;
+
+ /* Pull all responses off the ring */
+ while ( mem_event_get_response(d, &d->mem_event->access, &rsp) )
+ {
+ struct vcpu *v;
+
+ if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+ continue;
+
+ /* Validate the vcpu_id in the response. */
+ if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
+ continue;
+
+ v = d->vcpu[rsp.vcpu_id];
+
+ /* Unpause domain */
+ if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+ mem_event_vcpu_unpause(v);
+ }
+}
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ p2m_access_t a;
+ long rc = 0;
+
+ static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ switch ( access )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ a = memaccess[access];
+ break;
+ case XENMEM_access_default:
+ a = p2m->default_access;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* If request to set default access */
+ if ( pfn == ~0ul )
+ {
+ p2m->default_access = a;
+ return 0;
+ }
+
+ spin_lock(&p2m->lock);
+ for ( pfn += start; nr > start; ++pfn )
+ {
+ paddr_t maddr = p2m_lookup(d, pfn_to_paddr(pfn), &a, NULL);
+ unsigned long mfn = maddr >> PAGE_SHIFT;
+
+ if ( !mfn_valid(mfn) )
+ break;
+
+ rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
+ radix_tree_int_to_ptr(a));
+
+ switch ( rc )
+ {
+ case 0:
+ /* Nothing to do, setting saved successfully */
+ break;
+ case -EEXIST:
+ /* If a setting existed already, change it to the new one */
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(
+ &p2m->mem_access_settings, pfn),
+ radix_tree_int_to_ptr(a));
+ rc = 0;
+ break;
+ default:
+ /* If we fail to save the setting in the Radix tree, we
+ * need to reset the PTE permissions to default. */
+ p2m_lookup(d, pfn_to_paddr(pfn), &p2m->default_access, NULL);
+ break;
+ }
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ {
+ rc = start;
+ break;
+ }
+ }
+
+ /* Flush the TLB of the domain to ensure consistency */
+ flush_tlb_domain(d);
+
+ spin_unlock(&p2m->lock);
+ return rc;
+}
+
+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+ xenmem_access_t *access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ void *i;
+ int index;
+
+ static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ /* If request to get default access */
+ if ( gpfn == ~0ul )
+ {
+ *access = memaccess[p2m->default_access];
+ return 0;
+ }
+
+ spin_lock(&p2m->lock);
+
+ i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+
+ spin_unlock(&p2m->lock);
+
+ if ( !i )
+ return -ESRCH;
+
+ index = radix_tree_ptr_to_int(i);
+
+ if ( (unsigned) index >= ARRAY_SIZE(memaccess) )
+ return -ERANGE;
+
+ *access = memaccess[(unsigned)index];
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 76a9586..860905a 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1674,23 +1674,25 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
uint32_t offset;
uint32_t *first = NULL, *second = NULL;
+ spin_lock(&d->arch.p2m.lock);
+
printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
printk(" TTBCR: 0x%08"PRIregister"\n", ttbcr);
printk(" TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n",
- ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL));
+ ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, NULL));
if ( ttbcr & TTBCR_EAE )
{
printk("Cannot handle LPAE guest PT walk\n");
- return;
+ goto err;
}
if ( (ttbcr & TTBCR_N_MASK) != 0 )
{
printk("Cannot handle TTBR1 guest walks\n");
- return;
+ goto err;
}
- paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL);
+ paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, NULL);
if ( paddr == INVALID_PADDR )
{
printk("Failed TTBR0 maddr lookup\n");
@@ -1705,7 +1707,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
!(first[offset] & 0x2) )
goto done;
- paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL);
+ paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL, NULL);
if ( paddr == INVALID_PADDR )
{
@@ -1720,6 +1722,9 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
done:
if (second) unmap_domain_page(second);
if (first) unmap_domain_page(first);
+
+err:
+ spin_unlock(&d->arch.p2m.lock);
}
static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
@@ -1749,13 +1754,29 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
info.gva = READ_SYSREG64(FAR_EL2);
#endif
- if (dabt.s1ptw)
- goto bad_data_abort;
-
rc = gva_to_ipa(info.gva, &info.gpa);
if ( rc == -EFAULT )
goto bad_data_abort;
+ rc = 0;
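+ /* Only stage-2 permission faults can be mem_access traps; any
+ * other DFSC falls through to the existing handling below. */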
+ switch ( dabt.dfsc )
+ {
+ case DABT_DFSC_PERMISSION_1:
+ case DABT_DFSC_PERMISSION_2:
+ case DABT_DFSC_PERMISSION_3:
+ rc = p2m_mem_access_check(info.gpa, info.gva,
+ 1, info.dabt.write, 0,
+ info.dabt.s1ptw);
+
+ /* Trap was triggered by mem_access, work here is done */
+ if ( !rc )
+ return;
+
+ break;
+ default:
+ break;
+ }
+
/* XXX: Decode the instruction if ISS is not valid */
if ( !dabt.valid )
goto bad_data_abort;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index afdbf84..0412a60 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -130,7 +130,10 @@ void p2m_restore_state(struct vcpu *n);
void p2m_dump_info(struct domain *d);
/* Look up the MFN corresponding to a domain's PFN. */
-paddr_t p2m_lookup(struct domain *d, paddr_t gpfn, p2m_type_t *t);
+paddr_t p2m_lookup(struct domain *d,
+ paddr_t gpfn,
+ p2m_access_t *a,
+ p2m_type_t *t);
/* Clean & invalidate caches corresponding to a region of guest address space
*/
int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn);
@@ -187,7 +190,7 @@ static inline struct page_info *get_page_from_gfn(
{
struct page_info *page;
p2m_type_t p2mt;
- paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), &p2mt);
+ paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), NULL, &p2mt);
unsigned long mfn = maddr >> PAGE_SHIFT;
if (t)
@@ -233,6 +236,28 @@ static inline int get_page_and_type(struct page_info *page,
return rc;
}
+/* get host p2m table */
+#define p2m_get_hostp2m(d) (&((d)->arch.p2m))
+
+/* Send mem event based on the access (gla is -1ull if not available). Boolean
+ * return value indicates if trap needs to be injected into guest. */
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
+ bool_t access_r, bool_t access_w, bool_t access_x,
+ bool_t ptw);
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct domain *d);
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access);
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+ xenmem_access_t *access);
+
#endif /* _XEN_P2M_H */
/*
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 9d230f3..0f1500a 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -259,6 +259,36 @@ enum dabt_size {
DABT_DOUBLE_WORD = 3,
};
+/* Data abort fault status codes (DFSC), from the HSR ISS encoding */
+enum dabt_dfsc {
+ DABT_DFSC_ADDR_SIZE_0 = 0b000000,
+ DABT_DFSC_ADDR_SIZE_1 = 0b000001,
+ DABT_DFSC_ADDR_SIZE_2 = 0b000010,
+ DABT_DFSC_ADDR_SIZE_3 = 0b000011,
+ DABT_DFSC_TRANSLATION_0 = 0b000100,
+ DABT_DFSC_TRANSLATION_1 = 0b000101,
+ DABT_DFSC_TRANSLATION_2 = 0b000110,
+ DABT_DFSC_TRANSLATION_3 = 0b000111,
+ DABT_DFSC_ACCESS_1 = 0b001001,
+ DABT_DFSC_ACCESS_2 = 0b001010,
+ DABT_DFSC_ACCESS_3 = 0b001011,
+ DABT_DFSC_PERMISSION_1 = 0b001101,
+ DABT_DFSC_PERMISSION_2 = 0b001110,
+ DABT_DFSC_PERMISSION_3 = 0b001111,
+ DABT_DFSC_SYNC_EXT = 0b010000,
+ DABT_DFSC_SYNC_PARITY = 0b011000,
+ DABT_DFSC_SYNC_EXT_TTW_0 = 0b010100,
+ DABT_DFSC_SYNC_EXT_TTW_1 = 0b010101,
+ DABT_DFSC_SYNC_EXT_TTW_2 = 0b010110,
+ DABT_DFSC_SYNC_EXT_TTW_3 = 0b010111,
+ DABT_DFSC_SYNC_PARITY_TTW_0 = 0b011100,
+ DABT_DFSC_SYNC_PARITY_TTW_1 = 0b011101,
+ DABT_DFSC_SYNC_PARITY_TTW_2 = 0b011110,
+ DABT_DFSC_SYNC_PARITY_TTW_3 = 0b011111,
+ DABT_DFSC_ALIGNMENT = 0b100001,
+ DABT_DFSC_TLB_CONFLICT = 0b110000,
+};
+
union hsr {
uint32_t bits;
struct {
--
2.1.0.rc1