
[Xen-devel] [PATCH v9 07/10] xen: delay allocation of grant table sub structures



Delay the allocation of the grant table sub structures so that the
parameters used for sizing them can be modified on a per-domain
basis. Allocate the structures and the table frames only from
grant_table_set_limits() (for dom0: from grant_table_create()).
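
For illustration (a sketch, not part of the patch), the resulting
call flow is roughly:

    grant_table_create(d)                  /* all domains */
        -> grant_table_init(d, t)          /* dom0 only */

    grant_table_set_limits(d, ...)         /* other domains, later */
        -> grant_table_init(d, gt)         /* only if !gt->active */
            -> gnttab_grow_table(d, 0)     /* INITIAL_NR_GRANT_FRAMES */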

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
V9:
- allocate initial grant frames from grant_table_init() (Jan Beulich)

V6:
- move call of grant_table_init() for dom0 to grant_table_create()
  (Jan Beulich)
- move frame allocations to gnttab_grow_table() (Jan Beulich)
- several other changes due to new patch order

V4:
- make ret more local (Wei Liu)

V3:
- move call of grant_table_init() from gnttab_setup_table() to
  gnttab_grow_table() (Paul Durrant)
---
 xen/common/grant_table.c | 120 ++++++++++++++++++++++++------------------------
 1 file changed, 60 insertions(+), 60 deletions(-)
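
Both call sites serialize the one-shot initialization under the grant
table write lock; condensed from the hunks below:

    grant_write_lock(gt);
    if ( !gt->active )                  /* not yet initialized */
        ret = grant_table_init(d, gt);
    grant_write_unlock(gt);

grant_table_init() additionally rejects a second invocation with
-EBUSY, so double allocation is ruled out even without the check.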

diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 0f09891f59..3250db4c5a 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1667,6 +1667,10 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
     struct grant_table *gt = d->grant_table;
     unsigned int i, j;
 
+    ASSERT(gt->active);
+
+    if ( req_nr_frames < INITIAL_NR_GRANT_FRAMES )
+        req_nr_frames = INITIAL_NR_GRANT_FRAMES;
     ASSERT(req_nr_frames <= max_grant_frames);
 
     gdprintk(XENLOG_INFO,
@@ -1723,6 +1727,50 @@ active_alloc_failed:
     return 0;
 }
 
+static int
+grant_table_init(struct domain *d, struct grant_table *gt)
+{
+    if ( gt->active )
+        return -EBUSY;
+
+    /* Active grant table. */
+    gt->active = xzalloc_array(struct active_grant_entry *,
+                               max_nr_active_grant_frames);
+    if ( gt->active == NULL )
+        goto no_mem;
+
+    /* Tracking of mapped foreign frames table */
+    gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
+    if ( gt->maptrack == NULL )
+        goto no_mem;
+
+    /* Shared grant table. */
+    gt->shared_raw = xzalloc_array(void *, max_grant_frames);
+    if ( gt->shared_raw == NULL )
+        goto no_mem;
+
+    /* Status pages for grant table - for version 2 */
+    gt->status = xzalloc_array(grant_status_t *,
+                               grant_to_status_frames(max_grant_frames));
+    if ( gt->status == NULL )
+        goto no_mem;
+
+    /* gnttab_grow_table() allocates a min number of frames, so 0 is okay. */
+    if ( gnttab_grow_table(d, 0) )
+        return 0;
+
+ no_mem:
+    xfree(gt->status);
+    gt->status = NULL;
+    xfree(gt->shared_raw);
+    gt->shared_raw = NULL;
+    vfree(gt->maptrack);
+    gt->maptrack = NULL;
+    xfree(gt->active);
+    gt->active = NULL;
+    return -ENOMEM;
+}
+
 static long
 gnttab_setup_table(
     XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, unsigned int count)
@@ -3383,75 +3429,26 @@ grant_table_create(
     struct domain *d)
 {
     struct grant_table *t;
-    unsigned int i, j;
+    int ret = 0;
 
     if ( (t = xzalloc(struct grant_table)) == NULL )
-        goto no_mem_0;
+        return -ENOMEM;
 
     /* Simple stuff. */
     percpu_rwlock_resource_init(&t->lock, grant_rwlock);
     spin_lock_init(&t->maptrack_lock);
-    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
-
-    /* Active grant table. */
-    if ( (t->active = xzalloc_array(struct active_grant_entry *,
-                                    max_nr_active_grant_frames)) == NULL )
-        goto no_mem_1;
-    for ( i = 0;
-          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
-    {
-        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
-            goto no_mem_2;
-        clear_page(t->active[i]);
-        for ( j = 0; j < ACGNT_PER_PAGE; j++ )
-            spin_lock_init(&t->active[i][j].lock);
-    }
 
-    /* Tracking of mapped foreign frames table */
-    t->maptrack = vzalloc(max_maptrack_frames * sizeof(*t->maptrack));
-    if ( t->maptrack == NULL )
-        goto no_mem_2;
+    /* Okay, install the structure. */
+    d->grant_table = t;
 
-    /* Shared grant table. */
-    if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
-        goto no_mem_3;
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+    if ( d->domain_id == 0 )
     {
-        if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL )
-            goto no_mem_4;
-        clear_page(t->shared_raw[i]);
+        grant_write_lock(t);
+        ret = grant_table_init(d, t);
+        grant_write_unlock(t);
     }
 
-    /* Status pages for grant table - for version 2 */
-    t->status = xzalloc_array(grant_status_t *,
-                              grant_to_status_frames(max_grant_frames));
-    if ( t->status == NULL )
-        goto no_mem_4;
-
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
-        gnttab_create_shared_page(d, t, i);
-
-    t->nr_status_frames = 0;
-
-    /* Okay, install the structure. */
-    d->grant_table = t;
-    return 0;
-
- no_mem_4:
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
-        free_xenheap_page(t->shared_raw[i]);
-    xfree(t->shared_raw);
- no_mem_3:
-    vfree(t->maptrack);
- no_mem_2:
-    for ( i = 0;
-          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
-        free_xenheap_page(t->active[i]);
-    xfree(t->active);
- no_mem_1:
-    xfree(t);
- no_mem_0:
-    return -ENOMEM;
+    return ret;
 }
 
 void
@@ -3653,8 +3650,9 @@ int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
 
     grant_write_lock(gt);
 
-    ret = 0;
-    /* Set limits, alloc needed arrays. */
+    /* Set limits. */
+    if ( !gt->active )
+        ret = grant_table_init(d, gt);
 
     grant_write_unlock(gt);
 
-- 
2.12.3

